python_code | repo_name | file_path
---|---|---|
import os
import sys
from argparse import ArgumentParser
from getpass import getpass
from typing import List, Union
from requests.exceptions import HTTPError
from transformers.commands import BaseTransformersCLICommand
from transformers.hf_api import HfApi, HfFolder
UPLOAD_MAX_FILES = 15
class UserCommands(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
login_parser = parser.add_parser("login", help="Log in using the same credentials as on huggingface.co")
login_parser.set_defaults(func=lambda args: LoginCommand(args))
whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.")
whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args))
logout_parser = parser.add_parser("logout", help="Log out")
logout_parser.set_defaults(func=lambda args: LogoutCommand(args))
# s3
s3_parser = parser.add_parser("s3", help="{ls, rm} Commands to interact with the files you upload on S3.")
s3_subparsers = s3_parser.add_subparsers(help="s3 related commands")
ls_parser = s3_subparsers.add_parser("ls")
ls_parser.set_defaults(func=lambda args: ListObjsCommand(args))
rm_parser = s3_subparsers.add_parser("rm")
rm_parser.add_argument("filename", type=str, help="individual object filename to delete from S3.")
rm_parser.set_defaults(func=lambda args: DeleteObjCommand(args))
# upload
upload_parser = parser.add_parser("upload")
upload_parser.add_argument("path", type=str, help="Local path of the folder or individual file to upload.")
upload_parser.add_argument(
"--filename", type=str, default=None, help="Optional: override individual object filename on S3."
)
upload_parser.set_defaults(func=lambda args: UploadCommand(args))
class ANSI:
"""
Helper for en.wikipedia.org/wiki/ANSI_escape_code
"""
_bold = "\u001b[1m"
_reset = "\u001b[0m"
@classmethod
def bold(cls, s):
return "{}{}{}".format(cls._bold, s, cls._reset)
class BaseUserCommand:
def __init__(self, args):
self.args = args
self._api = HfApi()
class LoginCommand(BaseUserCommand):
def run(self):
print(
"""
_| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|
_| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
_|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|
_| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
_| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|
"""
)
username = input("Username: ")
password = getpass()
try:
token = self._api.login(username, password)
except HTTPError as e:
# probably invalid credentials, display error message.
print(e)
exit(1)
HfFolder.save_token(token)
print("Login successful")
print("Your token:", token, "\n")
print("Your token has been saved to", HfFolder.path_token)
class WhoamiCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit()
try:
user = self._api.whoami(token)
print(user)
except HTTPError as e:
print(e)
class LogoutCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit()
HfFolder.delete_token()
self._api.logout(token)
print("Successfully logged out.")
class ListObjsCommand(BaseUserCommand):
def tabulate(self, rows: List[List[Union[str, int]]], headers: List[str]) -> str:
"""
Inspired by:
stackoverflow.com/a/8356620/593036
stackoverflow.com/questions/9535954/printing-lists-as-tabular-data
"""
col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)]
row_format = ("{{:{}}} " * len(headers)).format(*col_widths)
lines = []
lines.append(row_format.format(*headers))
lines.append(row_format.format(*["-" * w for w in col_widths]))
for row in rows:
lines.append(row_format.format(*row))
return "\n".join(lines)
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
try:
objs = self._api.list_objs(token)
except HTTPError as e:
print(e)
exit(1)
if len(objs) == 0:
print("No shared file yet")
exit()
rows = [[obj.filename, obj.LastModified, obj.ETag, obj.Size] for obj in objs]
print(self.tabulate(rows, headers=["Filename", "LastModified", "ETag", "Size"]))
class DeleteObjCommand(BaseUserCommand):
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
try:
self._api.delete_obj(token, filename=self.args.filename)
except HTTPError as e:
print(e)
exit(1)
print("Done")
class UploadCommand(BaseUserCommand):
def walk_dir(self, rel_path):
"""
Recursively list all files in a folder.
"""
entries: List[os.DirEntry] = list(os.scandir(rel_path))
files = [(os.path.join(os.getcwd(), f.path), f.path) for f in entries if f.is_file()] # (filepath, filename)
for f in entries:
if f.is_dir():
files += self.walk_dir(f.path)
return files
def run(self):
token = HfFolder.get_token()
if token is None:
print("Not logged in")
exit(1)
local_path = os.path.abspath(self.args.path)
if os.path.isdir(local_path):
if self.args.filename is not None:
raise ValueError("Cannot specify a filename override when uploading a folder.")
rel_path = os.path.basename(local_path)
files = self.walk_dir(rel_path)
elif os.path.isfile(local_path):
filename = self.args.filename if self.args.filename is not None else os.path.basename(local_path)
files = [(local_path, filename)]
else:
raise ValueError("Not a valid file or directory: {}".format(local_path))
if sys.platform == "win32":
files = [(filepath, filename.replace(os.sep, "/")) for filepath, filename in files]
if len(files) > UPLOAD_MAX_FILES:
print(
"About to upload {} files to S3. This is probably wrong. Please filter files before uploading.".format(
ANSI.bold(len(files))
)
)
exit(1)
for filepath, filename in files:
print("About to upload file {} to S3 under filename {}".format(ANSI.bold(filepath), ANSI.bold(filename)))
choice = input("Proceed? [Y/n] ").lower()
if not (choice == "" or choice == "y" or choice == "yes"):
print("Abort")
exit()
print(ANSI.bold("Uploading... This might take a while if files are large"))
for filepath, filename in files:
access_url = self._api.presign_and_upload(token=token, filename=filename, filepath=filepath)
print("Your file now lives at:")
print(access_url)
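# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): this is roughly how a
# top-level CLI could wire the subcommands registered above into an
# ArgumentParser and dispatch to the selected command. The program name
# "transformers-cli" and the dispatch pattern are assumptions inferred from the
# register_subcommand/set_defaults(func=...) convention used in this file.
if __name__ == "__main__":
    cli_parser = ArgumentParser("transformers-cli", usage="transformers-cli <command> [<args>]")
    # register_subcommand expects the object returned by add_subparsers(),
    # because it calls parser.add_parser(...) internally.
    subcommands = cli_parser.add_subparsers(help="transformers-cli command helpers")
    UserCommands.register_subcommand(subcommands)
    cli_args = cli_parser.parse_args()
    if not hasattr(cli_args, "func"):
        cli_parser.print_help()
        exit(1)
    # Each set_defaults(func=...) lambda builds the command object; run() executes it.
    command = cli_args.func(cli_args)
    command.run()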
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/user.py |
import logging
from argparse import ArgumentParser
from transformers.commands import BaseTransformersCLICommand
from transformers.pipelines import SUPPORTED_TASKS, Pipeline, PipelineDataFormat, pipeline
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(ext):
return ext
raise Exception(
"Unable to determine file format from file extension {}. "
"Please provide the format through --format {}".format(path, PipelineDataFormat.SUPPORTED_FORMATS)
)
def run_command_factory(args):
nlp = pipeline(
task=args.task,
model=args.model if args.model else None,
config=args.config,
tokenizer=args.tokenizer,
device=args.device,
)
format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
reader = PipelineDataFormat.from_str(
format=format,
output_path=args.output,
input_path=args.input,
column=args.column if args.column else nlp.default_input_names,
overwrite=args.overwrite,
)
return RunCommand(nlp, reader)
class RunCommand(BaseTransformersCLICommand):
def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
self._nlp = nlp
self._reader = reader
@staticmethod
def register_subcommand(parser: ArgumentParser):
run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
run_parser.add_argument("--task", choices=SUPPORTED_TASKS.keys(), help="Task to run")
run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
run_parser.add_argument(
"--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
)
run_parser.add_argument(
"--column",
type=str,
help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
)
run_parser.add_argument(
"--format",
type=str,
default="infer",
choices=PipelineDataFormat.SUPPORTED_FORMATS,
help="Input format to read from",
)
run_parser.add_argument(
"--device",
type=int,
default=-1,
help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
)
run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
run_parser.set_defaults(func=run_command_factory)
def run(self):
nlp, outputs = self._nlp, []
for entry in self._reader:
output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
if isinstance(output, dict):
outputs.append(output)
else:
outputs += output
# Saving data
if self._nlp.binary_output:
binary_path = self._reader.save_binary(outputs)
logger.warning("Current pipeline requires output to be in binary format, saving at {}".format(binary_path))
else:
self._reader.save(outputs)
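# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the factory above is
# normally reached through the CLI, e.g.
#   transformers-cli run --task sentiment-analysis --input reviews.csv \
#       --column text --output predictions.json --format infer
# The file names, task name and column are hypothetical. The equivalent
# programmatic wiring would be roughly:
#   nlp = pipeline(task="sentiment-analysis", model=None, config=None, tokenizer=None, device=-1)
#   reader = PipelineDataFormat.from_str(
#       format=try_infer_format_from_ext("reviews.csv"),
#       output_path="predictions.json",
#       input_path="reviews.csv",
#       column="text",
#       overwrite=False,
#   )
#   RunCommand(nlp, reader).run()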
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/run.py |
import platform
from argparse import ArgumentParser
from transformers import __version__ as version
from transformers import is_tf_available, is_torch_available
from transformers.commands import BaseTransformersCLICommand
def info_command_factory(_):
return EnvironmentCommand()
class EnvironmentCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("env")
download_parser.set_defaults(func=info_command_factory)
def run(self):
pt_version = "not installed"
pt_cuda_available = "NA"
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
tf_version = "not installed"
tf_cuda_available = "NA"
if is_tf_available():
import tensorflow as tf
tf_version = tf.__version__
try:
# deprecated in v2.1
tf_cuda_available = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
info = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": "{} ({})".format(pt_version, pt_cuda_available),
"Tensorflow version (GPU?)": "{} ({})".format(tf_version, tf_cuda_available),
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
print(self.format_dict(info))
return info
@staticmethod
def format_dict(d):
return "\n".join(["- {}: {}".format(prop, val) for prop, val in d.items()]) + "\n"
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/env.py |
import logging
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from transformers import Pipeline
from transformers.commands import BaseTransformersCLICommand
from transformers.pipelines import SUPPORTED_TASKS, pipeline
try:
from uvicorn import run
from fastapi import FastAPI, HTTPException, Body
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
_serve_dependencies_installed = True
except (ImportError, AttributeError):
BaseModel = object
def Body(*x, **y):
pass
_serve_dependencies_installed = False
logger = logging.getLogger("transformers-cli/serving")
def serve_command_factory(args: Namespace):
"""
Factory function used to instantiate serving server from provided command line arguments.
:return: ServeCommand
"""
nlp = pipeline(
task=args.task,
model=args.model if args.model else None,
config=args.config,
tokenizer=args.tokenizer,
device=args.device,
)
return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
"""
Expose model information
"""
infos: dict
class ServeTokenizeResult(BaseModel):
"""
Tokenize result model
"""
tokens: List[str]
tokens_ids: Optional[List[int]]
class ServeDeTokenizeResult(BaseModel):
"""
DeTokenize result model
"""
text: str
class ServeForwardResult(BaseModel):
"""
Forward result model
"""
output: Any
class ServeCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
        Register this command to argparse so it's available for the transformers-cli
:param parser: Root parser to register command-specific arguments
:return:
"""
serve_parser = parser.add_parser(
"serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
)
serve_parser.add_argument(
"--task", type=str, choices=SUPPORTED_TASKS.keys(), help="The task to run the pipeline on"
)
serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
serve_parser.add_argument(
"--device",
type=int,
default=-1,
help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
)
serve_parser.set_defaults(func=serve_command_factory)
def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
self._pipeline = pipeline
self.host = host
self.port = port
self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and unicorn. "
'Please install transformers with [serving]: pip install "transformers[serving]".'
"Or install FastAPI and unicorn separately."
)
else:
logger.info("Serving model over {}:{}".format(host, port))
self._app = FastAPI(
routes=[
APIRoute(
"/",
self.model_info,
response_model=ServeModelInfoResult,
response_class=JSONResponse,
methods=["GET"],
),
APIRoute(
"/tokenize",
self.tokenize,
response_model=ServeTokenizeResult,
response_class=JSONResponse,
methods=["POST"],
),
APIRoute(
"/detokenize",
self.detokenize,
response_model=ServeDeTokenizeResult,
response_class=JSONResponse,
methods=["POST"],
),
APIRoute(
"/forward",
self.forward,
response_model=ServeForwardResult,
response_class=JSONResponse,
methods=["POST"],
),
],
timeout=600,
)
def run(self):
run(self._app, host=self.host, port=self.port, workers=self.workers)
def model_info(self):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
"""
Tokenize the provided input and eventually returns corresponding tokens id:
- **text_input**: String to tokenize
        - **return_ids**: Boolean flag indicating whether the tokens should be converted to their integer mapping.
"""
try:
tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
if return_ids:
tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
else:
return ServeTokenizeResult(tokens=tokens_txt)
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
def detokenize(
self,
tokens_ids: List[int] = Body(None, embed=True),
skip_special_tokens: bool = Body(False, embed=True),
cleanup_tokenization_spaces: bool = Body(True, embed=True),
):
"""
Detokenize the provided tokens ids to readable text:
- **tokens_ids**: List of tokens ids
- **skip_special_tokens**: Flag indicating to not try to decode special tokens
- **cleanup_tokenization_spaces**: Flag indicating to remove all leading/trailing spaces and intermediate ones.
"""
try:
decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
return ServeDeTokenizeResult(model="", text=decoded_str)
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
async def forward(self, inputs=Body(None, embed=True)):
"""
**inputs**:
**attention_mask**:
**tokens_type_ids**:
"""
# Check we don't have empty string
if len(inputs) == 0:
return ServeForwardResult(output=[], attention=[])
try:
# Forward through the model
output = self._pipeline(inputs)
return ServeForwardResult(output=output)
except Exception as e:
raise HTTPException(500, {"error": str(e)})
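# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): once the server is
# started (e.g. `transformers-cli serve --task sentiment-analysis`), the routes
# above accept JSON bodies whose keys match the Body(..., embed=True) parameter
# names. A hypothetical client using the `requests` package could look like:
#   import requests
#   base = "http://localhost:8888"
#   requests.get(base + "/").json()  # model configuration (ServeModelInfoResult)
#   requests.post(base + "/tokenize", json={"text_input": "Hello world", "return_ids": True}).json()
#   requests.post(base + "/detokenize", json={"tokens_ids": [101, 7592, 102], "skip_special_tokens": True}).json()
#   requests.post(base + "/forward", json={"inputs": "Hello world"}).json()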
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/serving.py |
from argparse import ArgumentParser
from transformers.commands import BaseTransformersCLICommand
def download_command_factory(args):
return DownloadCommand(args.model, args.cache_dir, args.force)
class DownloadCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("download")
download_parser.add_argument(
"--cache-dir", type=str, default=None, help="Path to location to store the models"
)
download_parser.add_argument(
"--force", action="store_true", help="Force the model to be download even if already in cache-dir"
)
download_parser.add_argument("model", type=str, help="Name of the model to download")
download_parser.set_defaults(func=download_command_factory)
def __init__(self, model: str, cache: str, force: bool):
self._model = model
self._cache = cache
self._force = force
def run(self):
from transformers import AutoModel, AutoTokenizer
AutoModel.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
AutoTokenizer.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
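# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): the command is a thin
# wrapper around the Auto classes, so
#   transformers-cli download bert-base-uncased
# is roughly equivalent to running:
#   from transformers import AutoModel, AutoTokenizer
#   AutoModel.from_pretrained("bert-base-uncased", cache_dir=None, force_download=False)
#   AutoTokenizer.from_pretrained("bert-base-uncased", cache_dir=None, force_download=False)
# "bert-base-uncased" is only an example model identifier.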
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/download.py |
from argparse import ArgumentParser, Namespace
from logging import getLogger
from transformers.commands import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
"""
    Factory function used to convert an original TF 1.0 model checkpoint into a PyTorch checkpoint.
    :return: ConvertCommand
"""
return ConvertCommand(
args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
)
class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
        Register this command to argparse so it's available for the transformers-cli
:param parser: Root parser to register command-specific arguments
:return:
"""
train_parser = parser.add_parser(
"convert",
help="CLI tool to run convert model from original "
"author checkpoints to Transformers PyTorch checkpoints.",
)
train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
train_parser.add_argument(
"--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
)
train_parser.add_argument(
"--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch savd model output."
)
train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
train_parser.add_argument(
"--finetuning_task_name",
type=str,
default=None,
help="Optional fine-tuning task name if the TF model was a finetuned model.",
)
train_parser.set_defaults(func=convert_command_factory)
def __init__(
self,
model_type: str,
tf_checkpoint: str,
pytorch_dump_output: str,
config: str,
finetuning_task_name: str,
*args
):
self._logger = getLogger("transformers-cli/converting")
self._logger.info("Loading model {}".format(model_type))
self._model_type = model_type
self._tf_checkpoint = tf_checkpoint
self._pytorch_dump_output = pytorch_dump_output
self._config = config
self._finetuning_task_name = finetuning_task_name
def run(self):
if self._model_type == "bert":
try:
from transformers.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "gpt":
from transformers.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
if "ckpt" in self._tf_checkpoint.lower():
TF_CHECKPOINT = self._tf_checkpoint
TF_DATASET_FILE = ""
else:
TF_DATASET_FILE = self._tf_checkpoint
TF_CHECKPOINT = ""
convert_transfo_xl_checkpoint_to_pytorch(
TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
)
elif self._model_type == "gpt2":
try:
from transformers.convert_gpt2_original_tf_checkpoint_to_pytorch import (
convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from transformers.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
)
elif self._model_type == "xlm":
from transformers.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
else:
raise ValueError("--model_type should be selected in the list [bert, gpt, gpt2, transfo_xl, xlnet, xlm]")
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/convert.py |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
@staticmethod
@abstractmethod
def register_subcommand(parser: ArgumentParser):
raise NotImplementedError()
@abstractmethod
def run(self):
raise NotImplementedError()
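# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original module): every concrete CLI
# command in this package implements the two abstract members above. A minimal,
# made-up example command could look like this:
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # `parser` is the object returned by ArgumentParser.add_subparsers().
        hello_parser = parser.add_parser("hello", help="Print a greeting (illustrative only).")
        hello_parser.add_argument("name", type=str, help="Name to greet.")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print("Hello, {}!".format(self._name))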
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/__init__.py |
import os
from argparse import ArgumentParser, Namespace
from logging import getLogger
from transformers import SingleSentenceClassificationProcessor as Processor
from transformers import TextClassificationPipeline, is_tf_available, is_torch_available
from transformers.commands import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
"""
    Factory function used to instantiate the training command from the provided command line arguments.
    :return: TrainCommand
"""
return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
        Register this command to argparse so it's available for the transformers-cli
:param parser: Root parser to register command-specific arguments
:return:
"""
train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
train_parser.add_argument(
"--train_data",
type=str,
required=True,
help="path to train (and optionally evaluation) dataset as a csv with "
"tab separated labels and sentences.",
)
train_parser.add_argument(
"--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
)
train_parser.add_argument(
"--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
)
train_parser.add_argument(
"--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
)
train_parser.add_argument(
"--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
)
train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
train_parser.add_argument(
"--validation_split",
type=float,
default=0.1,
help="if validation dataset is not provided, fraction of train dataset " "to use as validation dataset.",
)
train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
train_parser.add_argument(
"--task", type=str, default="text_classification", help="Task to train the model on."
)
train_parser.add_argument(
"--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
)
train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
train_parser.set_defaults(func=train_command_factory)
def __init__(self, args: Namespace):
self.logger = getLogger("transformers-cli/training")
self.framework = "tf" if is_tf_available() else "torch"
os.makedirs(args.output, exist_ok=True)
assert os.path.isdir(args.output)
self.output = args.output
self.column_label = args.column_label
self.column_text = args.column_text
self.column_id = args.column_id
self.logger.info("Loading {} pipeline for {}".format(args.task, args.model))
if args.task == "text_classification":
self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info("Loading dataset from {}".format(args.train_data))
self.train_dataset = Processor.create_from_csv(
args.train_data,
column_label=args.column_label,
column_text=args.column_text,
column_id=args.column_id,
skip_first_row=args.skip_first_row,
)
self.valid_dataset = None
if args.validation_data:
self.logger.info("Loading validation dataset from {}".format(args.validation_data))
self.valid_dataset = Processor.create_from_csv(
args.validation_data,
column_label=args.column_label,
column_text=args.column_text,
column_id=args.column_id,
skip_first_row=args.skip_first_row,
)
self.validation_split = args.validation_split
self.train_batch_size = args.train_batch_size
self.valid_batch_size = args.valid_batch_size
self.learning_rate = args.learning_rate
self.adam_epsilon = args.adam_epsilon
def run(self):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def run_torch(self):
raise NotImplementedError
def run_tf(self):
self.pipeline.fit(
self.train_dataset,
validation_data=self.valid_dataset,
validation_split=self.validation_split,
learning_rate=self.learning_rate,
adam_epsilon=self.adam_epsilon,
train_batch_size=self.train_batch_size,
valid_batch_size=self.valid_batch_size,
)
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
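# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): a typical invocation,
# assuming a hypothetical tab-separated csv with labels in column 0 and text in
# column 1, would be:
#   transformers-cli train --train_data train.csv --column_label 0 --column_text 1 \
#       --validation_split 0.1 --output ./trained_model/ --task text_classification \
#       --model bert-base-uncased --train_batch_size 32 --learning_rate 3e-5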
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/commands/train.py |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
from .metrics import is_sklearn_available
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
xglue_convert_examples_to_features,
xglue_convert_examples_to_vat_features,
xglue_output_modes,
xglue_processors,
xglue_tasks_num_labels,
xtreme_convert_examples_to_features,
xtreme_output_modes,
xtreme_processors,
xtreme_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
if is_sklearn_available():
from .metrics import glue_compute_metrics, xnli_compute_metrics, xglue_compute_metrics, xtreme_compute_metrics
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/__init__.py |
# coding=utf-8
# Based on the SQuAD evaluation script from:
# https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# https://github.com/allenai/bi-att-flow/blob/master/squad/evaluate-v1.1.py
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
def evaluate_with_path(dataset_file, prediction_file):
with open(dataset_file) as dataset_file_reader:
dataset_json = json.load(dataset_file_reader)
dataset = dataset_json['data']
with open(prediction_file) as prediction_file_reader:
predictions = json.load(prediction_file_reader)
return evaluate(dataset, predictions)
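# ---------------------------------------------------------------------------
# Hedged worked example (not part of the original script): evaluate() operates
# on the raw SQuAD v1.1 json structure, so a tiny in-memory dataset with a
# made-up question id illustrates the metric without touching disk:
#   tiny_dataset = [{"paragraphs": [{"qas": [
#       {"id": "q1", "answers": [{"text": "Denver Broncos"}]}]}]}]
#   tiny_predictions = {"q1": "the Denver Broncos"}
#   evaluate(tiny_dataset, tiny_predictions)
#   # -> {'exact_match': 100.0, 'f1': 100.0}, because normalize_answer() strips
#   #    the article "the" before the comparison.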
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
    print(json.dumps(evaluate(dataset, predictions)))
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/evaluate_squad.py |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
""" Official evaluation script for the MLQA dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
import unicodedata
PUNCT = {chr(i) for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P')}.union(string.punctuation)
WHITESPACE_LANGS = ['en', 'es', 'hi', 'vi', 'de', 'ar']
MIXED_SEGMENTATION_LANGS = ['zh']
def whitespace_tokenize(text):
return text.split()
def mixed_segmentation(text):
segs_out = []
temp_str = ""
for char in text:
if re.search(r'[\u4e00-\u9fa5]', char) or char in PUNCT:
if temp_str != "":
ss = whitespace_tokenize(temp_str)
segs_out.extend(ss)
temp_str = ""
segs_out.append(char)
else:
temp_str += char
if temp_str != "":
ss = whitespace_tokenize(temp_str)
segs_out.extend(ss)
return segs_out
def normalize_answer(s, lang):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text, lang):
if lang == 'en':
return re.sub(r'\b(a|an|the)\b', ' ', text)
elif lang == 'es':
return re.sub(r'\b(un|una|unos|unas|el|la|los|las)\b', ' ', text)
elif lang == 'hi':
return text # Hindi does not have formal articles
elif lang == 'vi':
return re.sub(r'\b(của|là|cái|chiếc|những)\b', ' ', text)
elif lang == 'de':
return re.sub(r'\b(ein|eine|einen|einem|eines|einer|der|die|das|den|dem|des)\b', ' ', text)
elif lang == 'ar':
return re.sub('\sال^|ال', ' ', text)
elif lang == 'zh':
return text # Chinese does not have formal articles
else:
raise Exception('Unknown Language {}'.format(lang))
def white_space_fix(text, lang):
if lang in WHITESPACE_LANGS:
tokens = whitespace_tokenize(text)
elif lang in MIXED_SEGMENTATION_LANGS:
tokens = mixed_segmentation(text)
else:
raise Exception('Unknown Language {}'.format(lang))
return ' '.join([t for t in tokens if t.strip() != ''])
def remove_punc(text):
return ''.join(ch for ch in text if ch not in PUNCT)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)), lang), lang)
def f1_score(prediction, ground_truth, lang):
prediction_tokens = normalize_answer(prediction, lang).split()
ground_truth_tokens = normalize_answer(ground_truth, lang).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth, lang):
return (normalize_answer(prediction, lang) == normalize_answer(ground_truth, lang))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths, lang):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth, lang)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions, lang):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths, lang)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths, lang)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
def evaluate_with_path(dataset_file, prediction_file, answer_language):
with open(dataset_file) as dataset_file_reader:
dataset_json = json.load(dataset_file_reader)
dataset = dataset_json['data']
with open(prediction_file) as prediction_file_reader:
predictions = json.load(prediction_file_reader)
return evaluate(dataset, predictions, answer_language)
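# ---------------------------------------------------------------------------
# Hedged worked example (not part of the original script): for Chinese ("zh"),
# normalize_answer() tokenizes with mixed_segmentation(), which splits CJK
# characters into individual tokens while keeping Latin-script runs
# whitespace-tokenized. For instance:
#   mixed_segmentation("法国的首都是Paris")
#   # -> ['法', '国', '的', '首', '都', '是', 'Paris']
#   f1_score("巴黎", "巴黎", lang="zh")
#   # -> 1.0, because both answers normalize to the same character tokens.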
if __name__ == '__main__':
expected_version = '1.0'
parser = argparse.ArgumentParser(
description='Evaluation for MLQA ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
parser.add_argument('answer_language', help='Language code of answer language')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (str(dataset_json['version']) != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions, args.answer_language)))
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/mlqa_evaluation_v1.py |
# coding=utf-8
# Based on the MLQA evaluation script from:
# https://github.com/facebookresearch/MLQA/blob/master/mlqa_evaluation_v1.py
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
""" Official evaluation script for the MLQA dataset. """
from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
import unicodedata
PUNCT = {chr(i) for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith('P')}.union(
string.punctuation)
WHITESPACE_LANGS = ['en', 'es', 'hi', 'vi', 'de', 'ar']
MIXED_SEGMENTATION_LANGS = ['zh']
def whitespace_tokenize(text):
return text.split()
def mixed_segmentation(text):
segs_out = []
temp_str = ""
for char in text:
if re.search(r'[\u4e00-\u9fa5]', char) or char in PUNCT:
if temp_str != "":
ss = whitespace_tokenize(temp_str)
segs_out.extend(ss)
temp_str = ""
segs_out.append(char)
else:
temp_str += char
if temp_str != "":
ss = whitespace_tokenize(temp_str)
segs_out.extend(ss)
return segs_out
def normalize_answer(s, lang):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text, lang):
if lang == 'en':
return re.sub(r'\b(a|an|the)\b', ' ', text)
elif lang == 'es':
return re.sub(r'\b(un|una|unos|unas|el|la|los|las)\b', ' ', text)
elif lang == 'hi':
return text # Hindi does not have formal articles
elif lang == 'vi':
return re.sub(r'\b(của|là|cái|chiếc|những)\b', ' ', text)
elif lang == 'de':
return re.sub(r'\b(ein|eine|einen|einem|eines|einer|der|die|das|den|dem|des)\b', ' ', text)
elif lang == 'ar':
return re.sub('\sال^|ال', ' ', text)
elif lang == 'zh':
return text # Chinese does not have formal articles
else:
raise Exception('Unknown Language {}'.format(lang))
def white_space_fix(text, lang):
if lang in WHITESPACE_LANGS:
tokens = whitespace_tokenize(text)
elif lang in MIXED_SEGMENTATION_LANGS:
tokens = mixed_segmentation(text)
else:
raise Exception('Unknown Language {}'.format(lang))
return ' '.join([t for t in tokens if t.strip() != ''])
def remove_punc(text):
return ''.join(ch for ch in text if ch not in PUNCT)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s)), lang), lang)
def f1_score(prediction, ground_truth, lang):
prediction_tokens = normalize_answer(prediction, lang).split()
ground_truth_tokens = normalize_answer(ground_truth, lang).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth, lang):
return (normalize_answer(prediction, lang) == normalize_answer(ground_truth, lang))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths, lang):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth, lang)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions, lang):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths, lang)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths, lang)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
def evaluate_with_path(dataset_file, prediction_file, answer_language):
with open(dataset_file) as dataset_file_reader:
dataset_json = json.load(dataset_file_reader)
dataset = dataset_json['data']
with open(prediction_file) as prediction_file_reader:
predictions = json.load(prediction_file_reader)
return evaluate(dataset, predictions, answer_language)
if __name__ == '__main__':
expected_version = '1.0'
parser = argparse.ArgumentParser(
description='Evaluation for MLQA ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
parser.add_argument('answer_language', help='Language code of answer language')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (str(dataset_json['version']) != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions, args.answer_language)))
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/evaluate_mlqa.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score, average_precision_score, ndcg_score, roc_auc_score
import numpy as np
_has_sklearn = True
except (AttributeError, ImportError):
_has_sklearn = False
def is_sklearn_available():
return _has_sklearn
if _has_sklearn:
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def simple_ndcg(preds, labels, guids):
ndcgs = []
query2content = {}
for guid, pred, label in zip(guids, preds, labels):
query = guid.split("_")[0]
if not query in query2content:
query2content[query] = [[int(pred)], [int(label)]]
else:
query2content[query][0].append(int(pred))
query2content[query][1].append(int(label))
for key in query2content.keys():
if len(query2content[key][1]) < 2 or len(query2content[key][0]) < 2:
continue
ndcgs.append(ndcg_score(np.asarray([query2content[key][1]]), np.asarray([query2content[key][0]])))
return {"ndcg" : np.array(ndcgs).mean()}
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def acc_and_auc(preds, labels): # auc of pr curve is equal to average precision
acc = simple_accuracy(preds, labels)
auc = average_precision_score(labels, preds)
return {
"acc": acc,
"auc": auc,
"acc_and_auc": (acc + auc) / 2,
}
    def acc_and_roc_auc(preds, labels):  # area under the ROC curve
acc = simple_accuracy(preds, labels)
roc_auc = roc_auc_score(labels, preds)
return {
"acc": acc,
"roc_auc": roc_auc,
"acc_and_roc_auc": (acc + roc_auc) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def xglue_compute_metrics(task_name, preds, labels, guids):
assert len(preds) == len(labels)
if task_name == "xnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "pawsx":
return acc_and_auc(preds, labels)
elif task_name == "qam":
return acc_and_auc(preds, labels)
elif task_name == "ads":
return acc_and_roc_auc(preds, labels)
elif task_name == "rel":
return simple_ndcg(preds, labels, guids)
elif task_name == "news":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def xtreme_compute_metrics(task_name, preds, labels, guids):
assert len(preds) == len(labels)
if task_name == "xnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "pawsx":
return acc_and_auc(preds, labels)
else:
raise KeyError(task_name)
def glue_compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "hans":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
def xnli_compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "xnli":
return {"acc": simple_accuracy(preds, labels)}
else:
raise KeyError(task_name)
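# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): with scikit-learn
# installed, the metric entry points take aligned numpy arrays of predictions
# and labels (plus per-example guids for the ranking tasks). For example:
#   import numpy as np
#   preds = np.array([1, 0, 1, 1])
#   labels = np.array([1, 0, 0, 1])
#   glue_compute_metrics("mrpc", preds, labels)
#   # -> {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}
#   xglue_compute_metrics("rel", preds, labels, guids=["q1_a", "q1_b", "q1_c", "q1_d"])
#   # -> {'ndcg': ...} computed per query id (the part of the guid before "_").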
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/__init__.py |
""" Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was
modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import collections
import json
import logging
import math
import re
import string
from transformers.tokenization_bert import BasicTokenizer
logger = logging.getLogger(__name__)
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
return re.sub(regex, " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
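# ---------------------------------------------------------------------------
# Hedged worked example (not part of the original module): compute_exact and
# compute_f1 compare normalized token bags, so for instance:
#   compute_exact("the Eiffel Tower", "Eiffel Tower")
#   # -> 1, since articles, punctuation and case are stripped first.
#   compute_f1("Eiffel Tower", "the Tower")
#   # -> 0.666..., since the prediction's single remaining token "tower"
#   #    matches (precision 1.0) but only covers half of the gold answer
#   #    (recall 0.5).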
def get_raw_scores(examples, preds):
"""
Computes the exact and f1 scores from the examples and the model predictions
"""
exact_scores = {}
f1_scores = {}
for example in examples:
qas_id = example.qas_id
gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = [""]
if qas_id not in preds:
print("Missing prediction for %s" % qas_id)
continue
prediction = preds[qas_id]
exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)
return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(f1_scores.values()) / total),
("total", total),
]
)
else:
total = len(qid_list)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
("total", total),
]
)
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
has_ans_score, has_ans_cnt = 0, 0
for qid in qid_list:
if not qid_to_has_ans[qid]:
continue
has_ans_cnt += 1
if qid not in scores:
continue
has_ans_score += scores[qid]
return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval["best_exact"] = best_exact
main_eval["best_exact_thresh"] = exact_thresh
main_eval["best_f1"] = best_f1
main_eval["best_f1_thresh"] = f1_thresh
main_eval["has_ans_exact"] = has_ans_exact
main_eval["has_ans_f1"] = has_ans_f1
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for _, qid in enumerate(qid_list):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval["best_exact"] = best_exact
main_eval["best_exact_thresh"] = exact_thresh
main_eval["best_f1"] = best_f1
main_eval["best_f1_thresh"] = f1_thresh
def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]
if no_answer_probs is None:
no_answer_probs = {k: 0.0 for k in preds}
exact, f1 = get_raw_scores(examples, preds)
exact_threshold = apply_no_ans_threshold(
exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
)
f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)
evaluation = make_eval_dict(exact_threshold, f1_threshold)
if has_answer_qids:
has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids)
merge_eval(evaluation, has_ans_eval, "HasAns")
if no_answer_qids:
no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids)
merge_eval(evaluation, no_ans_eval, "NoAns")
if no_answer_probs:
find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)
return evaluation
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position : (orig_end_position + 1)]
return output_text
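# --- Worked example (illustrative, not part of the original module) ---
# Using the scenario from the docstring above: the normalized prediction is
# projected back onto the original casing and the trailing "'s" is dropped.
def _demo_get_final_text():
    # Expected to return "Steve Smith" (assuming BasicTokenizer's default
    # punctuation splitting).
    return get_final_text("steve smith", "Steve Smith's", do_lower_case=True)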
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
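# --- Illustrative sketch (not part of the original module) ---
# Tiny worked example for the two helpers above: `_get_best_indexes` keeps the
# positions of the top-scoring logits, and `_compute_softmax` normalizes scores
# after subtracting the maximum for numerical stability.
def _demo_logit_helpers():
    logits = [0.1, 2.3, -1.0, 0.7]
    best = _get_best_indexes(logits, n_best_size=2)  # [1, 3]
    probs = _compute_softmax([2.0, 1.0, 0.1])        # approximately [0.66, 0.24, 0.10]
    return best, probs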
def compute_predictions_logits(
all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
do_lower_case,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
verbose_logging,
version_2_with_negative,
null_score_diff_threshold,
tokenizer,
map_to_origin=True,
):
"""Write final predictions to the json file and log-odds of null if needed."""
logger.info("Writing predictions to: %s" % (output_prediction_file))
logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
)
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index],
)
)
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit,
)
)
prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"]
)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
# tok_text = " ".join(tok_tokens)
#
# # De-tokenize WordPieces that have been split off.
# tok_text = tok_text.replace(" ##", "")
# tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if not map_to_origin:
final_text = tok_text
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))
            # In very rare edge cases we could have only a single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
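# --- Illustrative usage sketch (not part of the original module) ---
# `compute_predictions_logits` consumes the examples/features produced by
# `squad_convert_examples_to_features` and one `SquadResult` per feature.
# The argument values and output file names below are assumptions for illustration.
def _demo_compute_predictions_logits(examples, features, results, tokenizer):
    return compute_predictions_logits(
        all_examples=examples,
        all_features=features,
        all_results=results,
        n_best_size=20,
        max_answer_length=30,
        do_lower_case=True,
        output_prediction_file="predictions.json",
        output_nbest_file="nbest_predictions.json",
        output_null_log_odds_file="null_odds.json",
        verbose_logging=False,
        version_2_with_negative=True,
        null_score_diff_threshold=0.0,
        tokenizer=tokenizer,
    )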
def compute_predictions_log_probs(
all_examples,
all_features,
all_results,
n_best_size,
max_answer_length,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
start_n_top,
end_n_top,
version_2_with_negative,
tokenizer,
verbose_logging,
):
""" XLNet write prediction logic (more complex than Bert's).
Write final predictions to the json file and log-odds of null if needed.
Requires utils_squad_evaluate.py
"""
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]
)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
)
logger.info("Writing predictions to: %s", output_prediction_file)
# logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
cur_null_score = result.cls_logits
# if we could have irrelevant answers, get the min score of irrelevant
score_null = min(score_null, cur_null_score)
for i in range(start_n_top):
for j in range(end_n_top):
start_log_prob = result.start_logits[i]
start_index = result.start_top_index[i]
j_index = i * end_n_top + j
end_log_prob = result.end_logits[j_index]
end_index = result.end_top_index[j_index]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= feature.paragraph_len - 1:
continue
if end_index >= feature.paragraph_len - 1:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_log_prob=start_log_prob,
end_log_prob=end_log_prob,
)
)
prelim_predictions = sorted(
prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True
)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
# XLNet un-tokenizer
# Let's keep it simple for now and see if we need all this later.
#
# tok_start_to_orig_index = feature.tok_start_to_orig_index
# tok_end_to_orig_index = feature.tok_end_to_orig_index
# start_orig_pos = tok_start_to_orig_index[pred.start_index]
# end_orig_pos = tok_end_to_orig_index[pred.end_index]
# paragraph_text = example.paragraph_text
# final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
# Previously used Bert untokenizer
tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
if hasattr(tokenizer, "do_lower_case"):
do_lower_case = tokenizer.do_lower_case
else:
do_lower_case = tokenizer.do_lowercase_and_remove_accent
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)
)
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_log_prob"] = entry.start_log_prob
output["end_log_prob"] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
assert best_non_null_entry is not None
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
# note(zhiliny): always predict best_non_null_entry
# and the evaluation script will search for the best threshold
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/metrics/squad_metrics.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XNLI utils (dataset loading and evaluation) """
import logging
import os
from .utils import DataProcessor, InputExample
logger = logging.getLogger(__name__)
class XnliProcessor(DataProcessor):
"""Processor for the XNLI dataset.
Adapted from https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207"""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_train_examples(self, data_dir):
"""See base class."""
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "XNLI-MT-1.0/multinli/multinli.train.{}.tsv".format(lg)))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % ("train", i)
text_a = line[0]
text_b = line[1]
label = "contradiction" if line[2] == "contradictory" else line[2]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_test_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
language = line[0]
if language != self.language:
continue
guid = "%s-%s" % ("test", i)
text_a = line[6]
text_b = line[7]
label = line[1]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
xnli_processors = {
"xnli": XnliProcessor,
}
xnli_output_modes = {
"xnli": "classification",
}
xnli_tasks_num_labels = {
"xnli": 3,
}
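# --- Illustrative usage sketch (not part of the original module) ---
# Typical wiring of the processor: train on one language (e.g. the English
# MultiNLI translations) and evaluate on another. The data directory is a
# hypothetical path laid out as `get_train_examples`/`get_test_examples` expect.
def _demo_xnli_processor():
    processor = XnliProcessor(language="de", train_language="en")
    train_examples = processor.get_train_examples("/path/to/xnli")  # hypothetical path
    test_examples = processor.get_test_examples("/path/to/xnli")
    labels = processor.get_labels()  # ["contradiction", "entailment", "neutral"]
    return train_examples, test_examples, labels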
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/xnli.py |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
from .xglue import xglue_convert_examples_to_features, xglue_output_modes, xglue_processors, xglue_tasks_num_labels
from .xtreme import xtreme_convert_examples_to_features, xtreme_output_modes, xtreme_processors, xtreme_tasks_num_labels
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from .xglue import xglue_convert_examples_to_vat_features
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/__init__.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers """
import logging
import os
import random
from ...file_utils import is_tf_available
from .utils import DataProcessor, InputExample, InputFeatures
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
def xtreme_convert_examples_to_features(
examples,
tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
word_dropout_rate=0.0,
):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
        task: XTREME task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if is_tf_available() and isinstance(examples, tf.data.Dataset):
is_tf_dataset = True
if task is not None:
processor = xtreme_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = xtreme_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
len_examples = 0
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
len_examples = tf.data.experimental.cardinality(examples)
else:
len_examples = len(examples)
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len_examples))
inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, word_dropout_rate=word_dropout_rate,)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
len(attention_mask), max_length
)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
len(token_type_ids), max_length
)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("text a: %s" % (example.text_a))
logger.info("text b: %s" % (example.text_b))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(
input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label,
guid=example.guid
)
)
if is_tf_available() and is_tf_dataset:
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
return tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([]),
),
)
return features
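# --- Illustrative usage sketch (not part of the original module) ---
# End-to-end feature conversion for an XTREME classification task. The tokenizer
# checkpoint and data directory are assumptions; note that this module's
# conversion calls `encode_plus(..., word_dropout_rate=...)`, so the tokenizer is
# expected to be this repository's modified tokenizer implementation.
def _demo_xtreme_features():
    from transformers import XLMRobertaTokenizer  # assumed available in this repo

    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    processor = XnliProcessor(language="en")
    examples = processor.get_valid_examples("/path/to/xtreme/xnli")  # hypothetical path
    features = xtreme_convert_examples_to_features(
        examples, tokenizer, max_length=128, task="xnli",
        pad_token=tokenizer.pad_token_id,
    )
    return features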
class PawsxProcessor(DataProcessor):
"""Processor for the PAWS-X data set (XTREME version)."""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train-en.tsv")), "train")
def get_translate_train_examples(self, data_dir):
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % ("translate", i)
text_a = line[0]
text_b = line[1]
label = line[-1]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_valid_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev-{}.tsv".format(self.language))), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "test-{}.tsv".format(self.language))),
"test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[0]
text_b = line[1]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_translate_train_dict(self, data_dir, tgt2src_dict, tgt2src_cnt):
"""See base class."""
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
dict = {}
cnt = {}
for (i, line) in enumerate(lines):
text_a = line[0].strip()
text_b = line[1].strip()
translated_text_a = line[2].strip()
translated_text_b = line[3].strip()
assert isinstance(text_a, str) and isinstance(text_b, str) and \
isinstance(translated_text_a, str) and isinstance(translated_text_b, str)
if text_a not in cnt:
cnt[text_a] = 0
cnt[text_a] += 1
if text_b not in cnt:
cnt[text_b] = 0
cnt[text_b] += 1
if text_a not in dict or random.random() <= 1.0 / cnt[text_a]:
dict[text_a] = translated_text_a
if text_b not in dict or random.random() <= 1.0 / cnt[text_b]:
dict[text_b] = translated_text_b
if translated_text_a not in tgt2src_cnt:
tgt2src_cnt[translated_text_a] = 0
tgt2src_cnt[translated_text_a] += 1
if translated_text_b not in tgt2src_cnt:
tgt2src_cnt[translated_text_b] = 0
tgt2src_cnt[translated_text_b] += 1
if translated_text_a not in tgt2src_dict or random.random() <= 1.0 / tgt2src_cnt[translated_text_a]:
tgt2src_dict[translated_text_a] = text_a
if translated_text_b not in tgt2src_dict or random.random() <= 1.0 / tgt2src_cnt[translated_text_b]:
tgt2src_dict[translated_text_b] = text_b
return dict
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set (XTREME version)."""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "train-{}.tsv".format(lg)))
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % ("train", i)
text_a = line[0]
text_b = line[1]
label = "contradiction" if line[2] == "contradictory" else line[2]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_translate_train_examples(self, data_dir):
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % ("train", i)
text_a = line[0]
text_b = line[1]
label = "contradiction" if line[4] == "contradictory" else line[2]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_translate_train_dict(self, data_dir, tgt2src_dict, tgt2src_cnt):
"""See base class."""
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
dict = {}
cnt = {}
for (i, line) in enumerate(lines):
text_a = line[0].strip()
text_b = line[1].strip()
translated_text_a = line[2].strip()
translated_text_b = line[3].strip()
assert isinstance(text_a, str) and isinstance(text_b, str) and \
isinstance(translated_text_a, str) and isinstance(translated_text_b, str)
if text_a not in cnt:
cnt[text_a] = 0
cnt[text_a] += 1
if text_b not in cnt:
cnt[text_b] = 0
cnt[text_b] += 1
if text_a not in dict or random.random() <= 1.0 / cnt[text_a]:
dict[text_a] = translated_text_a
if text_b not in dict or random.random() <= 1.0 / cnt[text_b]:
dict[text_b] = translated_text_b
if translated_text_a not in tgt2src_cnt:
tgt2src_cnt[translated_text_a] = 0
tgt2src_cnt[translated_text_a] += 1
if translated_text_b not in tgt2src_cnt:
tgt2src_cnt[translated_text_b] = 0
tgt2src_cnt[translated_text_b] += 1
if translated_text_a not in tgt2src_dict or random.random() <= 1.0 / tgt2src_cnt[translated_text_a]:
tgt2src_dict[translated_text_a] = text_a
if translated_text_b not in tgt2src_dict or random.random() <= 1.0 / tgt2src_cnt[translated_text_b]:
tgt2src_dict[translated_text_b] = text_b
return dict
def get_valid_examples(self, data_dir):
"""See base class."""
return self.get_test_valid_examples(data_dir, "dev")
def get_test_examples(self, data_dir):
return self.get_test_valid_examples(data_dir, "test")
def get_test_valid_examples(self, data_dir, split):
assert split in ["test", "dev"]
lines = self._read_tsv(os.path.join(data_dir, "{}-{}.tsv".format(split, self.language)))
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (split, i)
text_a = line[0]
text_b = line[1]
label = line[2]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
xtreme_tasks_num_labels = {
"xnli": 3,
"pawsx": 2,
}
xtreme_processors = {
"xnli": XnliProcessor,
"pawsx": PawsxProcessor,
}
xtreme_output_modes = {
"xnli": "classification",
"pawsx": "classification",
}
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/xtreme.py |
import json
import logging
import os
from functools import partial
from multiprocessing import Pool, cpu_count
import numpy as np
from tqdm import tqdm
from ...file_utils import is_tf_available, is_torch_available
from ...tokenization_bert import whitespace_tokenize
from .utils import DataProcessor
from ..metrics.squad_metrics import compute_f1
if is_torch_available():
import torch
from torch.utils.data import TensorDataset
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
tok_answer_text = tokenizer.convert_tokens_to_string(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = tokenizer.convert_tokens_to_string(doc_tokens[new_start:(new_end + 1)])
if text_span.strip() == tok_answer_text.strip():
return (new_start, new_end)
max_f1 = 0
max_start = -1
max_end = -1
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = tokenizer.convert_tokens_to_string(doc_tokens[new_start:(new_end + 1)])
cur_f1 = compute_f1(tok_answer_text.strip(), text_span.strip())
if cur_f1 > max_f1:
max_f1 = cur_f1
max_start = new_start
max_end = new_end
if max_start == -1 and max_end == -1:
max_start = input_start
max_end = input_end
return (max_start, max_end)
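# --- Illustrative sketch (not part of the original module) ---
# The classic motivating case from the original BERT code: the annotated answer
# is "1895" but the whitespace-tokenized document contains "(1895)". Given the
# sub-token span covering "(1895)", the call below tightens it to the tokens that
# exactly reproduce "1895"; if no exact match exists, the F1-maximizing span is
# returned. `span_start`/`span_end` are placeholders that would normally come from
# `orig_to_tok_index` in the feature-conversion code.
def _demo_improve_answer_span(tokenizer, all_doc_tokens, span_start, span_end):
    return _improve_answer_span(all_doc_tokens, span_start, span_end, tokenizer, "1895")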
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
def _new_check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# if len(doc_spans) == 1:
# return True
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span["start"] + doc_span["length"] - 1
if position < doc_span["start"]:
continue
if position > end:
continue
num_left_context = position - doc_span["start"]
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
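# --- Worked example (illustrative, not part of the original module) ---
# With doc_stride-style overlapping spans, a token appearing in several spans is
# scored by min(left context, right context) + 0.01 * span length, and only the
# best-scoring span "owns" the token.
def _demo_max_context():
    spans = [{"start": 0, "length": 4}, {"start": 2, "length": 4}]
    # The token at absolute position 3 sits at the edge of the first span but in
    # the middle of the second, so the second span provides its maximum context.
    assert _new_check_is_max_context(spans, 0, 3) is False
    assert _new_check_is_max_context(spans, 1, 3) is True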
def _is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def squad_convert_example_to_features(example, max_seq_length, doc_stride, max_query_length, is_training):
features = []
if is_training and not example.is_impossible:
# Get start and end position
start_position = example.start_position
end_position = example.end_position
# If the answer cannot be found in the text, then skip this example.
actual_text = " ".join(example.doc_tokens[start_position: (end_position + 1)])
cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
return []
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
)
spans = []
truncated_query = tokenizer.encode(example.question_text, add_special_tokens=False, max_length=max_query_length)
sequence_added_tokens = (
tokenizer.max_len - tokenizer.max_len_single_sentence + 1
if "roberta" in str(type(tokenizer)) or "camembert" in str(type(tokenizer))
else tokenizer.max_len - tokenizer.max_len_single_sentence
)
sequence_pair_added_tokens = tokenizer.max_len - tokenizer.max_len_sentences_pair
span_doc_tokens = all_doc_tokens
while len(spans) * doc_stride < len(all_doc_tokens):
encoded_dict = tokenizer.encode_plus(
truncated_query if tokenizer.padding_side == "right" else span_doc_tokens,
span_doc_tokens if tokenizer.padding_side == "right" else truncated_query,
max_length=max_seq_length,
return_overflowing_tokens=True,
pad_to_max_length=True,
stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
truncation_strategy="only_second" if tokenizer.padding_side == "right" else "only_first",
)
paragraph_len = min(
len(all_doc_tokens) - len(spans) * doc_stride,
max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
)
if tokenizer.pad_token_id in encoded_dict["input_ids"]:
if tokenizer.padding_side == "right":
non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
else:
last_padding_id_position = (
len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(
tokenizer.pad_token_id)
)
non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1:]
else:
non_padded_ids = encoded_dict["input_ids"]
tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
token_to_orig_map = {}
for i in range(paragraph_len):
index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]
encoded_dict["paragraph_len"] = paragraph_len
encoded_dict["tokens"] = tokens
encoded_dict["token_to_orig_map"] = token_to_orig_map
encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
encoded_dict["token_is_max_context"] = {}
encoded_dict["start"] = len(spans) * doc_stride
encoded_dict["length"] = paragraph_len
spans.append(encoded_dict)
if "overflowing_tokens" not in encoded_dict:
break
span_doc_tokens = encoded_dict["overflowing_tokens"]
for doc_span_index in range(len(spans)):
for j in range(spans[doc_span_index]["paragraph_len"]):
is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
index = (
j
if tokenizer.padding_side == "left"
else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
)
spans[doc_span_index]["token_is_max_context"][index] = is_max_context
for span in spans:
# Identify the position of the CLS token
cls_index = span["input_ids"].index(tokenizer.cls_token_id)
        # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
        # Original TF implem also keeps the classification token (set to 0) (not sure why...)
p_mask = np.array(span["token_type_ids"])
p_mask = np.minimum(p_mask, 1)
if tokenizer.padding_side == "right":
# Limit positive values to one
p_mask = 1 - p_mask
p_mask[np.where(np.array(span["input_ids"]) == tokenizer.sep_token_id)[0]] = 1
# Set the CLS index to '0'
p_mask[cls_index] = 0
span_is_impossible = example.is_impossible
start_position = 0
end_position = 0
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = span["start"]
doc_end = span["start"] + span["length"] - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = cls_index
end_position = cls_index
span_is_impossible = True
else:
if tokenizer.padding_side == "left":
doc_offset = 0
else:
doc_offset = len(truncated_query) + sequence_added_tokens
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
features.append(
SquadFeatures(
span["input_ids"],
span["attention_mask"],
span["token_type_ids"],
cls_index,
p_mask.tolist(),
example_index=0,
# Can not set unique_id and example_index here. They will be set after multiple processing.
unique_id=0,
paragraph_len=span["paragraph_len"],
token_is_max_context=span["token_is_max_context"],
tokens=span["tokens"],
token_to_orig_map=span["token_to_orig_map"],
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible,
)
)
return features
def squad_convert_example_to_features_init(tokenizer_for_convert):
global tokenizer
tokenizer = tokenizer_for_convert
def squad_convert_examples_to_features(
examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training, return_dataset=False, threads=1
):
"""
Converts a list of examples into a list of features that can be directly given as input to a model.
    It is model-dependent and takes advantage of many of the tokenizer's features to create the model's inputs.
Args:
examples: list of :class:`~transformers.data.processors.squad.SquadExample`
tokenizer: an instance of a child of :class:`~transformers.PreTrainedTokenizer`
max_seq_length: The maximum sequence length of the inputs.
doc_stride: The stride used when the context is too large and is split across several features.
max_query_length: The maximum length of the query.
is_training: whether to create features for model evaluation or model training.
return_dataset: Default False. Either 'pt' or 'tf'.
if 'pt': returns a torch.data.TensorDataset,
if 'tf': returns a tf.data.Dataset
        threads: The number of worker processes used to convert the examples to features.
Returns:
list of :class:`~transformers.data.processors.squad.SquadFeatures`
Example::
processor = SquadV2Processor()
examples = processor.get_dev_examples(data_dir)
features = squad_convert_examples_to_features(
examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
)
"""
# Defining helper methods
features = []
threads = min(threads, cpu_count())
with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
annotate_ = partial(
squad_convert_example_to_features,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
is_training=is_training,
)
features = list(
tqdm(
p.imap(annotate_, examples, chunksize=32),
total=len(examples),
desc="convert squad examples to features",
)
)
new_features = []
unique_id = 1000000000
example_index = 0
for example_features in tqdm(features, total=len(features), desc="add example index and unique id"):
if not example_features:
continue
for example_feature in example_features:
example_feature.example_index = example_index
example_feature.unique_id = unique_id
new_features.append(example_feature)
unique_id += 1
example_index += 1
features = new_features
del new_features
if return_dataset == "pt":
if not is_torch_available():
raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
if not is_training:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_masks, all_token_type_ids, all_example_index, all_cls_index, all_p_mask
)
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids,
all_attention_masks,
all_token_type_ids,
all_start_positions,
all_end_positions,
all_cls_index,
all_p_mask,
all_is_impossible,
)
return features, dataset
elif return_dataset == "tf":
if not is_tf_available():
raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.")
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
{
"start_position": ex.start_position,
"end_position": ex.end_position,
"cls_index": ex.cls_index,
"p_mask": ex.p_mask,
"is_impossible": ex.is_impossible,
},
)
return tf.data.Dataset.from_generator(
gen,
(
{"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32},
{
"start_position": tf.int64,
"end_position": tf.int64,
"cls_index": tf.int64,
"p_mask": tf.int32,
"is_impossible": tf.int32,
},
),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
{
"start_position": tf.TensorShape([]),
"end_position": tf.TensorShape([]),
"cls_index": tf.TensorShape([]),
"p_mask": tf.TensorShape([None]),
"is_impossible": tf.TensorShape([]),
},
),
)
return features
class SquadProcessor(DataProcessor):
"""
Processor for the SQuAD data set.
    Overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and version 2.0 of SQuAD, respectively.
"""
train_file = None
dev_file = None
def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
if not evaluate:
answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
answers = []
else:
answers = [
{"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
]
answer = None
answer_start = None
return SquadExample(
qas_id=tensor_dict["id"].numpy().decode("utf-8"),
question_text=tensor_dict["question"].numpy().decode("utf-8"),
context_text=tensor_dict["context"].numpy().decode("utf-8"),
answer_text=answer,
start_position_character=answer_start,
title=tensor_dict["title"].numpy().decode("utf-8"),
answers=answers,
)
def get_examples_from_dataset(self, dataset, evaluate=False):
"""
Creates a list of :class:`~transformers.data.processors.squad.SquadExample` using a TFDS dataset.
Args:
dataset: The tfds dataset loaded from `tensorflow_datasets.load("squad")`
evaluate: boolean specifying if in evaluation mode or in training mode
Returns:
List of SquadExample
Examples::
import tensorflow_datasets as tfds
dataset = tfds.load("squad")
training_examples = get_examples_from_dataset(dataset, evaluate=False)
evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
"""
if evaluate:
dataset = dataset["validation"]
else:
dataset = dataset["train"]
examples = []
for tensor_dict in tqdm(dataset):
examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
return examples
def get_train_examples(self, data_dir, filename=None):
"""
Returns the training examples from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the training file has a different name than the original one
which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
"""
if data_dir is None:
data_dir = ""
if self.train_file is None:
raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
with open(
os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
) as reader:
input_data = json.load(reader)["data"]
return self._create_examples(input_data, "train")
def get_dev_examples(self, data_dir, filename=None):
"""
Returns the evaluation example from the data directory.
Args:
data_dir: Directory containing the data files used for training and evaluating.
filename: None by default, specify this if the evaluation file has a different name than the original one
                which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
"""
if data_dir is None:
data_dir = ""
if self.dev_file is None:
raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
with open(
os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
) as reader:
input_data = json.load(reader)["data"]
return self._create_examples(input_data, "dev")
def _create_examples(self, input_data, set_type):
is_training = set_type == "train"
examples = []
for entry in tqdm(input_data):
# print(entry.keys())
if "title" in entry:
title = entry["title"]
else:
title = "no_title"
for paragraph in entry["paragraphs"]:
context_text = paragraph["context"]
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position_character = None
answer_text = None
answers = []
if "is_impossible" in qa:
is_impossible = qa["is_impossible"]
else:
is_impossible = False
if not is_impossible:
if is_training:
answer = qa["answers"][0]
answer_text = answer["text"]
start_position_character = answer["answer_start"]
else:
answers = qa["answers"]
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
context_text=context_text,
answer_text=answer_text,
start_position_character=start_position_character,
title=title,
is_impossible=is_impossible,
answers=answers,
)
examples.append(example)
return examples
class SquadV1Processor(SquadProcessor):
train_file = "train-v1.1.json"
dev_file = "dev-v1.1.json"
def get_dataset_path(self, data_dir, split, language):
assert split == "dev"
return os.path.join(data_dir, self.dev_file)
class SquadV2Processor(SquadProcessor):
train_file = "train-v2.0.json"
dev_file = "dev-v2.0.json"
class MLQAProcessor(SquadProcessor):
train_file = "squad1.1/train-v1.1.json"
dev_file = "squad1.1/dev-v1.1.json"
def get_dataset_path(self, data_dir, split, language):
if split == "train":
return os.path.join(data_dir, self.train_file)
else:
return os.path.join(data_dir, "MLQA_V1", split, "{0}-context-{1}-question-{1}.json".format(split, language))
def get_dev_examples_by_language(self, data_dir, language):
return self.get_dev_examples(data_dir, "MLQA_V1/dev/dev-context-{0}-question-{0}.json".format(language))
def get_test_examples_by_language(self, data_dir, language):
return self.get_dev_examples(data_dir, "MLQA_V1/test/test-context-{0}-question-{0}.json".format(language))
class XQuADProcessor(SquadProcessor):
train_file = "squad1.1/train-v1.1.json"
dev_file = "squad1.1/dev-v1.1.json"
def get_dataset_path(self, data_dir, split, language):
if split == "train":
return os.path.join(data_dir, self.train_file)
else:
return os.path.join(data_dir, "xquad.{0}.json".format(language))
def get_dev_examples_by_language(self, data_dir, language):
return self.get_dev_examples(data_dir, "xquad.{0}.json".format(language))
def get_test_examples_by_language(self, data_dir, language):
return self.get_dev_examples(data_dir, "xquad.{0}.json".format(language))
class TyDiQAProcessor(SquadProcessor):
train_file = "tydiqa-goldp-v1.1-train/tydiqa.en.train.json"
dev_file = "tydiqa-goldp-v1.1-dev/tydiqa.en.dev.json"
def get_dataset_path(self, data_dir, split, language):
if split == "train":
return os.path.join(data_dir, self.train_file)
else:
return os.path.join(data_dir, "tydiqa-goldp-v1.1-dev/tydiqa.{}.dev.json".format(language))
def get_dev_examples_by_language(self, data_dir, language):
return self.get_dev_examples(data_dir, "tydiqa-goldp-v1.1-dev/tydiqa.{}.dev.json".format(language))
def get_test_examples_by_language(self, data_dir, language):
return self.get_dev_examples(data_dir, "tydiqa-goldp-v1.1-dev/tydiqa.{}.dev.json".format(language))
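# --- Illustrative usage sketch (not part of the original module) ---
# The cross-lingual processors above differ only in how they resolve file paths;
# evaluation data is fetched per target language. The data directory below is a
# hypothetical path and must contain the layout each processor expects.
def _demo_xlingual_qa_processors(data_dir="/path/to/xtreme_qa"):
    mlqa_dev = MLQAProcessor().get_dev_examples_by_language(data_dir, "es")
    xquad_test = XQuADProcessor().get_test_examples_by_language(data_dir, "de")
    tydiqa_dev = TyDiQAProcessor().get_dev_examples_by_language(data_dir, "fi")
    return mlqa_dev, xquad_test, tydiqa_dev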
class SquadExample(object):
"""
A single training/test example for the Squad dataset, as loaded from disk.
Args:
qas_id: The example's unique identifier
question_text: The question string
context_text: The context string
answer_text: The answer string
start_position_character: The character position of the start of the answer
title: The title of the example
answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
is_impossible: False by default, set to True if the example has no possible answer.
"""
def __init__(
self,
qas_id,
question_text,
context_text,
answer_text,
start_position_character,
title,
answers=[],
is_impossible=False,
):
self.qas_id = qas_id
self.question_text = question_text
self.context_text = context_text
self.answer_text = answer_text
self.title = title
self.is_impossible = is_impossible
self.answers = answers
self.start_position, self.end_position = 0, 0
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
# Split on whitespace so that different tokens may be attributed to their original position.
for c in self.context_text:
if _is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
self.doc_tokens = doc_tokens
self.char_to_word_offset = char_to_word_offset
        # Start and end positions are only set when a start character position is provided (answerable training examples).
if start_position_character is not None and not is_impossible:
self.start_position = char_to_word_offset[start_position_character]
self.end_position = char_to_word_offset[
min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
]
class SquadFeatures(object):
"""
Single squad example features to be fed to a model.
Those features are model-specific and can be crafted from :class:`~transformers.data.processors.squad.SquadExample`
using the :method:`~transformers.data.processors.squad.squad_convert_examples_to_features` method.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
cls_index: the index of the CLS token.
p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
            Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer
example_index: the index of the example
unique_id: The unique Feature identifier
paragraph_len: The length of the context
token_is_max_context: List of booleans identifying which tokens have their maximum context in this feature object.
            If a token does not have its maximum context in this feature object, it means that another feature object
has more information related to that token and should be prioritized over this feature for that token.
tokens: list of tokens corresponding to the input ids
token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
start_position: start of the answer token index
end_position: end of the answer token index
"""
def __init__(
self,
input_ids,
attention_mask,
token_type_ids,
cls_index,
p_mask,
example_index,
unique_id,
paragraph_len,
token_is_max_context,
tokens,
token_to_orig_map,
start_position,
end_position,
is_impossible,
):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.cls_index = cls_index
self.p_mask = p_mask
self.example_index = example_index
self.unique_id = unique_id
self.paragraph_len = paragraph_len
self.token_is_max_context = token_is_max_context
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
class SquadResult(object):
"""
Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
Args:
unique_id: The unique identifier corresponding to that example.
start_logits: The logits corresponding to the start of the answer
end_logits: The logits corresponding to the end of the answer
"""
def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
self.start_logits = start_logits
self.end_logits = end_logits
self.unique_id = unique_id
if start_top_index:
self.start_top_index = start_top_index
self.end_top_index = end_top_index
self.cls_logits = cls_logits
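# --- Illustrative usage sketch (not part of the original module) ---
# One SquadResult is built per evaluated feature from the model's raw outputs;
# the start/end logits are plain Python lists aligned with the feature's tokens.
# `eval_features`, `all_start_logits` and `all_end_logits` are assumed to come
# from the caller's evaluation loop.
def _demo_build_squad_results(eval_features, all_start_logits, all_end_logits):
    results = []
    for feature, start_logits, end_logits in zip(eval_features, all_start_logits, all_end_logits):
        results.append(SquadResult(unique_id=feature.unique_id, start_logits=start_logits, end_logits=end_logits))
    return results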
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/squad.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers """
import logging
import os
from ...file_utils import is_tf_available
from .utils import DataProcessor, InputExample, InputFeatures
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
def glue_convert_examples_to_features(
examples,
tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if is_tf_available() and isinstance(examples, tf.data.Dataset):
is_tf_dataset = True
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
len_examples = 0
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
len_examples = tf.data.experimental.cardinality(examples)
else:
len_examples = len(examples)
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len_examples))
inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
len(attention_mask), max_length
)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
len(token_type_ids), max_length
)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(
input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label
)
)
if is_tf_available() and is_tf_dataset:
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
return tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([]),
),
)
return features
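# --- Usage sketch (illustrative addition, not part of the original file) ---
# A hedged example of turning MRPC tsv files into features. The tokenizer is an
# assumption: any tokenizer whose ``encode_plus`` returns ``input_ids`` and
# ``token_type_ids`` and that exposes ``pad_token_id`` (e.g. a BERT-style
# tokenizer) should work; ``data_dir`` is assumed to contain ``train.tsv``.
def _example_mrpc_features(data_dir, tokenizer, max_length=128):
    """Hypothetical helper: read MRPC train examples and featurize them."""
    examples = MrpcProcessor().get_train_examples(data_dir)
    return glue_convert_examples_to_features(
        examples,
        tokenizer,
        max_length=max_length,
        task="mrpc",
        pad_token=tokenizer.pad_token_id,
    )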
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["premise"].numpy().decode("utf-8"),
tensor_dict["hypothesis"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["question1"].numpy().decode("utf-8"),
tensor_dict["question2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["question"].numpy().decode("utf-8"),
tensor_dict["sentence"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
glue_tasks_num_labels = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
glue_processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
}
glue_output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
}
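# --- Usage sketch (illustrative addition, not part of the original file) ---
# A small, hedged example of how the three registries above fit together:
# given a GLUE task name, look up its processor, label list and output mode.
def _example_describe_glue_task(task_name):
    """Hypothetical helper: summarize a GLUE task from the registries above."""
    processor = glue_processors[task_name]()
    return {
        "labels": processor.get_labels(),
        "num_labels": glue_tasks_num_labels.get(task_name),
        "output_mode": glue_output_modes[task_name],
    }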
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/glue.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import csv
import json
import logging
import sys
from ...file_utils import is_tf_available, is_torch_available
csv.field_size_limit(sys.maxsize)
logger = logging.getLogger(__name__)
class InputExample(object):
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures(object):
"""
A single set of features of data.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
label: Label corresponding to the input
"""
def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None, guid=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
self.guid = guid
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
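# --- Usage sketch (illustrative addition, not part of the original file) ---
# A minimal, hedged example of building the two containers above by hand and
# serializing them to JSON; all ids and values are made up for illustration.
def _example_serialize_pair():
    """Hypothetical helper: round a toy example/feature pair through JSON."""
    example = InputExample(guid="train-0", text_a="A cat sat on the mat.", text_b=None, label="1")
    feature = InputFeatures(input_ids=[101, 138, 5855, 102], attention_mask=[1, 1, 1, 1], label=1)
    return example.to_json_string(), feature.to_json_string()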
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_example_from_tensor_dict(self, tensor_dict):
"""Gets an example from a dict with tensorflow tensors
Args:
tensor_dict: Keys and values should match the corresponding Glue
tensorflow_dataset examples.
"""
raise NotImplementedError()
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def tfds_map(self, example):
"""Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are.
This method converts examples to the correct format."""
if len(self.get_labels()) > 1:
example.label = self.get_labels()[int(example.label)]
return example
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class SingleSentenceClassificationProcessor(DataProcessor):
""" Generic processor for a single sentence classification data set."""
def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
self.labels = [] if labels is None else labels
self.examples = [] if examples is None else examples
self.mode = mode
self.verbose = verbose
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
if isinstance(idx, slice):
return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
return self.examples[idx]
@classmethod
def create_from_csv(
cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
):
processor = cls(**kwargs)
processor.add_examples_from_csv(
file_name,
split_name=split_name,
column_label=column_label,
column_text=column_text,
column_id=column_id,
skip_first_row=skip_first_row,
overwrite_labels=True,
overwrite_examples=True,
)
return processor
@classmethod
def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
processor = cls(**kwargs)
processor.add_examples(texts_or_text_and_labels, labels=labels)
return processor
def add_examples_from_csv(
self,
file_name,
split_name="",
column_label=0,
column_text=1,
column_id=None,
skip_first_row=False,
overwrite_labels=False,
overwrite_examples=False,
):
lines = self._read_tsv(file_name)
if skip_first_row:
lines = lines[1:]
texts = []
labels = []
ids = []
for (i, line) in enumerate(lines):
texts.append(line[column_text])
labels.append(line[column_label])
if column_id is not None:
ids.append(line[column_id])
else:
guid = "%s-%s" % (split_name, i) if split_name else "%s" % i
ids.append(guid)
return self.add_examples(
texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
)
def add_examples(
self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
):
assert labels is None or len(texts_or_text_and_labels) == len(labels)
assert ids is None or len(texts_or_text_and_labels) == len(ids)
if ids is None:
ids = [None] * len(texts_or_text_and_labels)
if labels is None:
labels = [None] * len(texts_or_text_and_labels)
examples = []
added_labels = set()
for (text_or_text_and_label, label, guid) in zip(texts_or_text_and_labels, labels, ids):
if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
text, label = text_or_text_and_label
else:
text = text_or_text_and_label
added_labels.add(label)
examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
# Update examples
if overwrite_examples:
self.examples = examples
else:
self.examples.extend(examples)
# Update labels
if overwrite_labels:
self.labels = list(added_labels)
else:
self.labels = list(set(self.labels).union(added_labels))
return self.examples
def get_features(
self,
tokenizer,
max_length=None,
pad_on_left=False,
pad_token=0,
mask_padding_with_zero=True,
return_tensors=None,
):
"""
        Converts the processor's examples into a list of ``InputFeatures``
        Args:
            tokenizer: Instance of a tokenizer that will tokenize the examples
            max_length: Maximum example length. Defaults to the tokenizer's maximum length when not provided.
            pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
            pad_token: Padding token
            mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
                and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
                actual values)
            return_tensors: If set to ``"tf"`` or ``"pt"``, returns a ``tf.data.Dataset`` or a PyTorch
                ``TensorDataset`` instead of a plain Python list.
        Returns:
            A list of ``InputFeatures`` which can be fed to the model, or a ``tf.data.Dataset`` / PyTorch
            ``TensorDataset`` when ``return_tensors`` is set to ``"tf"`` / ``"pt"``.
"""
if max_length is None:
max_length = tokenizer.max_len
label_map = {label: i for i, label in enumerate(self.labels)}
all_input_ids = []
for (ex_index, example) in enumerate(self.examples):
if ex_index % 10000 == 0:
logger.info("Tokenizing example %d", ex_index)
input_ids = tokenizer.encode(
example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len),
)
all_input_ids.append(input_ids)
batch_length = max(len(input_ids) for input_ids in all_input_ids)
features = []
for (ex_index, (input_ids, example)) in enumerate(zip(all_input_ids, self.examples)):
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len(self.examples)))
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = batch_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
assert len(input_ids) == batch_length, "Error with input length {} vs {}".format(
len(input_ids), batch_length
)
assert len(attention_mask) == batch_length, "Error with input length {} vs {}".format(
len(attention_mask), batch_length
)
if self.mode == "classification":
label = label_map[example.label]
elif self.mode == "regression":
label = float(example.label)
else:
raise ValueError(self.mode)
if ex_index < 5 and self.verbose:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
if return_tensors is None:
return features
elif return_tensors == "tf":
if not is_tf_available():
raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
import tensorflow as tf
def gen():
for ex in features:
yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)
dataset = tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
)
return dataset
elif return_tensors == "pt":
if not is_torch_available():
raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
import torch
from torch.utils.data import TensorDataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
if self.mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif self.mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
return dataset
else:
raise ValueError("return_tensors should be one of 'tf' or 'pt'")
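# --- Usage sketch (illustrative addition, not part of the original file) ---
# A hedged example of the in-memory path above: build a processor from
# (text, label) pairs and convert them into padded features. The tokenizer is
# an assumption: anything exposing ``encode`` and a ``max_len`` attribute
# (e.g. a pretrained tokenizer) should work.
def _example_single_sentence_features(tokenizer):
    """Hypothetical helper: featurize a tiny in-memory classification set."""
    processor = SingleSentenceClassificationProcessor.create_from_examples(
        [("great movie", "pos"), ("terrible plot", "neg")]
    )
    return processor.get_features(tokenizer, max_length=16)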
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/utils.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers """
import logging
import os
import random
from ...file_utils import is_tf_available
from .utils import DataProcessor, InputExample, InputFeatures
if is_tf_available():
import tensorflow as tf
logger = logging.getLogger(__name__)
def xglue_convert_examples_to_vat_features(
examples,
tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
nbest_size=-1,
alpha=0.2,
):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
        mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
            and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
            actual values)
        nbest_size: Forwarded to the tokenizer's ``encode_plus``; number of candidate segmentations to sample from
            when the tokenizer supports sampled (subword-regularized) tokenization. ``-1`` is passed through unchanged.
        alpha: Forwarded to the tokenizer's ``encode_plus``; smoothing parameter for the sampled segmentation.
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if is_tf_available() and isinstance(examples, tf.data.Dataset):
is_tf_dataset = True
if task is not None:
processor = xglue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = xglue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
len_examples = 0
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
len_examples = tf.data.experimental.cardinality(examples)
else:
len_examples = len(examples)
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len_examples))
inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,
nbest_size=nbest_size, alpha=alpha)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
len(attention_mask), max_length
)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
len(token_type_ids), max_length
)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("text a: %s" % (example.text_a))
logger.info("text b: %s" % (example.text_b))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(
input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label,
guid=example.guid
)
)
if is_tf_available() and is_tf_dataset:
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
return tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([]),
),
)
return features
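# --- Usage sketch (illustrative addition, not part of the original file) ---
# A hedged example of the VAT variant above. ``nbest_size`` and ``alpha`` are
# simply forwarded to the tokenizer's ``encode_plus`` and are assumed to
# control sampled (subword-regularized) segmentation; the tokenizer is an
# assumption and must accept those keyword arguments and return
# ``token_type_ids``. ``examples`` is assumed to be a list of ``InputExample`` objects.
def _example_xnli_vat_features(examples, tokenizer, max_length=128):
    """Hypothetical helper: featurize XNLI examples with sampled segmentations."""
    label_list = XnliProcessor(language="en").get_labels()  # ["contradiction", "entailment", "neutral"]
    return xglue_convert_examples_to_vat_features(
        examples,
        tokenizer,
        max_length=max_length,
        label_list=label_list,
        output_mode="classification",
        pad_token=tokenizer.pad_token_id,
        nbest_size=64,
        alpha=0.2,
    )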
def xglue_convert_examples_to_features(
examples,
tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if is_tf_available() and isinstance(examples, tf.data.Dataset):
is_tf_dataset = True
if task is not None:
processor = xglue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = xglue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
len_examples = 0
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
len_examples = tf.data.experimental.cardinality(examples)
else:
len_examples = len(examples)
if ex_index % 10000 == 0:
logger.info("Writing example %d/%d" % (ex_index, len_examples))
inputs = tokenizer.encode_plus(example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, )
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(
len(attention_mask), max_length
)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(
len(token_type_ids), max_length
)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("text a: %s" % (example.text_a))
logger.info("text b: %s" % (example.text_b))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(
input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label,
guid=example.guid
)
)
if is_tf_available() and is_tf_dataset:
def gen():
for ex in features:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
return tf.data.Dataset.from_generator(
gen,
({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
(
{
"input_ids": tf.TensorShape([None]),
"attention_mask": tf.TensorShape([None]),
"token_type_ids": tf.TensorShape([None]),
},
tf.TensorShape([]),
),
)
return features
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["premise"].numpy().decode("utf-8"),
tensor_dict["hypothesis"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["question1"].numpy().decode("utf-8"),
tensor_dict["question2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["question"].numpy().decode("utf-8"),
tensor_dict["sentence"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WprProcessor(DataProcessor):
"""Processor for the PAWS-X data set (XGLUE version)."""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "xglue.wpr." + self.train_language + ".train")), "train")
def get_valid_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "xglue.wpr." + self.language + ".dev")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "xglue.wpr." + self.language + ".test")),
"test")
def get_labels(self):
"""See base class."""
return ["0", "1", "2", "3", "4"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, line[0] + "_" + str(i))
text_a = line[0]
text_b = line[1] + " " + line[2]
label = line[-1]
if set_type == "test":
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label="0"))
else:
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QamProcessor(DataProcessor):
"""Processor for the Qam data set (XGLUE version)."""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "xglue.qam.{0}.train".format(self.train_language))), "train")
def get_valid_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "xglue.qam.{0}.dev".format(self.train_language))), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "xglue.qam.{0}.test".format(self.language))),
"test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, str(i))
text_a = line[0]
text_b = line[1]
label = line[-1]
if set_type == "test":
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label="0"))
else:
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QadsmProcessor(DataProcessor):
"""Processor for the Ads data set (XGLUE version)."""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
if self.train_language is None:
self.train_language = self.language
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "xglue.qadsm." + self.train_language + ".train")), "train")
def get_valid_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "xglue.qadsm." + self.language + ".dev")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "xglue.qadsm." + self.language + ".test")),
"test")
def get_labels(self):
"""See base class."""
return ["Bad", "Good"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, str(i))
text_a = line[0]
text_b = line[1] + " " + line[2]
label = line[-1]
if set_type == "test":
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label="Bad"))
else:
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class NcProcessor(DataProcessor):
"""Processor for the News data set (XGLUE version)."""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "xglue.nc." + self.train_language + ".train")), "train")
def get_valid_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "xglue.nc." + self.language + ".dev")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "xglue.nc." + self.language + ".test")),
"test")
def get_labels(self):
"""See base class."""
return ['foodanddrink', 'sports', 'news', 'entertainment', 'health', 'video', 'finance', 'travel', 'lifestyle',
'autos']
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, str(i))
text_a = line[0]
text_b = line[1]
label = line[-1]
if set_type == "test":
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label="news"))
else:
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class PawsxProcessor(DataProcessor):
"""Processor for the PAWS-X data set (XGLUE version)."""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence1"].numpy().decode("utf-8"),
tensor_dict["sentence2"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "en/train.tsv")), "train")
def get_translate_train_examples(self, data_dir):
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % ("train", i)
text_a = line[2]
text_b = line[3]
label = line[-1]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_valid_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, self.language + "/dev_2k.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, self.language + "/test_2k.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set (XGLUE version)."""
def __init__(self, language, train_language=None):
self.language = language
self.train_language = train_language
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
None,
str(tensor_dict["label"].numpy()),
)
def get_train_examples(self, data_dir):
"""See base class."""
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "multinli.train.{}.tsv".format(lg)))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % ("train", i)
text_a = line[0]
text_b = line[1]
label = "contradiction" if line[2] == "contradictory" else line[2]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_translate_train_examples(self, data_dir):
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % ("train", i)
text_a = line[2]
text_b = line[3]
label = "contradiction" if line[4] == "contradictory" else line[4]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_translate_train_dict(self, data_dir):
"""See base class."""
lg = self.language if self.train_language is None else self.train_language
lines = self._read_tsv(os.path.join(data_dir, "translate-train/en-{}-translated.tsv".format(lg)))
        # Map each source sentence to one of its translations. When a sentence occurs
        # several times, keep the i-th occurrence with probability 1/i (reservoir
        # sampling), so a uniformly random translation is retained per sentence.
        translation_dict = {}
        cnt = {}
        for (i, line) in enumerate(lines):
            text_a = line[0].strip()
            text_b = line[1].strip()
            translated_text_a = line[2].strip()
            translated_text_b = line[3].strip()
            assert isinstance(text_a, str) and isinstance(text_b, str) and \
                isinstance(translated_text_a, str) and isinstance(translated_text_b, str)
            if text_a not in cnt:
                cnt[text_a] = 0
            cnt[text_a] += 1
            if text_b not in cnt:
                cnt[text_b] = 0
            cnt[text_b] += 1
            if text_a not in translation_dict or random.random() <= 1.0 / cnt[text_a]:
                translation_dict[text_a] = translated_text_a
            if text_b not in translation_dict or random.random() <= 1.0 / cnt[text_b]:
                translation_dict[text_b] = translated_text_b
        return translation_dict
def get_valid_examples(self, data_dir):
"""See base class."""
return self.get_test_valid_examples(data_dir, "valid")
def get_test_examples(self, data_dir):
return self.get_test_valid_examples(data_dir, "test")
def get_test_valid_examples(self, data_dir, split):
assert split in ["test", "valid"]
file_name = "test" if split == "test" else "dev"
lines = self._read_tsv(os.path.join(data_dir, "xnli.{0}.tsv".format(file_name)))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
language = line[0]
if language != self.language:
continue
guid = "%s-%s" % (split, i)
text_a = line[6]
text_b = line[7]
label = line[1]
assert isinstance(text_a, str) and isinstance(text_b, str) and isinstance(label, str)
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
xglue_tasks_num_labels = {
"xnli": 3,
"pawsx": 2,
"qam": 2,
"ads": 2,
"news": 10,
"rel": 4,
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
xglue_processors = {
"xnli": XnliProcessor,
"pawsx": PawsxProcessor,
"qam": QamProcessor,
"ads": QadsmProcessor,
"news": NcProcessor,
"rel": WprProcessor,
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
}
xglue_output_modes = {
"xnli": "classification",
"pawsx": "classification",
"qam": "classification",
"ads": "classification",
"news": "classification",
"rel": "classification",
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
}
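# Illustrative sketch (not part of the original file): the three registries above are
# meant to be looked up by task name. For example, to build a German XNLI processor
# ("/path/to/xglue/xnli" is a placeholder data directory):
#
#   processor = xglue_processors["xnli"](language="de", train_language="en")
#   label_list = processor.get_labels()            # ["contradiction", "entailment", "neutral"]
#   num_labels = xglue_tasks_num_labels["xnli"]    # 3
#   output_mode = xglue_output_modes["xnli"]       # "classification"
#   dev_examples = processor.get_valid_examples("/path/to/xglue/xnli")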
| EXA-1-master | exa/models/unilm-master/xtune/src/transformers/data/processors/xglue.py |
"""I/O"""
def _lines_gen_from_single_file(filename):
with open(filename) as fp:
for line in fp: yield line.strip()
def lines_gen(*filenames):
for ret in zip(*map(_lines_gen_from_single_file, filenames)): yield ret | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/io.py |
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/__init__.py |
|
import logging
import torch
from transformers.modeling_bert import (BertConfig, BertEncoder,
BertIntermediate, BertLayer,
BertModel, BertOutput,
BertSelfAttention,
BertSelfOutput)
from transformers.modeling_roberta import (RobertaEmbeddings,
RobertaForMaskedLM,
RobertaForSequenceClassification,
RobertaModel)
logger = logging.getLogger(__name__)
def convert_cxlm_to_transformers(ckpt_path):
ckpt = torch.load(ckpt_path, map_location="cpu")
args = ckpt["args"]
config = BertConfig(
vocab_size_or_config_json_file=250002,
hidden_size=args.encoder_embed_dim,
num_hidden_layers=args.encoder_layers,
num_attention_heads=args.encoder_attention_heads,
intermediate_size=args.encoder_ffn_embed_dim,
max_position_embeddings=args.max_positions + 2,
type_vocab_size=1,
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
print("Our BERT config:", config)
stat_dict = ckpt["model"]
new_stat_dict = {}
model = RobertaForMaskedLM(config)
model.eval()
sent_enc = "model_fast.decoder.sentence_encoder"
new_stat_dict["roberta.embeddings.word_embeddings.weight"] = stat_dict[sent_enc + ".embed_tokens.weight"]
new_stat_dict["roberta.embeddings.position_embeddings.weight"] = stat_dict[sent_enc + ".embed_positions.weight"]
new_stat_dict["roberta.embeddings.token_type_embeddings.weight"] = torch.zeros_like(model.roberta.embeddings.token_type_embeddings.weight)
new_stat_dict["roberta.embeddings.LayerNorm.weight"] = stat_dict[sent_enc +".emb_layer_norm.weight"]
new_stat_dict["roberta.embeddings.LayerNorm.bias"] = stat_dict[sent_enc + ".emb_layer_norm.bias"]
for i in range(config.num_hidden_layers):
# Encoder: start of layer
# layer: BertLayer = model.roberta.encoder.layer[i]
layer = "roberta.encoder.layer.%d" % i
roberta_layer = sent_enc + (".layers.%d" % i)
### self attention
# self_attn: BertSelfAttention = layer.attention.self
self_attn = layer + ".attention.self"
assert(
stat_dict[roberta_layer+".self_attn.k_proj.weight"].data.shape == \
stat_dict[roberta_layer+".self_attn.q_proj.weight"].data.shape == \
stat_dict[roberta_layer+".self_attn.v_proj.weight"].data.shape == \
torch.Size((config.hidden_size, config.hidden_size))
)
new_stat_dict[self_attn+".query.weight"] = stat_dict[roberta_layer+".self_attn.q_proj.weight"]
new_stat_dict[self_attn+".query.bias"] = stat_dict[roberta_layer+".self_attn.q_proj.bias"]
new_stat_dict[self_attn+".key.weight"] = stat_dict[roberta_layer+".self_attn.k_proj.weight"]
new_stat_dict[self_attn+".key.bias"] = stat_dict[roberta_layer+".self_attn.k_proj.bias"]
new_stat_dict[self_attn+".value.weight"] = stat_dict[roberta_layer+".self_attn.v_proj.weight"]
new_stat_dict[self_attn+".value.bias"] = stat_dict[roberta_layer+".self_attn.v_proj.bias"]
### self-attention output
# self_output: BertSelfOutput = layer.attention.output
self_output = layer + ".attention.output"
assert(
model.roberta.encoder.layer[i].attention.output.dense.weight.shape == stat_dict[roberta_layer+".self_attn.out_proj.weight"].shape
)
new_stat_dict[self_output+".dense.weight"] = stat_dict[roberta_layer+".self_attn.out_proj.weight"]
new_stat_dict[self_output+".dense.bias"] = stat_dict[roberta_layer+".self_attn.out_proj.bias"]
new_stat_dict[self_output+".LayerNorm.weight"] = stat_dict[roberta_layer+".self_attn_layer_norm.weight"]
new_stat_dict[self_output+".LayerNorm.bias"] = stat_dict[roberta_layer+".self_attn_layer_norm.bias"]
### intermediate
# intermediate: BertIntermediate = layer.intermediate
intermediate = layer + ".intermediate"
assert(
model.roberta.encoder.layer[i].intermediate.dense.weight.shape == stat_dict[roberta_layer+".fc1.weight"].shape
)
#TODO
new_stat_dict[intermediate+".dense.weight"] = stat_dict[roberta_layer+".fc1.weight"]
new_stat_dict[intermediate+".dense.bias"] = stat_dict[roberta_layer+".fc1.bias"]
### output
# bert_output: BertOutput = layer.output
bert_output = layer + ".output"
assert(
model.roberta.encoder.layer[i].output.dense.weight.shape == stat_dict[roberta_layer+".fc2.weight"].shape
)
new_stat_dict[bert_output+".dense.weight"] = stat_dict[roberta_layer+".fc2.weight"]
new_stat_dict[bert_output+".dense.bias"] = stat_dict[roberta_layer+".fc2.bias"]
new_stat_dict[bert_output+".LayerNorm.weight"] = stat_dict[roberta_layer+".final_layer_norm.weight"]
new_stat_dict[bert_output+".LayerNorm.bias"] = stat_dict[roberta_layer+".final_layer_norm.bias"]
#### end of layer
new_stat_dict["lm_head.dense.weight"] = stat_dict["model_fast.decoder.lm_head.dense.weight"]
new_stat_dict["lm_head.dense.bias"] = stat_dict["model_fast.decoder.lm_head.dense.bias"]
new_stat_dict["lm_head.layer_norm.weight"] = stat_dict["model_fast.decoder.lm_head.layer_norm.weight"]
new_stat_dict["lm_head.layer_norm.bias"] = stat_dict["model_fast.decoder.lm_head.layer_norm.bias"]
new_stat_dict["lm_head.decoder.weight"] = stat_dict["model_fast.decoder.lm_head.weight"]
new_stat_dict["lm_head.bias"] = stat_dict["model_fast.decoder.lm_head.bias"]
new_stat_dict["roberta.pooler.dense.weight"] = model.roberta.pooler.dense.weight
new_stat_dict["roberta.pooler.dense.bias"] = model.roberta.pooler.dense.bias
if "proj_matrix_fast" in stat_dict:
new_stat_dict["proj_matrix_fast"] = stat_dict["proj_matrix_fast"]
# model.load_state_dict(new_stat_dict)
return new_stat_dict
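# Illustrative sketch (not part of the original file): the returned state dict follows the
# Transformers RobertaForMaskedLM layout, so it can be loaded back into such a model; the
# checkpoint path and config below are placeholders.
#
#   state_dict = convert_cxlm_to_transformers("/path/to/checkpoint.pt")
#   model = RobertaForMaskedLM(config)                # config matching the checkpoint dimensions
#   model.load_state_dict(state_dict, strict=False)   # strict=False tolerates "proj_matrix_fast"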
def convert_roberta_to_transformers(ckpt_path):
ckpt = torch.load(ckpt_path, map_location="cpu")
args = ckpt["args"]
config = BertConfig(
vocab_size_or_config_json_file=250002,
hidden_size=args.encoder_embed_dim,
num_hidden_layers=args.encoder_layers,
num_attention_heads=args.encoder_attention_heads,
intermediate_size=args.encoder_ffn_embed_dim,
max_position_embeddings=args.max_positions + 2,
type_vocab_size=1,
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
print("Our BERT config:", config)
stat_dict = ckpt["model"]
new_stat_dict = {}
model = RobertaForMaskedLM(config)
model.eval()
sent_enc = "decoder.sentence_encoder"
new_stat_dict["roberta.embeddings.word_embeddings.weight"] = stat_dict[sent_enc + ".embed_tokens.weight"]
new_stat_dict["roberta.embeddings.position_embeddings.weight"] = stat_dict[sent_enc + ".embed_positions.weight"]
new_stat_dict["roberta.embeddings.token_type_embeddings.weight"] = torch.zeros_like(model.roberta.embeddings.token_type_embeddings.weight)
new_stat_dict["roberta.embeddings.LayerNorm.weight"] = stat_dict[sent_enc +".emb_layer_norm.weight"]
new_stat_dict["roberta.embeddings.LayerNorm.bias"] = stat_dict[sent_enc + ".emb_layer_norm.bias"]
for i in range(config.num_hidden_layers):
# Encoder: start of layer
# layer: BertLayer = model.roberta.encoder.layer[i]
layer = "roberta.encoder.layer.%d" % i
roberta_layer = sent_enc + (".layers.%d" % i)
### self attention
# self_attn: BertSelfAttention = layer.attention.self
self_attn = layer + ".attention.self"
assert(
stat_dict[roberta_layer+".self_attn.k_proj.weight"].data.shape == \
stat_dict[roberta_layer+".self_attn.q_proj.weight"].data.shape == \
stat_dict[roberta_layer+".self_attn.v_proj.weight"].data.shape == \
torch.Size((config.hidden_size, config.hidden_size))
)
new_stat_dict[self_attn+".query.weight"] = stat_dict[roberta_layer+".self_attn.q_proj.weight"]
new_stat_dict[self_attn+".query.bias"] = stat_dict[roberta_layer+".self_attn.q_proj.bias"]
new_stat_dict[self_attn+".key.weight"] = stat_dict[roberta_layer+".self_attn.k_proj.weight"]
new_stat_dict[self_attn+".key.bias"] = stat_dict[roberta_layer+".self_attn.k_proj.bias"]
new_stat_dict[self_attn+".value.weight"] = stat_dict[roberta_layer+".self_attn.v_proj.weight"]
new_stat_dict[self_attn+".value.bias"] = stat_dict[roberta_layer+".self_attn.v_proj.bias"]
### self-attention output
# self_output: BertSelfOutput = layer.attention.output
self_output = layer + ".attention.output"
assert(
model.roberta.encoder.layer[i].attention.output.dense.weight.shape == stat_dict[roberta_layer+".self_attn.out_proj.weight"].shape
)
new_stat_dict[self_output+".dense.weight"] = stat_dict[roberta_layer+".self_attn.out_proj.weight"]
new_stat_dict[self_output+".dense.bias"] = stat_dict[roberta_layer+".self_attn.out_proj.bias"]
new_stat_dict[self_output+".LayerNorm.weight"] = stat_dict[roberta_layer+".self_attn_layer_norm.weight"]
new_stat_dict[self_output+".LayerNorm.bias"] = stat_dict[roberta_layer+".self_attn_layer_norm.bias"]
### intermediate
# intermediate: BertIntermediate = layer.intermediate
intermediate = layer + ".intermediate"
assert(
model.roberta.encoder.layer[i].intermediate.dense.weight.shape == stat_dict[roberta_layer+".fc1.weight"].shape
)
#TODO
new_stat_dict[intermediate+".dense.weight"] = stat_dict[roberta_layer+".fc1.weight"]
new_stat_dict[intermediate+".dense.bias"] = stat_dict[roberta_layer+".fc1.bias"]
### output
# bert_output: BertOutput = layer.output
bert_output = layer + ".output"
assert(
model.roberta.encoder.layer[i].output.dense.weight.shape == stat_dict[roberta_layer+".fc2.weight"].shape
)
new_stat_dict[bert_output+".dense.weight"] = stat_dict[roberta_layer+".fc2.weight"]
new_stat_dict[bert_output+".dense.bias"] = stat_dict[roberta_layer+".fc2.bias"]
new_stat_dict[bert_output+".LayerNorm.weight"] = stat_dict[roberta_layer+".final_layer_norm.weight"]
new_stat_dict[bert_output+".LayerNorm.bias"] = stat_dict[roberta_layer+".final_layer_norm.bias"]
#### end of layer
new_stat_dict["lm_head.dense.weight"] = stat_dict["decoder.lm_head.dense.weight"]
new_stat_dict["lm_head.dense.bias"] = stat_dict["decoder.lm_head.dense.bias"]
new_stat_dict["lm_head.layer_norm.weight"] = stat_dict["decoder.lm_head.layer_norm.weight"]
new_stat_dict["lm_head.layer_norm.bias"] = stat_dict["decoder.lm_head.layer_norm.bias"]
new_stat_dict["lm_head.decoder.weight"] = stat_dict["decoder.lm_head.weight"]
new_stat_dict["lm_head.bias"] = stat_dict["decoder.lm_head.bias"]
new_stat_dict["roberta.pooler.dense.weight"] = model.roberta.pooler.dense.weight
new_stat_dict["roberta.pooler.dense.bias"] = model.roberta.pooler.dense.bias
return new_stat_dict
if __name__ == "__main__":
sd = convert_cxlm_to_transformers("/home/v-zechi/data/unilm/zechi/exp/cxlm_exp/dump-g16-lr2e-4/checkpoint_1_10000.pt")
print(sd.keys()) | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/tools/convert.py |
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/tools/__init__.py |
|
import logging
import numpy as np
import os
import torch
import random
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
try:
from apex import amp
except ImportError:
pass
from src.pequod.trainer import (Trainer,
XClassificationTrainer, XQATrainer, SelfTrainer)
from transformers import AdamW, ConstantLRSchedule, WarmupLinearSchedule
logger = logging.getLogger(__name__)
class BaseTrainer(Trainer):
def __init__(self, args, model, tokenizer):
super().__init__(args, model, tokenizer)
self.optimizer = None
self.scheduler = None
self.global_steps = 0
self.all_shard_fn = {}
def init_optimizer(self, model, lr, t_total, fixed=None):
args = self.args
no_decay = ['bias', 'LayerNorm.weight']
if fixed is None: fixed = []
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(
nd in n for nd in no_decay) and not any(f in n for f in fixed)
], "weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay) and not any(f in n for f in fixed)
], "weight_decay": 0.0}]
# TODO calculate t_total
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=args.adam_epsilon)
if args.scheduler == "linear":
warmup_steps = t_total * args.warmup_ratio if args.warmup_steps == -1 else args.warmup_steps
logger.info("Setting scheduler, warmups=%d, lr=%.7f, total_updates=%d" % (
warmup_steps, lr, t_total))
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=t_total)
elif args.scheduler == "constant":
logger.info("Setting scheduler, ConstantLRSchedule")
scheduler = ConstantLRSchedule(optimizer)
else:
raise ValueError
return optimizer_grouped_parameters, optimizer, scheduler
def optim_step(self, **kwargs):
args = self.args
# self.model.zero_grad()
if args.fp16:
# with amp.scale_loss(loss, self.optimizer) as scaled_loss:
# scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer), args.max_grad_norm)
else:
# loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), args.max_grad_norm)
self.optimizer.step()
self.scheduler.step()
self.model.zero_grad()
def backward_step(self, loss, **kwargs):
args = self.args
if args.accumulate_steps > 1:
loss = loss / args.accumulate_steps
if args.fp16:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
def step(self, *args, **kwargs):
algo = kwargs.pop("algo", self.args.algo)
if algo is None: algo = self.args.algo
step_func_names = ["%s_step" % s for s in algo.split(",")]
return getattr(self, random.choice(step_func_names))(*args, **kwargs)
def base_step(self, batches, is_qa=False, **kwargs):
tot_loss = 0.0
for step_batches in batches:
batch = step_batches[0]
batch_dict = self._parse_batch(batch)
loss = self.model(**batch_dict)[0]
self.backward_step(loss)
tot_loss += loss.item()
self.optim_step()
return tot_loss / len(batches)
def train_full_epoch(self, train_ds_keys, epoch_id, is_qa=False, algo=None):
if train_ds_keys == "": return
logger.info("***** Training epoch %d - train_ds_keys: %s *****" % (
epoch_id, str(train_ds_keys)))
args = self.args
n_instances = 0
data_loaders = []
if isinstance(train_ds_keys, str):
train_ds_keys = train_ds_keys.split(";")
for ds_key_str in train_ds_keys:
data_loaders.append(self.get_dataloader_from_str(ds_key_str, epoch_id))
if self.optimizer is None:
_, self.optimizer, self.scheduler = self.init_optimizer(
self.model, args.learning_rate, len(data_loaders[0]) * args.num_train_epochs // args.accumulate_steps)
if args.fp16:
self.model, self.optimizer = amp.initialize(
self.model, self.optimizer, opt_level=args.fp16_opt_level)
model = self.model
model.train()
losses = []
step = 0
step_func_dict = {"batches": [], "is_qa": is_qa, "epoch_id": epoch_id}
# for step, batches in enumerate(zip(*data_loaders)):
for batches in zip(*data_loaders):
# step_func_dict = {"batches": batches, "is_qa": is_qa, "epoch_id": epoch_id}
step_func_dict["batches"].append(batches)
if len(step_func_dict["batches"]) == args.accumulate_steps:
loss = self.step(**step_func_dict, algo=algo)
losses.append(loss)
step_func_dict["batches"] = []
else:
continue
n_instances += args.train_batch_size * args.accumulate_steps
self.global_steps += 1
step += 1
if step % args.logging_steps == 0:
cur_lr = self.scheduler.get_lr()[0]
logger.info(
"Epoch %d - step %7d - global step %d - lr %.8f - n instances %7d - loss: %.4f " % (
epoch_id, step, self.global_steps, cur_lr, n_instances, sum(losses) / len(losses)))
losses = []
def _parse_ds_key(self, ds_key_str):
assert isinstance(ds_key_str, str)
args, kwargs = [], {}
for s in ds_key_str.split(","):
if ":" in s:
k, v = s.split(":")
kwargs[k] = v
else: args.append(s)
return args, kwargs
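    # Illustrative sketch (not part of the original file): a dataset-key string mixes
    # positional parts and ":"-separated options, e.g.
    #   self._parse_ds_key("train,en,cut:200")  ->  (["train", "en"], {"cut": "200"})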
def get_mixed_dataloader(self, *dataloaders):
iters = [iter(d) for d in dataloaders]
len_dl = len(iters)
finish = [False] * len_dl
cnt = 0
while cnt < len_dl:
idx = random.randint(0, len_dl - 1)
if finish[idx]: continue
try:
yield next(iters[idx])
except StopIteration:
finish[idx] = True
cnt += 1
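    # Illustrative sketch (not part of the original file): batches from several dataloaders
    # are randomly interleaved until every loader is exhausted, e.g. with two hypothetical
    # loaders `en_loader` and `de_loader`:
    #
    #   for batch in self.get_mixed_dataloader(en_loader, de_loader):
    #       ...   # batch comes from a randomly chosen, not-yet-finished loader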
def get_all_shard_fn(self, *args, cache_filename=None):
if args in self.all_shard_fn: return self.all_shard_fn[args]
all_shard_fn = []
shard_id = 0
while True:
fn = cache_filename + "." + str(shard_id)
if not os.path.exists(fn): break
all_shard_fn.append(fn)
shard_id += 1
logger.info("%d shards found." % len(all_shard_fn))
np.random.shuffle(all_shard_fn)
self.all_shard_fn[args] = all_shard_fn
return all_shard_fn
def get_sharded_dataloader(self, *args, **kwargs):
logger.info("Getting dataloader - args: %s" % str(args))
split, lang, epoch_id = args
cache_key = self.get_cache_key()
cache_filename = os.path.join(
self.args.data_dir, "cached_%s_%s_%s" % (split, lang, cache_key))
all_shard_fn = self.get_all_shard_fn(
split, lang, cache_filename=cache_filename)
fn = all_shard_fn[epoch_id % len(all_shard_fn)]
logger.info("Loading dataset from %s" % str(fn))
tensor_dict = torch.load(fn)
tensors = []
for _, t in tensor_dict.items():
tensors.append(t.long())
dataset = TensorDataset(*tensors)
sampler = self.get_sampler(dataset, *args, **kwargs)
dataloader = DataLoader(dataset, sampler=sampler,
batch_size=self.args.train_batch_size)
return dataloader
def get_dataloader_from_str(self, ds_key_str, epoch_id):
if ds_key_str.startswith("mix("):
# example: mix(train,en,cut:200|train,zh,cut:20)
assert ds_key_str[-1] == ")"
ds_key_str = ds_key_str[4:-1]
dataloaders = []
for dks in ds_key_str.split("|"):
dataloaders.append(self.get_dataloader_from_str(dks, epoch_id))
return self.get_mixed_dataloader(*dataloaders)
ds_key_args, ds_key_kwargs = self._parse_ds_key(ds_key_str)
sharded_dataloader = ds_key_kwargs.pop("sharded_dataloader", "")
if sharded_dataloader == "True":
return self.get_sharded_dataloader(*ds_key_args, epoch_id, **ds_key_kwargs)
return self.get_dataloader(*ds_key_args, **ds_key_kwargs)
def get_model_class(proto_train_class=None, is_qa=False):
class ProtoXClassificationTrainer(XClassificationTrainer, proto_train_class):
def __init__(self, args, model, tokenizer):
proto_train_class.__init__(self, args, model, tokenizer)
# _, self.optimizer, self.scheduler = self.init_optimizer(
# model, args.learning_rate)
def train_full_epoch(self, train_ds_keys, epoch_id, algo=None):
proto_train_class.train_full_epoch(
self, train_ds_keys, epoch_id, is_qa=False, algo=algo)
def before_loop(self):
# args = self.args
# if args.labeling_unlabeled_data:
# assert args.semi_split != ""
# for lang in args.test_langs.split(","):
# logger.info("Labeling lang: %s" % lang)
# self.labeling_dataset(self.model, (args.semi_split, lang))
pass
def init_optimizer(self, *args, **kwargs):
return proto_train_class.init_optimizer(self, *args, **kwargs)
class ProtoXQATrainer(XQATrainer, proto_train_class):
def __init__(self, args, model, tokenizer):
proto_train_class.__init__(self, args, model, tokenizer)
# _, self.optimizer, self.scheduler = self.init_optimizer(
# model, args.learning_rate)
self.example_feature_cache = {}
def train_full_epoch(self, train_ds_keys, epoch_id, algo=None):
proto_train_class.train_full_epoch(
self, train_ds_keys, epoch_id, is_qa=True, algo=algo)
def init_optimizer(self, *args, **kwargs):
return proto_train_class.init_optimizer(self, *args, **kwargs)
return ProtoXQATrainer if is_qa else ProtoXClassificationTrainer
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/training/xtrainer.py |
import re
import sys
import os
import random
import torch
import pickle
import logging
import numpy as np
# from transformers import (WEIGHTS_NAME,
# BertConfig, BertForSequenceClassification, BertTokenizer,
# RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer,
# RobertaModel, BertModel, XLMModel,
# XLMConfig, XLMForSequenceClassification, XLMTokenizer,
# XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer,
# DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer,
# BertForQuestionAnswering)
#
# from src.pequod.model.roberta import RobertaForQuestionAnswering
from transformers import XLMRobertaConfig, XLMRobertaForRetrieval, XLMRobertaTokenizer
# ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) \
# for conf in (BertConfig, XLNetConfig, XLMConfig,
# RobertaConfig, DistilBertConfig)), ())
ALL_MODELS = []
# # Model classes for classification
# MODEL_CLASSES = {
# 'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
# 'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
# 'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
# 'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
# 'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
# "xlmr": (RobertaConfig, RobertaForSequenceClassification, XLMRTokenizer)
# }
#
# QA_MODELS = {
# "bert": BertForQuestionAnswering,
# "roberta": RobertaForQuestionAnswering,
# "xlmr": RobertaForQuestionAnswering,
# }
BERT_CLASSES = {
"xlmr": (XLMRobertaConfig, XLMRobertaForRetrieval, XLMRobertaTokenizer),
}
def to_cuda(tup):
return tuple(t.cuda() for t in tup)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
#TODO multi gpu support
# if args.n_gpu > 0:
# torch.cuda.manual_seed_all(args.seed)
def init_exp(args):
# dump parameters
set_dump_path(args)
pickle.dump(args, open(os.path.join(args.dump_path, 'params.pkl'), 'wb'))
# get running command
command = ["python", sys.argv[0]]
for x in sys.argv[1:]:
if x.startswith('--'):
assert '"' not in x and "'" not in x
command.append(x)
else:
assert "'" not in x
if re.match('^[a-zA-Z0-9_]+$', x):
command.append("%s" % x)
else:
command.append("'%s'" % x)
command = ' '.join(command)
args.command = command + ' --exp_id "%s"' % args.exp_id
# check experiment name
assert len(args.exp_name.strip()) > 0
logging.basicConfig(
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
logger.info("\n".join(
"%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())))
logger.info("The experiment will be stored in %s\n" % args.dump_path)
logger.info("Running command: %s" % command)
logger.info("")
def set_dump_path(args, output_dir=None, exp_name=None):
if output_dir is None: output_dir = args.output_dir
if exp_name is None: exp_name = args.exp_name
chars = 'abcdefghijklmnopqrstuvwxyz0123456789'
while True:
exp_id = ''.join(random.choice(chars) for _ in range(10))
if not os.path.isdir(os.path.join(output_dir, exp_name, exp_id)):
break
args.exp_id = exp_id
dump_path = os.path.join(output_dir, exp_name, exp_id)
os.makedirs(dump_path)
args.dump_path = dump_path
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/training/__init__.py |
import os
import json
import logging
import random
import torch
import numpy as np
try:
from apex import amp
except ImportError:
pass
from torch.utils.data import (DataLoader,
RandomSampler, SequentialSampler, TensorDataset, SubsetRandomSampler,
Subset, ConcatDataset)
#from transformers import AdamW, ConstantLRSchedule, WarmupLinearSchedule
from transformers import AdamW, get_constant_schedule, get_linear_schedule_with_warmup
from src.pequod.training import to_cuda, set_seed
from src.pequod.eval import (eval_classification, eval_qa,
score_dict_to_string, score_dicts_to_latex)
from src.pequod.data import xdoc, xqa
from src.pequod.data.sampler import SubSampler
logger = logging.getLogger(__name__)
class Trainer(object):
def __init__(self, args, model, tokenizer):
self.args = args
self.datasets = {}
self.dataloaders = {}
self.iter_cache = {}
self.best_scores = {}
self.model = model
self.tokenizer = tokenizer
def run(self):
raise NotImplementedError
def _parse_batch(self, batch, **kwargs):
_batch = to_cuda(batch)
# _batch = batch
ret = {"input_ids": _batch[0],
"attention_mask": _batch[1],
"token_type_ids": _batch[2] if self.args.model_type == "bert" else None,
"labels": _batch[3]}
ret.update(**kwargs)
return ret
def train_epoch(self, split, lang, epoch_id):
raise NotImplementedError
def before_loop(self):
return
def train_full_epoch(self, split, lang, epoch_id):
raise NotImplementedError
def eval_epoch(self, split, lang, epoch_id):
raise NotImplementedError
def load_and_cache_examples(self, *args, **kwargs):
raise NotImplementedError
def save(self, name, epoch=0):
path = os.path.join(self.args.dump_path, "%s.pth" % name)
logger.info("Saving %s to %s ..." % (name, path))
data = {
"epoch":epoch,
"model":self.model.state_dict(),
"params": {k: v for k, v in self.args.__dict__.items()}}
torch.save(data, path)
def get_dataset_deprecated(self, *args, **kwargs):
logger.warning("cut_args is deprecated, please use train_ds_keys.")
if args in self.datasets: return self.datasets[args]
dataset = self.load_and_cache_examples(*args, **kwargs)
cut_split, cut_num = self.args.cut_args.split(",")
cut_num = int(cut_num)
if cut_num != -1 and cut_num < len(dataset) and cut_split == args[0]:
# cut_indices = random.sample(range(len(dataset)), cut_num)
cut_indices = [i for i in range(cut_num)]
# dataset = Subset(dataset, cut_indices)
dataset = TensorDataset(
*tuple(tensor[cut_indices] for tensor in dataset.tensors))
self.datasets[args] = dataset
return dataset
def get_cache_key(self):
cache_key = "%s-%s" % (self.args.model_key, self.args.model_type)
return cache_key
def get_dataset(self, *args, **kwargs):
if args in self.datasets: return self.datasets[args]
dataset = self.load_and_cache_examples(*args, **kwargs)
cut_num = int(kwargs.pop("cut", "-1"))
if cut_num != -1 and cut_num < len(dataset):
cut_indices = [i for i in range(cut_num)]
dataset = TensorDataset(
*tuple(tensor[cut_indices] for tensor in dataset.tensors))
self.datasets[args] = dataset
return dataset
def get_sampler(self, data_source, *args, **kwargs):
shuffle = kwargs.get("shuffle", args[0] == "train")
num_samples = kwargs.get("num_samples", None)
if num_samples is not None:
num_samples = int(num_samples)
sampler = SubSampler(data_source, num_samples)
else:
sampler = RandomSampler(data_source) if shuffle else SequentialSampler(data_source)
return sampler
def get_dataloader(self, *args, **kwargs):
logger.info("Getting dataloader - args: %s" % str(args))
if args in self.dataloaders: return self.dataloaders[args]
dataset = kwargs["dataset"] if "dataset" in kwargs \
else self.get_dataset(*args, **kwargs)
sampler = self.get_sampler(dataset, *args, **kwargs)
dataloader = DataLoader(dataset, sampler=sampler,
batch_size=self.args.train_batch_size)
self.dataloaders[args] = dataloader
return dataloader
def next_batch(self, *args, **kwargs):
if args not in self.iter_cache:
self.iter_cache[args] = iter(self.get_dataloader(*args, **kwargs))
try:
ret = next(self.iter_cache[args])
except StopIteration:
self.iter_cache[args] = iter(self.get_dataloader(*args, **kwargs))
ret = next(self.iter_cache[args])
return ret
def set_dataset(self, dataset, args):
self.datasets[args] = dataset
if args in self.dataloaders: self.dataloaders.pop(args)
if args in self.iter_cache: self.iter_cache.pop(args)
def copy_label(self, trg_key, src_key):
src_ds = self.get_dataset(*src_key)
trg_ds = self.get_dataset(*trg_key)
new_trg_ds = TensorDataset(*(trg_ds.tensors[:-1]) + (src_ds.tensors[-1],))
self.set_dataset(new_trg_ds, trg_key)
class SelfTrainer(Trainer):
def __init__(self, args, model=None, tokenizer=None):
super().__init__(args, model, tokenizer)
def labeling_dataset(self, model, ds_key):
logger.info("Labeling dataset %s" % str(ds_key))
model.eval()
dataset:TensorDataset = self.get_dataset(*ds_key)
# NOTE all_labels must be the last
preds = None
for batch in self.get_dataloader(*ds_key, shuffle=False):
with torch.no_grad():
batch_dict = self._parse_batch(batch, labels=None)
outputs = model(**batch_dict)
logits = outputs[0]
pred = logits.detach().cpu().numpy()
preds = pred if preds is None else np.append(preds, pred, axis=0)
new_labels = np.argmax(preds, axis=1)
new_labels = torch.tensor(new_labels, dtype=torch.long)
self.set_dataset(
TensorDataset(*(dataset.tensors[:-1] + (new_labels, ))), ds_key)
return preds
def update_concat_dataset_cache(self, ds_keys, preds_list, key_prefix="concat"):
"""
if preds_list[i] is None, then the ith dataset won't be cut by confidence.
"""
assert len(ds_keys) == len(preds_list)
assert all(ds_key[1:] == ds_keys[0][1:] for ds_key in ds_keys)
new_split = "-".join((key_prefix,) + tuple(ds_key[0] for ds_key in ds_keys))
logger.info("Concating %d dataset %s ..." % (len(ds_keys), new_split))
new_ds_key = (new_split, ) + ds_keys[0][1:]
datasets = []
for ds_key, preds in zip(ds_keys, preds_list):
dataset = self.get_dataset(*ds_key)
if preds is None:
datasets.append(dataset)
continue
new_labels = dataset.tensors[-1]
confident_indices = []
for i in range(len(new_labels)):
if preds[i,new_labels[i]] >= self.args.confidence_threshold:
confident_indices.append(i)
logger.info(
"Labeled %d confident examples out of %d examples for dataset %s" % (
len(confident_indices), len(new_labels), str(ds_key)))
if len(confident_indices) > 0:
datasets.append(Subset(dataset, confident_indices))
self.set_dataset(ConcatDataset(datasets), new_ds_key)
# self.datasets[new_ds_key] = ConcatDataset(datasets)
logger.info("Construct new dataset %s with %d examples" % (
str(new_ds_key), len(self.datasets[new_ds_key])))
return new_ds_key
class DistillTrainer(Trainer):
def __init__(self, args, model=None, tokenizer=None):
super().__init__(args, model, tokenizer)
def labeling_dataset(self, model, ds_key):
logger.info("Labeling dataset %s" % str(ds_key))
model.eval()
dataset:TensorDataset = self.get_dataset(*ds_key)
preds = None
for batch in self.get_dataloader(*ds_key, shuffle=False):
with torch.no_grad():
batch_dict = self._parse_batch(batch, labels=None)
outputs = model(**batch_dict)
logits = outputs[0]
pred = logits.detach().cpu().numpy()
preds = pred if preds is None else np.append(preds, pred, axis=0)
preds = torch.from_numpy(preds)
self.set_dataset(
TensorDataset(*(dataset.tensors[:-1] + (preds, ))), ds_key)
def update_concat_dataset_cache(self, ds_keys, key_prefix="concat"):
assert all(ds_key[1:] == ds_keys[0][1:] for ds_key in ds_keys)
new_split = "-".join((key_prefix,) + tuple(ds_key[0] for ds_key in ds_keys))
logger.info("Concating %d dataset %s ..." % (len(ds_keys), new_split))
new_ds_key = (new_split, ) + ds_keys[0][1:]
new_ds = ConcatDataset([self.get_dataset(*ds_key) for ds_key in ds_keys])
self.set_dataset(new_ds, new_ds_key)
logger.info("Construct new dataset %s with %d examples" % (
str(new_ds_key), len(new_ds)))
return new_ds_key
class XClassificationTrainer(Trainer):
def __init__(self, args, model, tokenizer):
super().__init__(args, model, tokenizer)
_, self.optimizer, self.scheduler = self.init_optimizer(
model, args.learning_rate)
self.example_feature_cache = {}
self.no_improve_cnt = 0
def init_optimizer(self, model, lr):
args = self.args
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(
nd in n for nd in no_decay)], "weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay)], "weight_decay": 0.0}]
# TODO calculate t_total
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=args.adam_epsilon)
# scheduler = WarmupLinearSchedule(
# optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
scheduler = get_constant_schedule(optimizer)
return optimizer_grouped_parameters, optimizer, scheduler
def run(self):
args = self.args
set_seed(args)
if self.optimizer is not None and args.fp16:
self.model, self.optimizer = amp.initialize(
self.model, self.optimizer, opt_level=args.fp16_opt_level)
test_langs = args.test_langs.split(",")
assert args.dev_mode in ["train_lang", "test_lang", "avg"]
if args.dev_mode == "train_lang":
assert args.train_lang in test_langs
train_lang_index = test_langs.index(args.train_lang)
logger.info("***** Running Trainer *****")
logger.info("***** Before Trainer Loop *****")
self.before_loop()
def _eval(update_no_improve_cnt=False):
score_tups = []
should_save = False
for lang in test_langs:
dev_score_dict = self.eval_epoch(
split="dev", lang=lang, epoch_id=epoch_id)
test_score_dict = self.eval_epoch(
split="test", lang=lang, epoch_id=epoch_id)
score_tup = (dev_score_dict, test_score_dict)
score_tups.append(score_tup)
logger.info("Eval epoch %d - lang - %s score - dev: %s - test: %s" % (
epoch_id, lang, score_dict_to_string(dev_score_dict),
score_dict_to_string(test_score_dict)))
dev_scores, test_scores = [], []
if args.dev_mode == "test_lang":
# select best results w.r.t. the res on test-lang dev sets.
for lang, score_tup in zip(test_langs, score_tups):
if lang not in self.best_scores:
if lang == test_langs[-1]: should_save = True
self.best_scores[lang] = score_tup
elif self.best_scores[lang][0][args.dev_criterion] < \
score_tup[0][args.dev_criterion]:
if lang == test_langs[-1]: should_save = True
self.best_scores[lang] = score_tup
dev_scores.append(self.best_scores[lang][0])
test_scores.append(self.best_scores[lang][1])
elif args.dev_mode == "train_lang":
# select best results w.r.t. the res on train-lang dev sets.
if (args.train_lang not in self.best_scores) or self.best_scores[
args.train_lang][0][args.dev_criterion] < \
score_tups[train_lang_index][0][args.dev_criterion]:
should_save = True
for lang, score_tup in zip(test_langs, score_tups):
self.best_scores[lang] = score_tup
dev_scores.append(self.best_scores[lang][0])
test_scores.append(self.best_scores[lang][1])
if update_no_improve_cnt:
self.no_improve_cnt = 0
logger.info("New best results!")
else:
for lang in test_langs:
dev_scores.append(self.best_scores[lang][0])
test_scores.append(self.best_scores[lang][1])
if update_no_improve_cnt:
self.no_improve_cnt += 1
logger.info("Results not improved, no_improve_cnt:%d" % self.no_improve_cnt)
elif args.dev_mode == "avg":
# select best results by the best sum scores
avg_key = "_avg"
sum_dev_scores = sum_test_scores = 0
for score_tup in score_tups:
sum_dev_scores += score_tup[0][args.dev_criterion]
sum_test_scores += score_tup[1][args.dev_criterion]
if (avg_key not in self.best_scores) or self.best_scores[avg_key] < sum_dev_scores:
should_save = True
self.best_scores[avg_key] = sum_dev_scores
for lang, score_tup in zip(test_langs, score_tups):
self.best_scores[lang] = score_tup
dev_scores.append(self.best_scores[lang][0])
test_scores.append(self.best_scores[lang][1])
logger.info("New best results! Dev avg: %.2f Test avg: %.2f" % (
sum_dev_scores/len(test_langs), sum_test_scores/len(test_langs),
))
if update_no_improve_cnt:
self.no_improve_cnt = 0
else:
for lang in test_langs:
dev_scores.append(self.best_scores[lang][0])
test_scores.append(self.best_scores[lang][1])
if update_no_improve_cnt:
self.no_improve_cnt += 1
logger.info("Results not improved, no_improve_cnt:%d" % self.no_improve_cnt)
logger.info("Eval epoch %d - langs %s - dev scores - %s" % (
epoch_id, " & ".join(test_langs), score_dicts_to_latex(dev_scores)))
logger.info("Eval epoch %d - langs %s - test scores - %s" % (
epoch_id, " & ".join(test_langs), score_dicts_to_latex(test_scores)))
with open(os.path.join(args.exp_results_dir, args.exp_name), "w") as fp:
json.dump(self.best_scores, fp)
fp.flush()
if should_save and args.save:
save_to = os.path.join(args.dump_path, "best-%s-%s" % (
args.dev_criterion, args.model_type))
logger.info("Epoch %d, saving best model to %s" % (
epoch_id, save_to))
torch.save(self.model.state_dict(), save_to)
logger.info("***** Start Trainer Loop *****")
for epoch_id in range(args.num_train_epochs):
self.train_full_epoch(args.train_ds_keys, epoch_id=epoch_id, algo=args.algo)
_eval(update_no_improve_cnt=args.stopping_threshold>0)
if args.stopping_threshold > 0 and self.no_improve_cnt >= args.stopping_threshold:
logger.info("***** Early stop *****")
break
if args.add_train_ds_keys != "":
logger.info("***** Additional Trainer Loop *****")
state_dict_path = os.path.join(args.dump_path, "best-%s-%s" % (
args.dev_criterion, args.model_type))
logger.info("Reloading model parameters from %s ..." % state_dict_path)
state_dict = torch.load(state_dict_path, map_location="cpu")
self.model.load_state_dict(state_dict)
self.model.cuda()
num_additional_train_epochs = getattr(
args, "num_additional_train_epochs", args.num_train_epochs)
for epoch_id in range(num_additional_train_epochs):
self.train_full_epoch(
args.add_train_ds_keys, epoch_id=epoch_id, algo=args.add_algo)
_eval()
def train_full_epoch(self, train_ds_keys, epoch_id, algo=None):
raise NotImplementedError
def train_full_epoch_deprecated(self, split, lang, epoch_id):
logger.info("***** Training epoch %d - lang: %s *****" % (epoch_id, lang))
args = self.args
model = self.model
model.train()
losses = []
n_instances = 0
for step, batch in enumerate(self.get_dataloader(split, lang)):
feed_dict = self._parse_batch(batch)
outputs = model(**feed_dict)
loss = outputs[0]
model.zero_grad()
if args.fp16:
with amp.scale_loss(loss, self.optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(
amp.master_params(self.optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
self.optimizer.step()
self.scheduler.step()
losses.append(loss)
n_instances += args.train_batch_size
if step % args.logging_steps == 0:
logger.info(
"Epoch %d - n instances %7d - loss: %.4f " % (
epoch_id, n_instances, sum(losses) / len(losses)))
losses = []
def eval_epoch(self, split, lang, epoch_id):
logger.info("***** Evaluating epoch %d - split: %s - lang: %s*****" % (
epoch_id, split, lang))
def _get_batch_iter():
for batch in self.get_dataloader(split, lang, shuffle=False):
yield self._parse_batch(batch)
return eval_classification(self.model, _get_batch_iter())
def load_and_cache_examples(self, split, lang, **kwargs):
processor = xdoc.get_processor_class(self.args.dataset_name)()
cache_key = self.get_cache_key()
return xdoc.load_and_cache_examples(
self.args, processor, split, lang, self.tokenizer, cache_key)
class XQATrainer(XClassificationTrainer):
def __init__(self, args, model, tokenizer):
super().__init__(args, model, tokenizer)
def init_optimizer(self, model, lr):
args = self.args
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(
nd in n for nd in no_decay)], "weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay)], "weight_decay": 0.0}]
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=args.adam_epsilon)
dataloader = self.get_dataloader("train", args.train_lang)
t_total = len(dataloader) * args.num_train_epochs
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
return optimizer_grouped_parameters, optimizer, scheduler
def _parse_batch(self, batch, training=True, **kwargs):
_batch = to_cuda(batch)
# _batch = batch
if training:
ret = {"input_ids": _batch[0],
"attention_mask": _batch[1],
"token_type_ids": _batch[2] if self.args.model_type == "bert" else None,
'start_positions': _batch[3],
'end_positions': _batch[4]}
else:
ret = {"input_ids": _batch[0],
"attention_mask": _batch[1],
"token_type_ids": _batch[2] if self.args.model_type == "bert" else None}
ret.update(**kwargs)
return ret
def eval_epoch(self, split, lang, epoch_id):
args = self.args
logger.info("***** Evaluating epoch %d - split: %s - lang: %s*****" % (
epoch_id, split, lang))
dataset, examples, features = self.get_eval_data(split, lang)
def _get_batch_iter():
for batch in self.get_dataloader(
split, lang, shuffle=False, dataset=dataset):
example_indices = batch[3]
yield self._parse_batch(batch, training=False), example_indices
return eval_qa(self.model, _get_batch_iter(), **{
"all_examples": examples,
"all_features": features,
"predict_file": os.path.join(args.data_dir, "%s-%s.json" % (split, lang)),
"output_dir": args.dump_path,
"n_best_size": args.n_best_size,
"max_answer_length": args.max_answer_length,
"do_lower_case": args.do_lower_case,
"verbose_logging": args.verbose_logging,
"version_2_with_negative": args.version_2_with_negative,
"null_score_diff_threshold": args.null_score_diff_threshold})
def load_and_cache_examples(self, split, lang, **kwargs):
evaluate = kwargs.pop("evaluate", False)
cache_key = "%s-%s" % (self.args.model_key, self.args.model_type)
dataset, _, _ = xqa.load_and_cache_examples(
self.args, split, lang, self.tokenizer, cache_key, evaluate=evaluate)
return dataset
def get_eval_data(self, split, lang):
ds_key = (split, lang)
if ds_key in self.example_feature_cache:
return self.example_feature_cache[ds_key]
cache_key = "%s-%s" % (self.args.model_key, self.args.model_type)
dataset, examples, features = xqa.load_and_cache_examples(
self.args, split, lang, self.tokenizer, cache_key, evaluate=True)
self.example_feature_cache[ds_key] = (dataset, examples, features)
return dataset, examples, features
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/training/trainer.py |
from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
class LookaheadWrapper(Optimizer):
r"""Implements a Lookahead wrapper around a given optimizer
"""
def __init__(self, optimizer, la_steps, la_alpha=0.5):
self.optimizer = optimizer
self._la_step = 0 # counter for inner optimizer
self.la_alpha = la_alpha
self._total_la_steps = la_steps
self.state = defaultdict(dict)
# Cache the current optimizer parameters
for group in optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
param_state['cached_params'] = torch.zeros_like(p.data)
param_state['cached_params'].copy_(p.data)
def __getstate__(self):
return self.optimizer.__getstate__()
def __setstate__(self, state):
self.optimizer.__setstate__(state)
def zero_grad(self):
self.optimizer.zero_grad()
def state_dict(self):
return self.optimizer.state_dict()
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict)
@property
def param_groups(self):
return self.optimizer.param_groups
def step(self, closure=None):
"""Performs a single Lookahead optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = self.optimizer.step(closure)
self._la_step += 1
if self._la_step >= self._total_la_steps:
self._la_step = 0
# Lookahead and cache the current optimizer parameters
for group in self.optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
p.data.mul_(self.la_alpha).add_(1 - self.la_alpha, param_state['cached_params'])
param_state['cached_params'].copy_(p.data)
return loss
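# Usage sketch (illustrative, not part of the original file): wrap any inner optimizer and
# call `step()` as usual; every `la_steps` inner steps the fast weights are pulled back
# toward the cached slow weights by a factor of `la_alpha`.
#
#   base_opt = torch.optim.SGD(model.parameters(), lr=0.1)
#   opt = LookaheadWrapper(base_opt, la_steps=5, la_alpha=0.5)
#   loss = model(inputs).sum()     # `model` and `inputs` are placeholders
#   loss.backward()
#   opt.step()
#   opt.zero_grad()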
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/optim/la.py |
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/optim/__init__.py |
|
from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
class Lookahead0Wrapper(Optimizer):
r"""Implements a Lookahead wrapper around a given optimizer
"""
def __init__(self, optimizer, la_steps, la_alpha=0.5):
self.optimizer = optimizer
self._la_step = 0 # counter for inner optimizer
self.la_alpha = la_alpha
self._total_la_steps = la_steps
self.state = defaultdict(dict)
# Cache the current optimizer parameters
for group in optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
param_state['cached_params'] = torch.zeros_like(p.data)
param_state['cached_params'].copy_(p.data)
def __getstate__(self):
return self.optimizer.__getstate__()
def __setstate__(self, state):
self.optimizer.__setstate__(state)
def zero_grad(self):
self.optimizer.zero_grad()
def state_dict(self):
return self.optimizer.state_dict()
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict)
@property
def param_groups(self):
return self.optimizer.param_groups
def step(self, closure=None):
"""Performs a single Lookahead optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = self.optimizer.step(closure)
self._la_step += 1
if self._la_step >= self._total_la_steps:
self._la_step = 0
# Lookahead and cache the current optimizer parameters
for group in self.optimizer.param_groups:
for p in group['params']:
param_state = self.state[p]
p.data.mul_(self.la_alpha).add_(1 - self.la_alpha, param_state['cached_params'])
# param_state['cached_params'].copy_(p.data)
return loss
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/optim/la0.py |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.modeling_bert import BertPreTrainedModel, BertForQuestionAnswering
from transformers.modeling_roberta import RobertaModel
class RobertaForQuestionAnswering(BertPreTrainedModel):
base_model_prefix = "roberta"
def __init__(self, config):
BertPreTrainedModel.__init__(self, config)
self.num_labels = config.num_labels
self.roberta = RobertaModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
BertPreTrainedModel.init_weights(self)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, start_positions=None, end_positions=None, **kwargs):
outputs = self.roberta(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
**kwargs)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions) | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/model/roberta.py |
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/model/__init__.py |
|
import os
import numpy as np
import torch
import inspect
from src.pequod.data.utils_squad import RawResult, write_predictions
from src.pequod.data.utils_squad_evaluate import EVAL_OPTS, main as evaluate_on_squad
def to_list(tensor):
return tensor.detach().cpu().tolist()
def score_dict_to_string(score_dict):
return " ".join([("%s:%.2f" % (k, v)) for k, v in score_dict.items()])
def score_dicts_to_latex(score_dicts):
keys = [k for k in score_dicts[0]]
return "\n".join([""] + [(
" & ".join([key] + [("%.2f" % (sd[key])) for sd in score_dicts])
) for key in keys])
def eval_classification(model, batch_dict_iter):
model.eval()
preds, labels = None, None
for batch_dict in batch_dict_iter:
label_id = batch_dict["labels"].detach().cpu().numpy()
batch_dict.pop("labels")
with torch.no_grad(): logits = model(**batch_dict)[0]
pred = logits.detach().cpu().numpy()
if preds is None: preds, labels = pred, label_id
else:
preds = np.append(preds, pred, axis=0)
labels = np.append(labels, label_id)
preds = np.argmax(preds, axis=1)
result = (preds == labels).mean()
return {"acc": result*100.0}
def eval_qa(model, batch_dict_iter, prefix="", **kwargs):
features = kwargs["all_features"]
output_dir = kwargs["output_dir"]
model.eval()
all_results = []
for batch_dict, example_indices in batch_dict_iter:
with torch.no_grad(): outputs = model(**batch_dict)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = int(eval_feature.unique_id)
result = RawResult(unique_id = unique_id,
start_logits = to_list(outputs[0][i]),
end_logits = to_list(outputs[1][i]))
all_results.append(result)
output_prediction_file = os.path.join(
output_dir, "predictions_{}.json".format(prefix))
output_nbest_file = os.path.join(
output_dir, "nbest_predictions_{}.json".format(prefix))
if kwargs["version_2_with_negative"]:
output_null_log_odds_file = os.path.join(
output_dir, "null_odds_{}.json".format(prefix))
else: output_null_log_odds_file = None
wrt_pred_kwargs = {
"all_results": all_results,
"output_prediction_file": output_prediction_file,
"output_nbest_file": output_nbest_file,
"output_null_log_odds_file": output_null_log_odds_file}
for key in inspect.getfullargspec(write_predictions).args:
if key not in wrt_pred_kwargs:
wrt_pred_kwargs[key] = kwargs[key]
write_predictions(**wrt_pred_kwargs)
# Evaluate with the official SQuAD script
evaluate_options = EVAL_OPTS(
data_file=kwargs["predict_file"],
pred_file=output_prediction_file,
na_prob_file=output_null_log_odds_file,
out_file="/dev/null")
results = evaluate_on_squad(evaluate_options)
return results
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/__init__.py |
import faiss
import json
import logging
import numpy as np
import os
import torch
from src.pequod.data.xretrieval import load_and_cache_examples
from src.pequod.eval.evaluator import Evaluator
from src.pequod.eval.utils_retrieve import mine_bitext, bucc_eval
logger = logging.getLogger(__name__)
def load_embeddings(embed_file, num_sentences=None):
logger.info(' loading from {}'.format(embed_file))
embeds = np.load(embed_file)
return embeds
class BuccEvaluator(Evaluator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model_langs = ["share_lang", "order"]
self.proj_matrix_fast = kwargs.get("proj_matrix_fast", None)
if self.proj_matrix_fast is not None:
logger.info("proj_matrix_fast:" + str(self.proj_matrix_fast.size()))
self.proj_matrix_fast = self.proj_matrix_fast[0].float().cuda()
self.res = {}
def get_mean_emb(self, layer_outputs, pool_mask):
embs = (layer_outputs * pool_mask.unsqueeze(2).float()).sum(dim=1) / \
pool_mask.sum(dim=1).view(-1, 1).float()
return embs
def get_cxlm_emb(self, layer_outputs):
if self.proj_matrix_fast is None:
raise ValueError
ret = torch.mm(layer_outputs[:,0,:], self.proj_matrix_fast)
# ret = layer_outputs[:,0,:]
return ret
def get_cls_emb(self, layer_outputs):
return layer_outputs[:,0,:]
def bt_norm(self, x):
m = x.mean(0, keepdim=True)
v = x.var(0, unbiased=True, keepdim=True)
return (x-m) / torch.sqrt(v+1e-5)
def get_embeddings(self, batch, outputs, emb_type=None, is_bt_norm=False):
if emb_type is None:
emb_type = self.args.emb_type
last_layer_outputs, first_token_outputs, all_layer_outputs = outputs
if emb_type == "mean":
ret = self.get_mean_emb(all_layer_outputs[self.args.mean_layer_id], batch["attention_mask"])
elif emb_type == "cls":
ret = self.get_cls_emb(all_layer_outputs[-1])
elif emb_type == "cxlm":
ret = self.get_cxlm_emb(all_layer_outputs[self.args.mean_layer_id]) #TODO
else: raise ValueError
if is_bt_norm:
ret = self.bt_norm(ret)
ret = ret.cpu().numpy().astype(np.float32)
# ret = None
del last_layer_outputs, first_token_outputs, all_layer_outputs
torch.cuda.empty_cache()
return ret
def run(self):
args = self.args
self.model.eval()
best_threshold = None
SL, TL = args.src_language, args.tgt_language
for split in ['test']:
# for split in ['dev', 'test']:
prefix = f'{SL}-{TL}.{split}'
if args.extract_embeds:
for lang in [SL, TL]:
file = os.path.join(args.output_dir, f'{prefix}.{lang}.npy')
if os.path.exists(file):
continue
langpair = f'{SL}-{TL}.{split}'
dl1 = self.get_dataloader(langpair, lang)
all_emb1 = []
for batch1 in dl1:
batch1 = self._parse_batch(batch1, has_label=False)
#forward
with torch.no_grad():
outputs1 = self.model(**batch1)
all_emb1.append(self.get_embeddings(batch1, outputs1, is_bt_norm=args.bt_norm))
all_emb1 = np.concatenate(all_emb1)
file = os.path.join(args.output_dir, f'{prefix}.{lang}.npy')
logger.info('save embed {} to file {}'.format(all_emb1.shape, file))
np.save(file, all_emb1)
if args.mine_bitext:
threshold = None
cand2score_file = os.path.join(args.output_dir, 'candidates.tsv')
x = load_embeddings(os.path.join(args.output_dir, f'{prefix}.{SL}.npy'))
y = load_embeddings(os.path.join(args.output_dir, f'{prefix}.{TL}.npy'))
x_text_file = os.path.join(args.data_dir, f'{prefix}.{SL}.txt')
y_text_file = os.path.join(args.data_dir, f'{prefix}.{TL}.txt')
x_id_file = os.path.join(args.data_dir, f'{prefix}.{SL}.id')
y_id_file = os.path.join(args.data_dir, f'{prefix}.{TL}.id')
mine_bitext(x, y, x_text_file, y_text_file, cand2score_file, dist=args.dist, use_shift_embeds=args.use_shift_embeds)
gold_file = os.path.join(args.data_dir, f'{prefix}.gold')
if os.path.exists(gold_file):
predict_file = os.path.join(args.output_dir, f'test-{SL}.tsv')
results = bucc_eval(cand2score_file, gold_file, x_text_file, y_text_file, x_id_file, y_id_file, predict_file, threshold)
with open(os.path.join(args.output_dir, 'final.txt'), 'w', encoding='utf-8') as f:
f.write(json.dumps(results))
best_threshold = results['best-threshold']
logger.info('--Candidates: {}'.format(cand2score_file))
logger.info(' '.join('{}={:.4f}'.format(k,v) for k,v in results.items()))
# if args.layer_ensemble:
# threshold = None
# prefix = 'mean_l2'
# layers = args.ens_layers.split(',')
#
# cand2score_file = os.path.join(args.output_dir, 'candidates.tsv')
#
# x = load_embeddings(os.path.join(args.output_dir, f'{prefix}.{SL}.npy'))
# y = load_embeddings(os.path.join(args.output_dir, f'{prefix}.{TL}.npy'))
#
# x_text_file = os.path.join(args.data_dir, f'{prefix}.{SL}.txt')
# y_text_file = os.path.join(args.data_dir, f'{prefix}.{TL}.txt')
# x_id_file = os.path.join(args.data_dir, f'{prefix}.{SL}.id')
# y_id_file = os.path.join(args.data_dir, f'{prefix}.{TL}.id')
#
# mine_bitext(x, y, x_text_file, y_text_file, cand2score_file, dist=args.dist, use_shift_embeds=args.use_shift_embeds)
# gold_file = os.path.join(args.data_dir, f'{prefix}.gold')
# if os.path.exists(gold_file):
# predict_file = os.path.join(args.output_dir, f'test-{SL}.tsv')
# results = bucc_eval(cand2score_file, gold_file, x_text_file, y_text_file, x_id_file, y_id_file, predict_file, threshold)
#
# with open(os.path.join(args.output_dir, 'final.txt'), 'w', encoding='utf-8') as f:
# f.write(json.dumps(results))
#
# best_threshold = results['best-threshold']
# logger.info('--Candidates: {}'.format(cand2score_file))
# logger.info(' '.join('{}={:.4f}'.format(k,v) for k,v in results.items()))
# output retrieval results
# with open(os.path.join(args.output_dir, 'test-{0}.tsv'.format(lang1)), 'w', encoding='utf-8') as writer:
# for i, pred in enumerate(predictions):
# writer.write(str(pred[0]) + '\n')
def load_and_cache_examples(self, langpair, lang, **kwargs):
args = self.args
cache_key = "%s-%s" % (args.model_key, args.model_type)
return load_and_cache_examples(
args=args,
langpair=langpair,
lang=lang,
tokenizer=self.tokenizer,
key=cache_key,
prefix=args.data_prefix,
)
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/bretrieval.py |
import faiss
import json
import logging
import numpy as np
import os
import torch
from src.pequod.data.xretrieval import load_and_cache_examples
from src.pequod.eval.evaluator import Evaluator
logger = logging.getLogger(__name__)
def similarity_search(x, y, dim, normalize=False, dist='L2'):
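  # Index the rows of x with a flat FAISS index (inner product for 'cosine', otherwise L2) and search
  # every row of y against it; returns the top-10 neighbour indices and scores, one row per query in y.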
top_k = 10
num = x.shape[0]
if dist == 'cosine':
idx = faiss.IndexFlatIP(dim)
else:
idx = faiss.IndexFlatL2(dim)
if normalize:
faiss.normalize_L2(x)
faiss.normalize_L2(y)
idx.add(x)
scores, prediction = idx.search(y, top_k)
return prediction, scores
class TatoebaEvaluator(Evaluator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model_langs = ["share_lang", "order"]
self.proj_matrix_fast = kwargs.get("proj_matrix_fast", None)
if self.proj_matrix_fast is not None:
logger.info("proj_matrix_fast:" + str(self.proj_matrix_fast.size()))
self.proj_matrix_fast = self.proj_matrix_fast[0].float().cuda()
self.res = {}
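  # Embedding extractors: get_mean_emb mean-pools token vectors over the attention mask,
  # get_cls_emb takes the first (<s>/CLS) token, and get_cxlm_emb projects the CLS vector through proj_matrix_fast.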
def get_mean_emb(self, layer_outputs, pool_mask):
embs = (layer_outputs * pool_mask.unsqueeze(2).float()).sum(dim=1) / \
pool_mask.sum(dim=1).view(-1, 1).float()
return embs
def get_cxlm_emb(self, layer_outputs):
if self.proj_matrix_fast is None:
raise ValueError
ret = torch.mm(layer_outputs[:,0,:], self.proj_matrix_fast)
# ret = layer_outputs[:,0,:]
return ret
def get_cls_emb(self, layer_outputs):
return layer_outputs[:,0,:]
def bt_norm(self, x):
m = x.mean(0, keepdim=True)
v = x.var(0, unbiased=True, keepdim=True)
return (x-m) / torch.sqrt(v+1e-5)
def get_embeddings(self, batch, outputs, emb_type=None, is_bt_norm=False):
if emb_type is None:
emb_type = self.args.emb_type
last_layer_outputs, first_token_outputs, all_layer_outputs = outputs
if emb_type == "mean":
ret = self.get_mean_emb(all_layer_outputs[self.args.mean_layer_id], batch["attention_mask"])
elif emb_type == "cls":
ret = self.get_cls_emb(all_layer_outputs[-1])
elif emb_type == "cxlm":
ret = self.get_cxlm_emb(all_layer_outputs[self.args.mean_layer_id]) #TODO
else: raise ValueError
if is_bt_norm:
ret = self.bt_norm(ret)
ret = ret.cpu().numpy().astype(np.float32)
# ret = None
del last_layer_outputs, first_token_outputs, all_layer_outputs
torch.cuda.empty_cache()
return ret
def run(self):
args = self.args
self.model.eval()
if args.data_prefix == "tatoeba":
langs = ["ara", "bul", "deu", "ell", "spa", "fra", "hin", "rus", "swh", "tha", "tur", "urd", "vie", "cmn"]
langpairs = ["%s-eng" % lang for lang in langs]
elif args.data_prefix == "cxlm":
langpairs = "ar-en bg-en de-en el-en en-es en-fr en-hi en-ru en-sw en-th en-tr en-ur en-vi en-zh".split()
elif args.data_prefix == "debug":
langpairs = ["ar-en" ]
elif args.data_prefix == "tat15plus":
args.data_prefix = "tatoeba"
l15 = set(["ara", "bul", "deu", "ell", "spa", "fra", "hin", "rus", "swh", "tha", "tur", "urd", "vie", "cmn"])
ld = {'ara':'ar', 'heb':'he', 'vie':'vi', 'ind':'id',
'jav':'jv', 'tgl':'tl', 'eus':'eu', 'mal':'ml',
'tel':'te', 'afr':'af', 'nld':'nl', 'deu':'de',
'ell':'el', 'ben':'bn', 'hin':'hi', 'mar':'mr', 'urd':'ur',
'tam':'ta', 'fra':'fr', 'ita':'it', 'por':'pt', 'spa':'es',
'bul':'bg', 'rus':'ru', 'jpn':'ja', 'kat':'ka', 'kor':'ko',
'tha':'th', 'swh':'sw', 'cmn':'zh', 'kaz':'kk', 'tur':'tr',
'est':'et', 'fin':'fi', 'hun':'hu', 'pes':'fa'}
langs_str = 'ar he vi id jv tl eu ml ta te af nl de el bn hi mr ur fa fr it pt es bg ru ja ka ko th sw zh kk tr et fi hu'
#langs_str = 'hi mr ur fa fr it pt es bg ru ja ka ko th sw zh kk tr et fi hu'
#langs_str = 'ar he'
#langs_str = 'ara heb'
langs = langs_str.split(' ')
#for l in ld:
# if l in l15: continue
# langs.append(l)
# langs = ["afr", "jpn", "kor", "kaz", "est", "fin", "hun", "pes"]
langpairs = ["%s-en" % lang for lang in langs]
else: raise ValueError
for langpair in langpairs:
lang1, lang2 = langpair.split("-")
logger.info("Eval langpair: %s" % langpair)
dl1 = self.get_dataloader(langpair, lang1)
dl2 = self.get_dataloader(langpair, lang2)
all_emb1 = []
all_emb2 = []
for batch1, batch2 in zip(dl1, dl2):
batch1 = self._parse_batch(batch1, has_label=False)
batch2 = self._parse_batch(batch2, has_label=False)
#forward
with torch.no_grad():
outputs1 = self.model(**batch1)
all_emb1.append(self.get_embeddings(batch1, outputs1, is_bt_norm=args.bt_norm))
outputs2 = self.model(**batch2)
all_emb2.append(self.get_embeddings(batch2, outputs2, is_bt_norm=args.bt_norm))
all_emb1 = np.concatenate(all_emb1)
all_emb2 = np.concatenate(all_emb2)
emb_sz = all_emb1.shape[-1]
if args.reverse_eval:
all_emb1, all_emb2 = all_emb2, all_emb1
predictions, scores = similarity_search(
all_emb1, all_emb2, emb_sz, normalize=self.args.normalize, dist=self.args.dist)
correct = tot = 0
# output retrieval results
with open(os.path.join(args.output_dir, 'test-{0}.tsv'.format(lang1)), 'w', encoding='utf-8') as writer:
for i, pred in enumerate(predictions):
writer.write(str(pred[0]) + '\n')
with open(os.path.join(args.output_dir, 'test-{0}-scores.tsv'.format(lang1)), 'w', encoding='utf-8') as writer:
for pred, score in zip(predictions, scores):
writer.write(' '.join([str(p) for p in pred]) + '\t' + ' '.join([str(s) for s in score]) + '\n')
for i, pred in enumerate(predictions):
if i == pred[0]: correct += 1
tot += 1
logger.info("langpair:%s acc:%.2f" % (langpair, 100*correct/tot))
self.res[langpair] = 100*correct/tot
#output_fn = os.path.join(args.exp_results_dir, args.exp_name)
#if args.reverse_eval: output_fn += "-rev"
#with open(output_fn, "w") as fp:
# json.dump(self.res, fp)
def load_and_cache_examples(self, langpair, lang, **kwargs):
args = self.args
cache_key = "%s-%s" % (args.model_key, args.model_type)
return load_and_cache_examples(
args=args,
langpair=langpair,
lang=lang,
tokenizer=self.tokenizer,
key=cache_key,
prefix=args.data_prefix,
)
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/xretrieval.py |
# coding=utf-8
# This repository is modified based on the LASER repository.
# https://github.com/facebookresearch/LASER
# Copyright The LASER Team Authors, and The XTREME Benchmark Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for retrieval tasks."""
import os
import sys
import faiss
import tempfile
import numpy as np
def knn(x, y, k, use_gpu, dist='cosine'):
return knnGPU(x, y, k) if use_gpu else knnCPU(x, y, k, dist)
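# knnGPU: exact k-NN computed block-wise so that each (x, y) block pair fits in `mem` bytes; every block is
# searched with a flat inner-product index replicated over all visible GPUs, and the per-block hits are then
# merged and re-sorted. Note that it always uses inner product; the `dist` argument is only honoured on CPU.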
def knnGPU(x, y, k, mem=5*1024*1024*1024):
dim = x.shape[1]
batch_size = mem // (dim*4)
sim = np.zeros((x.shape[0], k), dtype=np.float32)
ind = np.zeros((x.shape[0], k), dtype=np.int64)
for xfrom in range(0, x.shape[0], batch_size):
xto = min(xfrom + batch_size, x.shape[0])
bsims, binds = [], []
for yfrom in range(0, y.shape[0], batch_size):
yto = min(yfrom + batch_size, y.shape[0])
print('{}-{} -> {}-{}'.format(xfrom, xto, yfrom, yto))
idx = faiss.IndexFlatIP(dim)
idx = faiss.index_cpu_to_all_gpus(idx)
idx.add(y[yfrom:yto])
bsim, bind = idx.search(x[xfrom:xto], min(k, yto-yfrom))
bsims.append(bsim)
binds.append(bind + yfrom)
del idx
bsims = np.concatenate(bsims, axis=1)
binds = np.concatenate(binds, axis=1)
aux = np.argsort(-bsims, axis=1)
for i in range(xfrom, xto):
for j in range(k):
sim[i, j] = bsims[i-xfrom, aux[i-xfrom, j]]
ind[i, j] = binds[i-xfrom, aux[i-xfrom, j]]
return sim, ind
def knnCPU(x, y, k, dist='cosine'):
# x: query, y: database
dim = x.shape[1]
if dist == 'cosine':
idx = faiss.IndexFlatIP(dim)
else:
idx = faiss.IndexFlatL2(dim)
idx.add(y)
sim, ind = idx.search(x, k)
if dist != 'cosine':
sim = 1 / (1 + sim)
return sim, ind
def score(x, y, fwd_mean, bwd_mean, margin, dist='cosine'):
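  # Margin score of one candidate pair: raw similarity (dot product for cosine, 1/(1+L2) otherwise)
  # compared by `margin` against the mean of the pair's forward and backward neighbourhood averages.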
if dist == 'cosine':
return margin(x.dot(y), (fwd_mean + bwd_mean) / 2)
else:
l2 = ((x - y) ** 2).sum()
sim = 1 / (1 + l2)
return margin(sim, (fwd_mean + bwd_mean) / 2)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin, dist='cosine'):
print(' - scoring {:d} candidates using {}'.format(x.shape[0], dist))
scores = np.zeros(candidate_inds.shape)
for i in range(scores.shape[0]):
for j in range(scores.shape[1]):
k = candidate_inds[i, j]
scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin, dist)
return scores
def text_load_unify(fname, encoding, unify=True):
print(' - loading texts {:s}: '.format(fname), end='')
fin = open(fname, encoding=encoding, errors='surrogateescape')
inds = []
sents = []
sent2ind = {}
n = 0
nu = 0
for line in fin:
new_ind = len(sent2ind)
inds.append(sent2ind.setdefault(line, new_ind))
if unify:
if inds[-1] == new_ind:
sents.append(line[:-1])
nu += 1
else:
sents.append(line[:-1])
nu += 1
n += 1
print('{:d} lines, {:d} unique'.format(n, nu))
del sent2ind
return inds, sents
def unique_embeddings(emb, ind):
aux = {j: i for i, j in enumerate(ind)}
print(' - unify embeddings: {:d} -> {:d}'.format(len(emb), len(aux)))
return emb[[aux[i] for i in range(len(aux))]]
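# shift_embeddings: align the two spaces by translating each toward the other by the difference of their
# means (x2y = x - delta, y2x = y + delta); used when use_shift_embeds is set during mining.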
def shift_embeddings(x, y):
print(' - shift embeddings')
delta = x.mean(axis=0) - y.mean(axis=0)
x2y = x - delta
y2x = y + delta
return x2y, y2x
def mine_bitext(x, y, src_text_file, trg_text_file, output_file, mode='mine',
retrieval='max', margin='ratio', threshold=0,
neighborhood=4, use_gpu=False, encoding='utf-8', dist='cosine', use_shift_embeds=False):
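  # LASER-style margin-based mining: k-NN in both directions over (optionally shifted) embeddings,
  # margin scoring of the candidate neighbours, then extraction with the 'fwd', 'bwd', 'intersect'
  # or 'max' strategy; `mode` may also be 'search' or 'score' instead of 'mine'.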
src_inds, src_sents = text_load_unify(src_text_file, encoding, True)
trg_inds, trg_sents = text_load_unify(trg_text_file, encoding, True)
x = unique_embeddings(x, src_inds)
y = unique_embeddings(y, trg_inds)
if dist == 'cosine':
faiss.normalize_L2(x)
faiss.normalize_L2(y)
if use_shift_embeds:
x2y, y2x = shift_embeddings(x, y)
# calculate knn in both directions
  if retrieval != 'bwd':
print(' - perform {:d}-nn source against target, dist={}'.format(neighborhood, dist))
if use_shift_embeds:
# project x to y space, and search k-nn ys for each x
x2y_sim, x2y_ind = knn(x2y, y, min(y.shape[0], neighborhood), use_gpu, dist)
x2y_mean = x2y_sim.mean(axis=1)
else:
x2y_sim, x2y_ind = knn(x, y, min(y.shape[0], neighborhood), use_gpu, dist)
x2y_mean = x2y_sim.mean(axis=1)
  if retrieval != 'fwd':
print(' - perform {:d}-nn target against source, dist={}'.format(neighborhood, dist))
if use_shift_embeds:
y2x_sim, y2x_ind = knn(y2x, x, min(x.shape[0], neighborhood), use_gpu, dist)
y2x_mean = y2x_sim.mean(axis=1)
else:
y2x_sim, y2x_ind = knn(y, x, min(x.shape[0], neighborhood), use_gpu, dist)
y2x_mean = y2x_sim.mean(axis=1)
# margin function
if margin == 'absolute':
margin = lambda a, b: a
elif margin == 'distance':
margin = lambda a, b: a - b
else: # margin == 'ratio':
margin = lambda a, b: a / b
fout = open(output_file, mode='w', encoding=encoding, errors='surrogateescape')
if mode == 'search':
print(' - Searching for closest sentences in target')
print(' - writing alignments to {:s}'.format(output_file))
scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
best = x2y_ind[np.arange(x.shape[0]), scores.argmax(axis=1)]
nbex = x.shape[0]
ref = np.linspace(0, nbex-1, nbex).astype(int) # [0, nbex)
err = nbex - np.equal(best.reshape(nbex), ref).astype(int).sum()
print(' - errors: {:d}={:.2f}%'.format(err, 100*err/nbex))
for i in src_inds:
print(trg_sents[best[i]], file=fout)
elif mode == 'score':
for i, j in zip(src_inds, trg_inds):
s = score(x[i], y[j], x2y_mean[i], y2x_mean[j], margin)
print(s, src_sents[i], trg_sents[j], sep='\t', file=fout)
elif mode == 'mine':
print(' - mining for parallel data')
if use_shift_embeds:
fwd_scores = score_candidates(x2y, y, x2y_ind, x2y_mean, y2x_mean, margin)
bwd_scores = score_candidates(y2x, x, y2x_ind, y2x_mean, x2y_mean, margin)
else:
fwd_scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
bwd_scores = score_candidates(y, x, y2x_ind, y2x_mean, x2y_mean, margin)
fwd_best = x2y_ind[np.arange(x.shape[0]), fwd_scores.argmax(axis=1)]
bwd_best = y2x_ind[np.arange(y.shape[0]), bwd_scores.argmax(axis=1)]
print(' - writing alignments to {:s}'.format(output_file))
if threshold > 0:
print(' - with threshold of {:f}'.format(threshold))
if retrieval == 'fwd':
for i, j in enumerate(fwd_best):
print(fwd_scores[i].max(), src_sents[i], trg_sents[j], sep='\t', file=fout)
if retrieval == 'bwd':
for j, i in enumerate(bwd_best):
print(bwd_scores[j].max(), src_sents[i], trg_sents[j], sep='\t', file=fout)
if retrieval == 'intersect':
for i, j in enumerate(fwd_best):
if bwd_best[j] == i:
print(fwd_scores[i].max(), src_sents[i], trg_sents[j], sep='\t', file=fout)
if retrieval == 'max':
indices = np.stack((np.concatenate((np.arange(x.shape[0]), bwd_best)),
np.concatenate((fwd_best, np.arange(y.shape[0])))), axis=1)
scores = np.concatenate((fwd_scores.max(axis=1), bwd_scores.max(axis=1)))
seen_src, seen_trg = set(), set()
for i in np.argsort(-scores):
src_ind, trg_ind = indices[i]
if not src_ind in seen_src and not trg_ind in seen_trg:
seen_src.add(src_ind)
seen_trg.add(trg_ind)
if scores[i] > threshold:
print(scores[i], src_sents[src_ind], trg_sents[trg_ind], sep='\t', file=fout)
fout.close()
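# bucc_optimize: sweep the candidates in descending score order and return the threshold that maximises F1
# against the gold alignments (the threshold is the midpoint between two adjacent candidate scores).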
def bucc_optimize(candidate2score, gold):
items = sorted(candidate2score.items(), key=lambda x: -x[1])
ngold = len(gold)
nextract = ncorrect = 0
threshold = 0
best_f1 = 0
for i in range(len(items)):
nextract += 1
if '\t'.join(items[i][0]) in gold:
ncorrect += 1
if ncorrect > 0:
precision = ncorrect / nextract
recall = ncorrect / ngold
f1 = 2 * precision * recall / (precision + recall)
if f1 > best_f1:
best_f1 = f1
threshold = (items[i][1] + items[i + 1][1]) / 2
return threshold
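# bucc_extract: keep candidate pairs scoring at least `th`; optionally write them to `fname`,
# one tab-separated (src, trg) pair per line.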
def bucc_extract(cand2score, th, fname):
if fname:
of = open(fname, 'w', encoding='utf-8')
bitexts = []
for (src, trg), score in cand2score.items():
if score >= th:
bitexts.append(src + '\t' + trg)
if fname:
of.write(src + '\t' + trg + '\n')
if fname:
of.close()
return bitexts
def read_sent2id(text_file, id_file, encoding='utf-8'):
repeated = set()
sent2id = {}
with open(id_file, encoding=encoding, errors='surrogateescape') as f:
ids = [l.strip() for l in f]
with open(text_file, encoding=encoding, errors='surrogateescape') as f:
sentences = [l.strip() for l in f]
for id, sent in zip(ids, sentences):
if sent in sent2id:
repeated.add(sent)
else:
sent2id[sent] = id
for sent in repeated:
del sent2id[sent]
return sent2id
def read_candidate2score(candidates_file, src_text_file, trg_text_file, src_id_file, trg_id_file, encoding='utf-8'):
print(' - reading sentences {}'.format(candidates_file))
src_sent2id = read_sent2id(src_text_file, src_id_file, encoding)
trg_sent2id = read_sent2id(trg_text_file, trg_id_file, encoding)
print(' - reading candidates {}'.format(candidates_file))
candidate2score = {}
with open(candidates_file, encoding=encoding, errors='surrogateescape') as f:
for line in f:
score, src, trg = line.split('\t')
score = float(score)
src = src.strip()
trg = trg.strip()
if src in src_sent2id and trg in trg_sent2id:
src_id = src_sent2id[src]
trg_id = trg_sent2id[trg]
score = max(score, candidate2score.get((src_id, trg_id), score))
candidate2score[(src_id, trg_id)] = score
return candidate2score
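# bucc_eval: score the candidate pairs, pick a threshold (the given one, or optimised on the gold file),
# extract bitexts to `predict_file` and report precision/recall/F1 in percent along with the threshold used.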
def bucc_eval(candidates_file, gold_file, src_file, trg_file, src_id_file, trg_id_file, predict_file, threshold=None, encoding='utf-8'):
candidate2score = read_candidate2score(candidates_file, src_file, trg_file, src_id_file, trg_id_file, encoding)
if threshold is not None and gold_file is None:
print(' - using threshold {}'.format(threshold))
else:
print(' - optimizing threshold on gold alignments {}'.format(gold_file))
gold = {line.strip() for line in open(gold_file)}
threshold = bucc_optimize(candidate2score, gold)
bitexts = bucc_extract(candidate2score, threshold, predict_file)
if gold_file is not None:
ncorrect = len(gold.intersection(bitexts))
if ncorrect > 0:
precision = ncorrect / len(bitexts)
recall = ncorrect / len(gold)
f1 = 2*precision*recall / (precision + recall)
else:
precision = recall = f1 = 0
print(' - best threshold={:f}: precision={:.2f}, recall={:.2f}, F1={:.2f}'
.format(threshold, 100*precision, 100*recall, 100*f1))
return {'best-threshold': threshold, 'precision': 100*precision, 'recall': 100*recall, 'F1': 100*f1}
else:
return None
def similarity_search(x, y, dim, normalize=False):
num = x.shape[0]
idx = faiss.IndexFlatL2(dim)
if normalize:
faiss.normalize_L2(x)
faiss.normalize_L2(y)
idx.add(x)
scores, prediction = idx.search(y, 1)
return prediction | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/utils_retrieve.py |
import logging
import torch
from torch.utils.data import DataLoader
from src.pequod.training.trainer import to_cuda
logger = logging.getLogger(__name__)
class Evaluator(object):
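  # Base evaluation harness: subclasses implement run() and load_and_cache_examples(); datasets are
  # cached per argument tuple and served through a DataLoader with batch size args.eval_batch_size.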
def __init__(self, args, model, tokenizer, **kwargs):
self.args = args
self.datasets = {}
self.model = model
self.tokenizer = tokenizer
def _parse_batch(self, batch, has_label=True, **kwargs):
_batch = to_cuda(batch)
# _batch = batch
ret = {"input_ids": _batch[0],
"attention_mask": _batch[1],
"token_type_ids": _batch[2] if self.args.model_type == "bert" else None,}
if has_label: ret["labels"] = _batch[3]
ret.update(**kwargs)
return ret
def run(self):
raise NotImplementedError
def get_dataset(self, *args, **kwargs):
if args in self.datasets: return self.datasets[args]
dataset = self.load_and_cache_examples(*args, **kwargs)
self.datasets[args] = dataset
return dataset
def load_and_cache_examples(self, *args, **kwargs):
raise NotImplementedError
def get_dataloader(self, *args, **kwargs):
logger.info("Getting dataloader - args: %s" % str(args))
dataset = kwargs.pop("dataset", self.get_dataset(*args, **kwargs))
dataloader = DataLoader(dataset, batch_size=self.args.eval_batch_size)
return dataloader
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/eval/evaluator.py |
 | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/text/__init__.py |
import os
import logging
import sentencepiece as spm
from transformers.tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
class XLMRTokenizer(PreTrainedTokenizer):
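  # Tokenizer for fairseq-style XLM-R checkpoints: a sentencepiece BPE model for tokenization plus a
  # dict.txt vocabulary, with <s>, <pad>, </s>, <unk> inserted first to match the usual fairseq dictionary layout.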
def __init__(self, bpe_file, dict_file, **kwargs):
super(XLMRTokenizer, self).__init__(
bos_token="<s>",
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
sep_token="</s>",
cls_token="<s>",
**kwargs)
self.max_len_single_sentence = self.max_len - 2
self.max_len_sentences_pair = self.max_len - 4
self.sp = spm.SentencePieceProcessor()
self.sp.Load(bpe_file)
self.encoder = {}
self.decoder = []
for token in [self.bos_token, self.pad_token, self.eos_token, self.unk_token]:
self._add_token(token)
with open(dict_file, encoding="utf-8") as fp:
for line in fp:
# NOTE DO NOT USE .split()
tokens_cnt = line.rstrip().split(" ")
try:
assert len(tokens_cnt) >= 2, line
except AssertionError:
logger.error(
"tokenizer line %s asserterror, replaced as <unk-%d>" % (
line, len(self.decoder)))
exit(0)
self._add_token(" ".join(tokens_cnt[:-1]))
def _add_token(self, token):
idx = len(self.encoder)
self.encoder[token] = idx
self.decoder.append(token)
def _tokenize(self, text):
return self.sp.EncodeAsPieces(text)
def _convert_id_to_token(self, index):
return self.decoder[index]
def _convert_token_to_id(self, token):
return self.encoder.get(token, self.encoder.get(self.unk_token))
def convert_tokens_to_string(self, tokens):
return "".join(tokens).replace('\u2581', ' ').strip()
@classmethod
def from_pretrained(cls, model_path, **kwargs):
bpe_file = os.path.join(model_path, "sentencepiece.bpe.model")
dict_file = os.path.join(model_path, "dict.txt")
tokenizer = cls(bpe_file, dict_file)
return tokenizer
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError("You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.")
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(sep + token_ids_1 + sep) * [1]
if __name__ == "__main__":
tokenizer = XLMRTokenizer.from_pretrained("/home/v-zechi/data/unilm/zechi/exp/bert_data/xlmr-large")
for text in ["Hello world!", "你好,世界", "नमस्ते दुनिया", "مرحبا بالعالم", "Bonjour le monde"]:
print(tokenizer.tokenize(text))
print(tokenizer.encode_plus(text, text, add_special_tokens=True))
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/text/tokenization_sentencepiece.py |
"""Loading examples and features for CLS and MLDoc"""
import logging
import os
import torch
from transformers.data.processors.utils import (DataProcessor,
InputExample, InputFeatures)
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
logger = logging.getLogger(__name__)
def get_processor_class(dataset_name):
if dataset_name == "MLDoc": return MLDocProcessor
elif dataset_name == "CLS": return CLSProcessor
elif dataset_name == "XNLI": return XNLIProcesser
elif dataset_name == "TriXNLI": return TriXNLIProcesser
else: raise ValueError
def xdoc_convert_examples_to_features(
processor, examples, tokenizer, max_length, label_list,
pad_token=0, pad_token_segment_id=0, mask_padding_with_zero=True):
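  # Convert classification InputExamples into fixed-length InputFeatures: encode text_a/text_b with special
  # tokens, pad input ids, attention mask and token type ids up to max_length, and map the string label
  # to its index in label_list.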
if label_list is None: label_list = processor.get_labels()
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % ex_index)
inputs = tokenizer.encode_plus(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
padding_length = max_length - len(input_ids)
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)
label = label_map[example.label]
if ex_index < 3:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(InputFeatures(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label))
return features
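# load_and_cache_examples: build (or reload from cache) the features of one split/language and return them
# as a TensorDataset of (input_ids, attention_mask, token_type_ids, labels).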
def load_and_cache_examples(args, processor, split, lang, tokenizer, key=""):
cache_filename = os.path.join(
args.data_dir, "cached_%s_%s_%s" % (split, lang, key))
if os.path.exists(cache_filename) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" % cache_filename)
features = torch.load(cache_filename)
else:
logger.info("Creating features from dataset file at %s" % args.data_dir)
label_list = processor.get_labels()
examples = processor.get_examples(args.data_dir, split, lang)
logger.info("%d Examples loaded" % len(examples))
features = xdoc_convert_examples_to_features(
processor, examples, tokenizer, max_length=args.max_seq_length,
label_list=label_list, pad_token_segment_id=0,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0])
logger.info("Saving features to cache file %s" % cache_filename)
torch.save(features, cache_filename)
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
class XDocProcessor(DataProcessor):
"""Processor for the MLDoc dataset."""
def get_example_from_tensor_dict(self, tensor_dict):
return InputExample(
tensor_dict["idx"].numpy(),
tensor_dict["sentence"].numpy().decode("utf-8"),
str(tensor_dict["label"].numpy()))
def get_examples(self, data_dir, split, lang):
filename = "%s-%s.tsv" % (split, lang)
logger.info("LOOKING AT %s" % os.path.join(data_dir, filename))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, filename)), filename)
def _create_examples(self, lines, set_type):
examples = []
for i, line in enumerate(lines):
guid = "%s-%s" % (set_type, i)
try:
label, text_a = line[0], line[1]
except IndexError:
logger.warn("IndexError while decomposing line %s" % str(line))
logger.warn("Line ignored... Loop continued...")
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class MLDocProcessor(XDocProcessor):
def get_labels(self): return ["ECAT", "CCAT", "GCAT", "MCAT"]
class CLSProcessor(XDocProcessor):
def get_labels(self): return ["0", "1"]
class XNLIProcesser(XDocProcessor):
"""data format: a pair: (label, text)"""
def get_labels(self): return ["neutral", "entailment", "contradiction"]
class TriXNLIProcesser(XNLIProcesser):
"""data format: a 3-tuple: (label, text-a, text-b)"""
def _create_examples(self, lines, set_type):
examples = []
for i, line in enumerate(lines):
guid = "%s-%s" % (set_type, i)
label, text_a, text_b = line[0], line[1], line[2]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/xdoc.py |
import os
import logging
import torch
from torch.utils.data import TensorDataset
from src.pequod.data.utils_squad import (read_squad_examples,
convert_examples_to_features)
logger = logging.getLogger(__name__)
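# load_and_cache_examples: read the SQuAD-format "{split}-{lang}.json" file, convert it to features
# (cached on disk) and return (dataset, examples, features); for evaluation the dataset carries example
# indices, for training it carries the answer start/end positions.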
def load_and_cache_examples(args, split, lang, tokenizer, key="", evaluate=False):
cache_filename = os.path.join(
args.data_dir, "cached_%s_%s_%s" % (split, lang, key))
input_file = os.path.join(args.data_dir, "%s-%s.json" % (split, lang))
if os.path.exists(cache_filename):
logger.info("Loading features from cached file %s", cache_filename)
features = torch.load(cache_filename)
if evaluate:
examples = read_squad_examples(input_file=input_file,
is_training=not evaluate,
version_2_with_negative=args.version_2_with_negative)
else: examples = None
else:
logger.info("Creating features from dataset file at %s", input_file)
examples = read_squad_examples(input_file=input_file,
is_training=not evaluate,
version_2_with_negative=args.version_2_with_negative)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, max_query_length=args.max_query_length,
is_training=not evaluate, cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token)
logger.info("Saving features into cached file %s", cache_filename)
torch.save(features, cache_filename)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in features], dtype=torch.long)
all_cls_index = torch.tensor(
[f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor(
[f.p_mask for f in features], dtype=torch.float)
if evaluate:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_example_index, all_cls_index, all_p_mask)
else:
all_start_positions = torch.tensor(
[f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor(
[f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions, all_cls_index, all_p_mask)
return dataset, examples, features
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/xqa.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Load SQuAD dataset. """
from __future__ import absolute_import, division, print_function
import json
import logging
import math
import collections
from io import open
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
# Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method)
from src.pequod.data.utils_squad_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores
logger = logging.getLogger(__name__)
class SquadExample(object):
"""
A single training/test example for the Squad dataset.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (self.qas_id)
s += ", question_text: %s" % (
self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
cls_index,
p_mask,
paragraph_len,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.cls_index = cls_index
self.p_mask = p_mask
self.paragraph_len = paragraph_len
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
cls_token_at_end=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
# cnt_pos, cnt_neg = 0, 0
# max_N, max_M = 1024, 1024
# f = np.zeros((max_N, max_M), dtype=np.float32)
features = []
for (example_index, example) in enumerate(examples):
# if example_index % 100 == 0:
# logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg)
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
# p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
# Original TF implem also keep the classification token (set to 0) (not sure why...)
p_mask = []
# CLS token at the beginning
if not cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# Query
for token in query_tokens:
tokens.append(token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(sequence_b_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
# CLS token at the end
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = len(tokens) - 1 # Index of classification token
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = cls_index
end_position = cls_index
if example_index < 2:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and span_is_impossible:
logger.info("impossible example")
if is_training and not span_is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible))
unique_id += 1
return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
# Question: What country is the top exporter of electornics?
# Context: The Japanese electronics industry is the lagest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, verbose_logging,
version_2_with_negative, null_score_diff_threshold):
"""Write final predictions to the json file and log-odds of null if needed."""
logger.info("Writing predictions to: %s" % (output_prediction_file))
logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="",
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest)==1:
nbest.insert(0,
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
# For XLNet (and XLM which uses the same head)
RawResultExtended = collections.namedtuple("RawResultExtended",
["unique_id", "start_top_log_probs", "start_top_index",
"end_top_log_probs", "end_top_index", "cls_logits"])
def write_predictions_extended(all_examples, all_features, all_results, n_best_size,
max_answer_length, output_prediction_file,
output_nbest_file,
output_null_log_odds_file, orig_data_file,
start_n_top, end_n_top, version_2_with_negative,
tokenizer, verbose_logging):
""" XLNet write prediction logic (more complex than Bert's).
Write final predictions to the json file and log-odds of null if needed.
Requires utils_squad_evaluate.py
"""
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index",
"start_log_prob", "end_log_prob"])
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_log_prob", "end_log_prob"])
logger.info("Writing predictions to: %s", output_prediction_file)
# logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
cur_null_score = result.cls_logits
# if we could have irrelevant answers, get the min score of irrelevant
score_null = min(score_null, cur_null_score)
for i in range(start_n_top):
for j in range(end_n_top):
start_log_prob = result.start_top_log_probs[i]
start_index = result.start_top_index[i]
j_index = i * end_n_top + j
end_log_prob = result.end_top_log_probs[j_index]
end_index = result.end_top_index[j_index]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= feature.paragraph_len - 1:
continue
if end_index >= feature.paragraph_len - 1:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_log_prob=start_log_prob,
end_log_prob=end_log_prob))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_log_prob + x.end_log_prob),
reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
# XLNet un-tokenizer
# Let's keep it simple for now and see if we need all this later.
#
# tok_start_to_orig_index = feature.tok_start_to_orig_index
# tok_end_to_orig_index = feature.tok_end_to_orig_index
# start_orig_pos = tok_start_to_orig_index[pred.start_index]
# end_orig_pos = tok_end_to_orig_index[pred.end_index]
# paragraph_text = example.paragraph_text
# final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
# Previously used Bert untokenizer
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case,
verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_log_prob=pred.start_log_prob,
end_log_prob=pred.end_log_prob))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="", start_log_prob=-1e6,
end_log_prob=-1e6))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_log_prob"] = entry.start_log_prob
output["end_log_prob"] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
assert best_non_null_entry is not None
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
# note(zhiliny): always predict best_non_null_entry
# and the evaluation script will search for the best threshold
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
with open(orig_data_file, "r", encoding='utf-8') as reader:
orig_data = json.load(reader)["data"]
qid_to_has_ans = make_qid_to_has_ans(orig_data)
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions)
out_eval = {}
find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans)
return out_eval
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
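# Usage sketch (illustrative helper, not called in this module): the heuristic
# above projects a normalized prediction back onto the original, un-normalized
# text, recovering casing and dropping characters outside the predicted span.
def _get_final_text_example():
    # With do_lower_case=True, "steve smith" found inside "Steve Smith's" is
    # expected to map back to "Steve Smith": original casing is restored and
    # the trailing "'s" outside the span is dropped.
    return get_final_text("steve smith", "Steve Smith's", do_lower_case=True)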
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
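# Usage sketch (illustrative helper, never invoked): ties the two helpers above
# together on a toy list of logits.
def _nbest_helpers_example():
    logits = [0.1, 2.0, -1.0, 3.5]
    best = _get_best_indexes(logits, n_best_size=2)        # -> [3, 1]
    probs = _compute_softmax([logits[i] for i in best])    # length-2, sums to 1.0
    return best, probs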
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/utils_squad.py |
import logging
from transformers.data.processors.utils import InputFeatures
logger = logging.getLogger(__name__)
def convert_examples_to_features(
processor, examples, tokenizer, max_length, label_list,
pad_token=0, pad_token_segment_id=0, mask_padding_with_zero=True):
if label_list is None: label_list = processor.get_labels()
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % ex_index)
inputs = tokenizer.encode_plus(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
padding_length = max_length - len(input_ids)
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)
label = label_map[example.label]
if ex_index < 3:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(InputFeatures(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label))
return features | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/__init__.py |
""" Official evaluation script for SQuAD version 2.0.
Modified by XLNet authors to update `find_best_threshold` scripts for SQuAD V2.0
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
class EVAL_OPTS():
def __init__(self, data_file, pred_file, out_file="",
na_prob_file="na_prob.json", na_prob_thresh=1.0,
out_image_dir=None, verbose=False):
self.data_file = data_file
self.pred_file = pred_file
self.out_file = out_file
self.na_prob_file = na_prob_file
self.na_prob_thresh = na_prob_thresh
self.out_image_dir = out_image_dir
self.verbose = verbose
OPTS = None
def parse_args():
parser = argparse.ArgumentParser('Official evaluation script for SQuAD version 2.0.')
parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
parser.add_argument('--out-file', '-o', metavar='eval.json',
help='Write accuracy metrics to file (default is stdout).')
parser.add_argument('--na-prob-file', '-n', metavar='na_prob.json',
help='Model estimates of probability of no answer.')
parser.add_argument('--na-prob-thresh', '-t', type=float, default=1.0,
help='Predict "" if no-answer probability exceeds this (default = 1.0).')
parser.add_argument('--out-image-dir', '-p', metavar='out_images', default=None,
help='Save precision-recall curves to directory.')
parser.add_argument('--verbose', '-v', action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s: return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
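# Worked example (illustrative helper, not called by the script): token-level F1
# after normalization, which also strips articles such as "the".
def _compute_f1_example():
    # gold "the cat sat" normalizes to ["cat", "sat"]; pred "cat sat down"
    # normalizes to ["cat", "sat", "down"]; overlap = 2 tokens, so
    # precision = 2/3, recall = 1.0, F1 = 2*p*r/(p+r) = 0.8.
    return compute_f1("the cat sat", "cat sat down")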
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer(a['text'])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('total', total),
])
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
plt.step(recalls, precisions, color='b', alpha=0.2, where='post')
plt.fill_between(recalls, precisions, step='post', alpha=0.2, color='b')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.xlim([0.0, 1.05])
plt.ylim([0.0, 1.05])
plt.title(title)
plt.savefig(out_image)
plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=None, title=None):
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
true_pos = 0.0
cur_p = 1.0
cur_r = 0.0
precisions = [1.0]
recalls = [0.0]
avg_prec = 0.0
for i, qid in enumerate(qid_list):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
cur_p = true_pos / float(i+1)
cur_r = true_pos / float(num_true_pos)
if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i+1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(cur_p)
recalls.append(cur_r)
if out_image:
plot_pr_curve(precisions, recalls, out_image, title)
return {'ap': 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, out_image_dir):
if out_image_dir and not os.path.exists(out_image_dir):
os.makedirs(out_image_dir)
num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
pr_exact = make_precision_recall_eval(
exact_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_exact.png'),
title='Precision-Recall curve for Exact Match score')
pr_f1 = make_precision_recall_eval(
f1_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_f1.png'),
title='Precision-Recall curve for F1 score')
oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
pr_oracle = make_precision_recall_eval(
oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')
merge_eval(main_eval, pr_exact, 'pr_exact')
merge_eval(main_eval, pr_f1, 'pr_f1')
merge_eval(main_eval, pr_oracle, 'pr_oracle')
def histogram_na_prob(na_probs, qid_list, image_dir, name):
if not qid_list:
return
x = [na_probs[k] for k in qid_list]
weights = np.ones_like(x) / float(len(x))
plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
plt.xlabel('Model probability of no-answer')
plt.ylabel('Proportion of dataset')
plt.title('Histogram of no-answer probability: %s' % name)
plt.savefig(os.path.join(image_dir, 'na_prob_hist_%s.png' % name))
plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores: continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
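# Worked example (illustrative helper, never invoked): the sweep above starts
# from the score of answering "" everywhere, visits questions in order of
# increasing no-answer probability, switches each one to its predicted answer,
# and keeps the threshold at which the running score peaks.
def _find_best_thresh_example():
    preds = {"q1": "Paris", "q2": ""}
    exact = {"q1": 1, "q2": 1}
    na_probs = {"q1": 0.1, "q2": 0.9}
    qid_to_has_ans = {"q1": True, "q2": False}
    # Expected result: (100.0, 0.1) -- both questions scored correct, with the
    # best threshold placed at q1's no-answer probability.
    return find_best_thresh(preds, exact, na_probs, qid_to_has_ans)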
def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for i, qid in enumerate(qid_list):
if qid not in scores: continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
has_ans_score, has_ans_cnt = 0, 0
for qid in qid_list:
if not qid_to_has_ans[qid]: continue
has_ans_cnt += 1
if qid not in scores: continue
has_ans_score += scores[qid]
return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval['best_exact'] = best_exact
main_eval['best_exact_thresh'] = exact_thresh
main_eval['best_f1'] = best_f1
main_eval['best_f1_thresh'] = f1_thresh
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval['best_exact'] = best_exact
main_eval['best_exact_thresh'] = exact_thresh
main_eval['best_f1'] = best_f1
main_eval['best_f1_thresh'] = f1_thresh
main_eval['has_ans_exact'] = has_ans_exact
main_eval['has_ans_f1'] = has_ans_f1
def main(OPTS):
with open(OPTS.data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
with open(OPTS.pred_file) as f:
preds = json.load(f)
if OPTS.na_prob_file:
with open(OPTS.na_prob_file) as f:
na_probs = json.load(f)
else:
na_probs = {k: 0.0 for k in preds}
qid_to_has_ans = make_qid_to_has_ans(dataset) # maps qid to True/False
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(dataset, preds)
exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans,
OPTS.na_prob_thresh)
f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans,
OPTS.na_prob_thresh)
out_eval = make_eval_dict(exact_thresh, f1_thresh)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
if OPTS.na_prob_file:
find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, OPTS.out_image_dir)
histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, 'hasAns')
histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, 'noAns')
if OPTS.out_file:
with open(OPTS.out_file, 'w') as f:
json.dump(out_eval, f)
else:
print(json.dumps(out_eval, indent=2))
return out_eval
if __name__ == '__main__':
OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main(OPTS)
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/utils_squad_evaluate.py |
"""Load examples from BUCC"""
import logging
import os
import torch
from transformers.data.processors.utils import (
DataProcessor, InputExample, InputFeatures)
from torch.utils.data import (
DataLoader, RandomSampler, SequentialSampler, TensorDataset)
logger = logging.getLogger(__name__)
def load_and_cache_examples(args, langpair, lang, tokenizer, key="", prefix="tatoeba"):
cache_dir = os.path.join(args.data_dir, "pequod_cache")
os.makedirs(cache_dir, exist_ok=True)
cache_filename = os.path.join(
cache_dir, "cached_%s_%s_%s" % (langpair, lang, key))
if os.path.exists(cache_filename) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" % cache_filename)
features = torch.load(cache_filename)
else:
processor = TatoebaProcesser()
logger.info("Creating features from dataset file at %s" % args.data_dir)
examples = processor.get_examples(args.data_dir, langpair, lang, prefix)
features = TatoebaProcesser.convert_examples_to_features(
examples, tokenizer, args.max_seq_length, 0,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],)
#logger.info("Saving features to cache file %s" % cache_filename)
#torch.save(features, cache_filename)
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids)
return dataset
class TatoebaProcesser(DataProcessor):
@classmethod
def convert_examples_to_features(cls, examples, tokenizer, max_length, pad_token_segment_id, pad_token, mask_padding_with_zero=True):
features = []
for ex_index, example in enumerate(examples):
inputs = tokenizer.encode_plus(
example.text_a,
None,
add_special_tokens=True,
max_length=max_length,
)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
padding_length = max_length - len(input_ids)
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)
if ex_index < 3:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
features.append(InputFeatures(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=None,
))
return features
def get_examples(self, data_dir, langpair, lang, prefix="tatoeba"):
examples = []
if prefix == "bucc":
fn = os.path.join(data_dir, "%s.%s.txt" % (langpair, lang))
else:
fn = os.path.join(data_dir, "%s.%s" % (langpair, lang))
#fn = os.path.join(data_dir, "%s.%s.%s" % (prefix, langpair, lang))
with open(fn, encoding='utf-8') as fp:
for i, line in enumerate(fp):
line = line.strip()
examples.append(InputExample(
guid="%s-%s-%d" % (langpair, lang, i),
text_a=line,
))
return examples
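# Illustrative file layout assumed by get_examples above (language-pair and
# language codes are examples only):
#   BUCC:    <data_dir>/de-en.en.txt     (prefix="bucc")
#   Tatoeba: <data_dir>/deu-eng.eng      (prefix="tatoeba")
# Each line holds one raw sentence; the line index i becomes part of the guid.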
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/xretrieval.py |
EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/dataloader.py |
|
"""Loading examples and features for WiLI-2018 dataset"""
import logging
import os
import torch
from transformers.data.processors.utils import (DataProcessor,
InputExample, InputFeatures)
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from src.data import convert_examples_to_features
from src.io import lines_gen
logger = logging.getLogger(__name__)
_alias2lang = {}
_lang2id = {}
_langs = []
def get_alias2lang(data_dir):
if len(_alias2lang) > 0: return _alias2lang, _lang2id, _langs
for line, in lines_gen(os.path.join(data_dir, "labels-new")):
value = None
for alias in line.split(";"):
alias = alias.strip()
if alias == "": continue
if value is None: value = alias
_alias2lang[alias] = value
_langs.append(value)
for i, lang in enumerate(_langs): _lang2id[lang] = i
return _alias2lang, _lang2id, _langs
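# Illustrative "labels-new" line (format inferred from the parsing above):
# each line lists a language and its aliases separated by ";", e.g.
#   eng; English; en
# Every alias ("eng", "English", "en") maps to the first entry ("eng"), which
# also receives an integer id via _lang2id.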
def load_and_cache_examples(args, data_dir, split, run_lang2id, tokenizer, key=""):
cache_filename = os.path.join(
data_dir, "cached_%s_%s" % (split, key))
if os.path.exists(cache_filename) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" % cache_filename)
features = torch.load(cache_filename)
else:
processor = WiliProcessor()
logger.info("Creating features from dataset file at %s" % data_dir)
label_list = processor.get_labels(data_dir)
examples = processor.get_examples(data_dir, split)
logger.info("%d Examples loaded" % len(examples))
features = convert_examples_to_features(
processor, examples, tokenizer, max_length=args.max_seq_length,
label_list=label_list, pad_token_segment_id=0,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0])
logger.info("Saving features to cache file %s" % cache_filename)
torch.save(features, cache_filename)
# Cut dataset to test langs
alias2lang, lang2id, _ = get_alias2lang(data_dir)
test_lang_ids = {lang2id[alias2lang[lang]] for lang in run_lang2id.keys()}
wili_id2run_langid = {
lang2id[alias2lang[lang]]:val for lang, val in run_lang2id.items()}
all_input_ids, all_attention_mask = [], []
all_token_type_ids, all_labels = [], []
for f in features:
if f.label not in test_lang_ids: continue
all_input_ids.append(f.input_ids)
all_attention_mask.append(f.attention_mask)
all_token_type_ids.append(f.token_type_ids)
all_labels.append(wili_id2run_langid[f.label])
all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)
all_attention_mask = torch.tensor(all_attention_mask, dtype=torch.long)
all_token_type_ids = torch.tensor(all_token_type_ids, dtype=torch.long)
all_labels = torch.tensor(all_labels, dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
class WiliProcessor(DataProcessor):
def get_examples(self, data_dir, split):
examples = []
filename_x = os.path.join(data_dir, "x_%s.txt" % split)
filename_y = os.path.join(data_dir, "y_%s.txt" % split)
for i, (line_x, line_y) in enumerate(lines_gen(filename_x, filename_y)):
guid = "%s-%s" % (split, i)
examples.append(
InputExample(guid=guid, text_a=line_x, text_b=None, label=line_y))
return examples
def get_labels(self, data_dir):
_, _, langs = get_alias2lang(data_dir)
return langs
| EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/wili.py |
import torch
from torch.utils.data.sampler import Sampler
class SubSampler(Sampler):
def __init__(self, data_source, num_samples):
self.data_source = data_source
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __iter__(self):
n = len(self.data_source)
if self.num_samples <= n:
return iter(torch.randperm(n).tolist()[:self.num_samples])
return iter(torch.randint(high=n, size=(self.num_samples,), dtype=torch.int64).tolist()) | EXA-1-master | exa/models/unilm-master/xtune/src/pequod/data/sampler.py |
import ast
import logging
import os
import os.path as op
import sys
from argparse import Namespace
import numpy as np
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from omegaconf import DictConfig
# define function for plot prob and att_ws
def _plot_and_save(array, figname, figsize=(6, 4), dpi=150):
import matplotlib.pyplot as plt
shape = array.shape
if len(shape) == 1:
# for eos probability
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(array)
plt.xlabel("Frame")
plt.ylabel("Probability")
plt.ylim([0, 1])
elif len(shape) == 2:
# for tacotron 2 attention weights, whose shape is (out_length, in_length)
plt.figure(figsize=figsize, dpi=dpi)
plt.imshow(array, aspect="auto")
elif len(shape) == 4:
# for transformer attention weights,
# whose shape is (#leyers, #heads, out_length, in_length)
plt.figure(figsize=(figsize[0] * shape[0], figsize[1] * shape[1]), dpi=dpi)
for idx1, xs in enumerate(array):
for idx2, x in enumerate(xs, 1):
plt.subplot(shape[0], shape[1], idx1 * shape[1] + idx2)
plt.imshow(x, aspect="auto")
plt.xlabel("Input")
plt.ylabel("Output")
else:
raise NotImplementedError("Support only from 1D to 4D array.")
plt.tight_layout()
if not op.exists(op.dirname(figname)):
# NOTE: exist_ok = True is needed for parallel process decoding
os.makedirs(op.dirname(figname), exist_ok=True)
plt.savefig(figname)
plt.close()
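# Usage sketch (illustrative helper, not called by this script; the output path
# is an assumption): a (out_len, in_len) attention matrix is rendered as a
# single heatmap, while a 4-D (#layers, #heads, out_len, in_len) array becomes
# a grid of heatmaps.
def _plot_and_save_example(figname="demo/att_ws_example.png"):
    att_ws = np.random.rand(20, 35)  # tacotron-style 2-D attention weights
    _plot_and_save(att_ws, figname)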
# define a function to calculate the focus rate
# (see section 3.3 in https://arxiv.org/abs/1905.09263)
def _calculate_focus_rate(att_ws):
if att_ws is None:
# fastspeech case -> None
return 1.0
elif len(att_ws.shape) == 2:
# tacotron 2 case -> (L, T)
return float(att_ws.max(dim=-1)[0].mean())
elif len(att_ws.shape) == 4:
# transformer case -> (#layers, #heads, L, T)
return float(att_ws.max(dim=-1)[0].mean(dim=-1).max())
else:
raise ValueError("att_ws should be 2 or 4 dimensional tensor.")
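# Worked example (illustrative helper, never invoked): for a (L, T) attention
# matrix the focus rate is the mean over output frames of the maximum attention
# weight per frame; a perfectly peaked alignment gives 1.0, a uniform one 1/T.
def _focus_rate_example():
    att_ws = torch.eye(4)  # perfectly peaked (L, T) alignment
    return _calculate_focus_rate(att_ws)  # -> 1.0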
def main(cfg: DictConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
assert cfg.common_eval.path is not None, "--path required for generation!"
assert (
cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw"
), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
if cfg.common_eval.results_path is not None:
os.makedirs(cfg.common_eval.results_path, exist_ok=True)
return _main(cfg, sys.stdout)
def _main(cfg: DictConfig, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("speecht5.generate_speech")
utils.import_user_module(cfg.common)
assert cfg.dataset.batch_size == 1, "only support batch size 1"
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if not use_cuda:
logger.info("generate speech on cpu")
# build task
task = tasks.setup_task(cfg.task)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
logger.info(saved_cfg)
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
# optimize ensemble for generation
for model in models:
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=None,
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
for i, sample in enumerate(progress):
if "net_input" not in sample:
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
outs, _, attn = task.generate_speech(
models,
sample["net_input"],
)
focus_rate = _calculate_focus_rate(attn)
outs = outs.cpu().numpy()
audio_name = op.basename(sample['name'][0])
np.save(op.join(cfg.common_eval.results_path, audio_name.replace(".wav", "-feats.npy")), outs)
logging.info(
"{} (size: {}->{} ({}), focus rate: {:.3f})".format(
sample['name'][0],
sample['src_lengths'][0].item(),
outs.shape[0],
sample['dec_target_lengths'][0].item(),
focus_rate
)
)
if i < 6 and attn is not None:
import shutil
demo_dir = op.join(op.dirname(cfg.common_eval.results_path), "demo")
audio_dir = op.join(demo_dir, "audio")
os.makedirs(audio_dir, exist_ok=True)
shutil.copy(op.join(task.dataset(cfg.dataset.gen_subset).audio_root, sample['tgt_name'][0] if "tgt_name" in sample else sample['name'][0]), op.join(audio_dir, audio_name))
att_dir = op.join(demo_dir, "att_ws")
_plot_and_save(attn.cpu().numpy(), op.join(att_dir, f"{audio_name}_att_ws.png"))
spec_dir = op.join(demo_dir, "spec")
_plot_and_save(outs.T, op.join(spec_dir, f"{audio_name}_gen.png"))
_plot_and_save(sample["target"][0].cpu().numpy().T, op.join(spec_dir, f"{audio_name}_ori.png"))
def cli_main():
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/models/unilm-master/speecht5/scripts/generate_speech.py |
from . import data, tasks, criterions, models # noqa | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/__init__.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import math
from typing import Dict, List, Optional
import sys
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
from fairseq.ngram_repeat_block import NGramRepeatBlock
from espnet.nets.ctc_prefix_score import CTCPrefixScore
import numpy
CTC_SCORING_RATIO = 7.0
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
max_len=0,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
ctc_weight=0.0,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
max_len (int, optional): the maximum length of the generated output
(not including end-of-sentence)
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.blank = self.tgt_dict.index("<ctc_blank>")
self.mask = self.tgt_dict.index("<mask>")
self.mask_idxs = []
if self.tgt_dict.index("<mask>0") != self.unk:
count = 0
while self.tgt_dict.index("<mask>" + str(count)) != self.unk:
self.mask_idxs.append(self.tgt_dict.index("<mask>" + str(count)))
count += 1
self.mask_idxs = torch.tensor(self.mask_idxs)
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.max_len = max_len or self.model.max_decoder_positions()
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
if no_repeat_ngram_size > 0:
self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
else:
self.repeat_ngram_blocker = None
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
self.ctc_weight = ctc_weight
if self.lm_model is not None:
self.lm_model.eval()
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
utils.strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs):
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
# length of the source text being the character length except EndOfSentence and pad
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
elif "features" in net_input:
src_tokens = net_input["features"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception("expected src_tokens or source in net input. input keys: " + str(net_input.keys()))
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
bsz, src_len = src_tokens.size()[:2]
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
self.max_len - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
encoder_outs = self.model.forward_encoder(net_input)
# Get CTC lprobs and prep ctc_scorer
if self.ctc_weight > 0:
ctc_lprobs = self.model.models[0].get_normalized_probs_for_ctc(
encoder_outs[0], log_probs=True
).contiguous().transpose(0, 1) # (B, T, C) from the encoder
hyp = {}
ctc_prefix_score = CTCPrefixScore(ctc_lprobs[0].detach().cpu().numpy(), self.blank, self.eos, numpy)
hyp["ctc_state_prev"] = ctc_prefix_score.initial_state()
hyp["ctc_score_prev"] = 0.0
ctc_beam = min(ctc_lprobs.shape[-1] - self.mask_idxs.size(-1), int(beam_size * CTC_SCORING_RATIO))
ctc_hyps = {str(self.eos): hyp}
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_tokens).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_tokens)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_tokens).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
) # contains lists of dictionaries of infomation about the hypothesis being finalized at each step
# a boolean array indicating if the sentence at the index is finished or not
finished = [False for i in range(bsz)]
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_tokens.device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_tokens.device)
reorder_state: Optional[Tensor] = None
ctc_state = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
if self.ctc_weight > 0 and step != 0:
# lprobs[:, self.blank] = -math.inf # never select blank
ctc_lprobs = lprobs.clone()
ctc_lprobs[:, self.blank] = -math.inf # never select blank
if self.mask != self.unk:
ctc_lprobs[:, self.mask] = -math.inf # never select mask
if self.mask_idxs.size(0) != 0:
ctc_lprobs[:, self.mask_idxs] = -math.inf # never select mask
local_best_scores, local_best_ids = torch.topk(ctc_lprobs, ctc_beam, dim=-1)
for b in range(tokens.size(0)):
hyp_key = " ".join(str(x) for x in tokens[b, : step + 1].tolist())
ctc_scores, ctc_states = ctc_prefix_score(
tokens[b, : step + 1].cpu(), local_best_ids[b].cpu(), ctc_hyps[hyp_key]["ctc_state_prev"]
)
lprobs[b] = lprobs[b]
lprobs[b, local_best_ids[b]] = (1 - self.ctc_weight) * (lprobs[b, local_best_ids[b]]) + self.ctc_weight * torch.from_numpy(
ctc_scores - ctc_hyps[hyp_key]["ctc_score_prev"]
).to(device="cuda")
for j in range(len(local_best_ids[b])):
ctc_hyps[hyp_key + " " + str(local_best_ids[b][j].item())] = {}
ctc_hyps[hyp_key + " " + str(local_best_ids[b][j].item())]["ctc_score_prev"] = ctc_scores[j]
ctc_hyps[hyp_key + " " + str(local_best_ids[b][j].item())]["ctc_state_prev"] = ctc_states[j]
# local_ctc_scores, ctc_state = ctc_scorer(
# tokens[:, : step + 1], ctc_state, part_ids
# )
# lprobs += local_ctc_scores * self.ctc_weight
elif self.ctc_weight > 0 and step == 0:
ctc_lprobs = lprobs.clone()
ctc_lprobs[:, self.blank] = -math.inf # never select blank
if self.mask != self.unk:
ctc_lprobs[:, self.mask] = -math.inf # never select mask
if self.mask_idxs.size(0) != 0:
ctc_lprobs[:, self.mask_idxs] = -math.inf # never select mask
local_best_scores, local_best_ids = torch.topk(ctc_lprobs, ctc_beam, dim=-1)
for b in range(tokens.size(0)):
hyp_key = " ".join(str(x) for x in tokens[b, : step + 1].tolist())
ctc_scores, ctc_states = ctc_prefix_score(
tokens[b, : step + 1].cpu(), local_best_ids[b].cpu(), ctc_hyps[hyp_key]["ctc_state_prev"]
)
lprobs[b] = lprobs[b]
lprobs[b, local_best_ids[b]] = (1 - self.ctc_weight) * (lprobs[b, local_best_ids[b]]) + self.ctc_weight * torch.from_numpy(
ctc_scores - ctc_hyps[hyp_key]["ctc_score_prev"]
).to(device="cuda")
for j in range(len(local_best_ids[b])):
if b == 0:
ctc_hyps[hyp_key + " " + str(local_best_ids[b][j].item())] = {}
ctc_hyps[hyp_key + " " + str(local_best_ids[b][j].item())]["ctc_score_prev"] = ctc_scores[j]
ctc_hyps[hyp_key + " " + str(local_best_ids[b][j].item())]["ctc_state_prev"] = ctc_states[j]
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs[:, :probs.size(1)] += probs
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
lprobs[:, self.blank] = -math.inf # never select blank
if self.mask != self.unk:
lprobs[:, self.mask] = -math.inf # never select mask
if self.mask_idxs.size(0) != 0:
lprobs[:, self.mask_idxs] = -math.inf # never select mask
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
# Shape: 1d list of absolute-numbered
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# if self.ctc_weight > 0:
# accum_best_id = torch.gather(cand_indices, dim=1, index=active_hypos)
# ctc_state = ctc_scorer.index_select_state(
# ctc_state, accum_best_id
# )
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.min(prefix_lprobs) - 1
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
bbsz_idx (Tensor):
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
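# A minimal worked example (assumed values, for illustration only): with
# finished=[False, True, False], the loop below yields cum_unfin=[0, 1], so
# the second still-unfinished sentence (unfin_idx=1) maps back to original
# batch index sent = 1 + cum_unfin[1] = 2.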
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
cum_fin_tensor = torch.tensor(cum_unfin, dtype=torch.int).to(bbsz_idx)
unfin_idx = bbsz_idx // beam_size
sent = unfin_idx + torch.index_select(cum_fin_tensor, 0, unfin_idx)
# For every finished beam item, build a unique key "(sent << 32) + unfin_idx",
# where "unfin_idx" is the sentence index in the current (possibly reduced)
# batch and "sent" is the index in the original, unreduced batch.
seen = (sent << 32) + unfin_idx
unique_seen: List[int] = torch.unique(seen).tolist()
if self.match_source_len:
condition = step > torch.index_select(src_lengths, 0, unfin_idx)
eos_scores = torch.where(condition, torch.tensor(-math.inf), eos_scores)
sent_list: List[int] = sent.tolist()
for i in range(bbsz_idx.size()[0]):
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent_list[i]]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent_list[i]].append(
{
"tokens": tokens_clone[i],
"score": eos_scores[i],
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for unique_s in unique_seen:
# check termination conditions for this sentence
unique_sent: int = unique_s >> 32
unique_unfin_idx: int = unique_s - (unique_sent << 32)
if not finished[unique_sent] and self.is_finished(
step, unique_unfin_idx, max_len, len(finalized[unique_sent]), beam_size
):
finished[unique_sent] = True
newly_finished.append(unique_unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def is_t5_structure(self):
t5_structure = (
    hasattr(self.single_model, "text_encoder_prenet")
    and hasattr(self.single_model, "speech_encoder_prenet")
) or hasattr(self.single_model, "encoder_prenet")
return t5_structure
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min([m.max_decoder_positions() for m in self.models if hasattr(m, "max_decoder_positions")] + [sys.maxsize])
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
elif self.is_t5_structure():
return [model.forward_encoder_torchscript(net_input) for model in self.models]
else:
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
if self.is_t5_structure():
decoder_out = model.forward_decoder(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i]
)
else:
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, "decoder"):
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
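# The ensemble average below is a mean in probability space computed in log
# space for numerical stability: logsumexp over the per-model log-probs
# minus log(models_size) equals log of the mean probability.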
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(
self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs
):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
left_pad_target (bool, optional): Whether or not the
hypothesis should be left padded or not when they are
teacher forced for generating alignments.
"""
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
if print_alignment == "hard":
self.extract_alignment = utils.extract_hard_alignment
elif print_alignment == "soft":
self.extract_alignment = utils.extract_soft_alignment
@torch.no_grad()
def generate(self, models, sample, **kwargs):
finalized = super()._generate(sample, **kwargs)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
beam_size = self.beam_size
(
src_tokens,
src_lengths,
prev_output_tokens,
tgt_tokens,
) = self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
for i in range(bsz * beam_size)
]
if src_tokens.device.type != "cpu":
src_tokens = src_tokens.to("cpu")
tgt_tokens = tgt_tokens.to("cpu")
attn = [i.to("cpu") for i in attn]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = self.extract_alignment(
attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
)
finalized[i // beam_size][i % beam_size]["alignment"] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
src_tokens = (
src_tokens[:, None, :]
.expand(-1, self.beam_size, -1)
.contiguous()
.view(bsz * self.beam_size, -1)
)
src_lengths = sample["net_input"]["src_lengths"]
src_lengths = (
src_lengths[:, None]
.expand(-1, self.beam_size)
.contiguous()
.view(bsz * self.beam_size)
)
prev_output_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/sequence_generator.py |
EXA-1-master | exa/models/unilm-master/speecht5/speecht5/tasks/__init__.py |
|
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import logging
import os.path as op
from argparse import Namespace
from collections import OrderedDict
import torch
from fairseq.data import (
Dictionary,
encoders,
PrependTokenDataset,
AppendTokenDataset,
data_utils,
StripTokenDataset,
TokenBlockDataset,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq import utils
from speecht5.data.multitask_dataset import MultitaskDataset
from speecht5.data.speech_to_text_dataset import SpeechToTextDataset
from speecht5.data.text_to_speech_dataset import TextToSpeechDataset
from speecht5.data.speech_to_speech_dataset import SpeechToSpeechDataset
from speecht5.data.speech_dataset import SpeechPretrainDataset
from speecht5.data.text_dataset import TextPretrainDataset
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.tasks import LegacyFairseqTask, register_task
from fairseq.tasks.hubert_pretraining import LabelEncoder
logger = logging.getLogger(__name__)
TASK_NAME = ["s2t", "t2s", "s2s", "s2c", "pretrain"]
@register_task("speecht5")
class SpeechT5Task(LegacyFairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument("data", help="manifest root path")
parser.add_argument(
"--config-yaml",
type=str,
default="config.yaml",
help="Configuration YAML filename (under manifest root)",
)
parser.add_argument(
"--max-speech-sample-size",
default=None,
type=int,
metavar="N",
help="max speech sample size",
)
parser.add_argument(
"--min-speech-sample-size",
default=None,
type=int,
metavar="N",
help="min speech sample size",
)
parser.add_argument(
"--max-speech-positions",
default=4000,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-text-positions",
default=450,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
parser.add_argument(
'--t5-task',
choices=TASK_NAME,
help='task for training'
)
parser.add_argument(
"--bpe-tokenizer",
type=str,
default=None,
help="bpe tokenizer for s2t",
)
# Speaker Identification (SID)
parser.add_argument(
"--finetune-from-modules",
default=None,
# choices=[
# "encoder-decoder", "encoder", "decoder",
# "speech_encoder_prenet-encoder-decoder-text_decoder_prenet-text_decoder_postnet", # ASR, T5 SID
# "speech_encoder_prenet-encoder-decoder-text_decoder_prenet-speaker_decoder_postnet", # SID
# "speech_encoder_prenet-encoder-decoder-speech_decoder_prenet-speech_decoder_postnet", # VC, SE
# "text_encoder_prenet-encoder-decoder-speech_decoder_prenet-speech_decoder_postnet", # TTS
# ],
help="If set, using part modules of finetune model.",
)
parser.add_argument(
"--finetune-out-of-modules",
default=None,
# choices=[
# "speaker_decoder_postnet", # SID
# "speech_decoder_postnet", # SE with reduction factor 1
# ],
help="If set, remove part modules of finetune model.",
)
# BART
parser.add_argument(
"--shorten-method",
default="none",
choices=["none", "truncate", "random_crop"],
help="if not none, shorten sequences that exceed --tokens-per-sample",
)
parser.add_argument(
"--shorten-data-split-list",
default="",
help="comma-separated list of dataset splits to apply shortening to, "
'e.g., "train,valid" (default: all dataset splits)',
)
parser.add_argument(
"--tokens-per-sample",
default=512,
type=int,
help="max number of total tokens over all segments"
" per sample for dataset",
)
parser.add_argument(
"--sample-break-mode",
default="eos",
type=str,
help="mode for breaking sentence",
)
parser.add_argument(
"--mask",
default=0.3,
type=float,
help="fraction of words/subwords that will be masked",
)
parser.add_argument(
"--mask-random",
default=0.1,
type=float,
help="instead of using [MASK], use random token this often",
)
parser.add_argument(
"--insert",
default=0.0,
type=float,
help="insert this percentage of additional random tokens",
)
parser.add_argument(
"--permute",
default=0.0,
type=float,
help="take this proportion of subwords and permute them",
)
parser.add_argument(
"--rotate",
default=0.0,
type=float,
help="rotate this proportion of inputs",
)
parser.add_argument(
"--poisson-lambda",
default=3.5,
type=float,
help="randomly shuffle sentences for this proportion of inputs",
)
parser.add_argument(
"--permute-sentences",
default=0.0,
type=float,
help="shuffle this proportion of sentences in all inputs",
)
parser.add_argument(
"--mask-length",
default="span-poisson",
type=str,
choices=["subword", "word", "span-poisson"],
help="mask length to choose",
)
parser.add_argument(
"--replace-length",
default=1,
type=int,
help="when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)",
)
parser.add_argument(
"--iid-noise-target",
action="store_true",
help="whether to use t5 form target",
)
# Hubert
parser.add_argument(
"--hubert-labels",
nargs="*",
type=str,
default=['km'],
help="extension of the label files to load, frame-level labels for pre-training, and sequence-level label for fine-tuning",
)
parser.add_argument(
"--hubert-label-dir",
type=str,
default=None,
help="if set, looks for labels in this directory instead",
)
parser.add_argument(
"--sample-rate",
default=100,
type=float,
help="target sample rate. audio files will be up/down sampled to this rate",
)
parser.add_argument(
"--label-rates",
default=-1,
type=float,
help="if set, looks for labels in this directory instead",
)
parser.add_argument(
"--normalize",
action="store_true",
help="if set, normalizes input to have 0 mean and unit variance",
)
parser.add_argument(
"--enable-padding",
action="store_true",
help="pad shorter samples instead of cropping",
)
parser.add_argument(
"--pad-audio",
action="store_true",
help="pad audio to the longest one in the batch if true",
)
parser.add_argument(
"--random-crop",
action="store_true",
help="always crop from the beginning if false",
)
parser.add_argument(
"--single-target",
action="store_true",
help="if set, AddTargetDatasets outputs same keys "
"as AddTargetDataset",
)
parser.add_argument(
"--batch-ratio",
default=None,
type=str,
help="ratio of bach size for each dataset",
)
parser.add_argument(
"--sample-ratios",
default=None,
type=str,
help="ratio of sample for each dataset",
)
parser.add_argument(
"--ctc-weight",
type=float,
default=0.0,
help="ctc weight for inference",
)
def __init__(self, args, dicts, config):
super().__init__(args)
self.dicts = dicts
self.config = config
self.t5_task = args.t5_task
# Used for filter size
if self.t5_task in ['s2t', 't2s', 's2s']:
self.max_pos = [self.args.max_speech_positions * 256]
elif self.t5_task == 'pretrain':
self.max_pos = [self.args.max_speech_positions * 256, self.args.max_text_positions]
self.mask_idx = self.dicts["text"].add_symbol("<mask>")
# add blank token for ctc
# if args.ctc_weight > 0:
self.blank_symbol_idx = self.dicts["text"].add_symbol("<ctc_blank>")
self.blank_symbol = "<ctc_blank>"
# add mask token
if hasattr(args, "iid_noise_target") and args.iid_noise_target:
self.uni_mask_idxs = []
for i in range(600):
self.uni_mask_idxs.append(self.dicts["text"].add_symbol("<mask>" + str(i)))
self.uni_mask_idxs = torch.tensor(self.uni_mask_idxs)
self.seed = args.seed
@classmethod
def setup_task(cls, args, **kwargs):
# load dictionaries and config
dicts = OrderedDict()
if args.t5_task == 'pretrain' and not hasattr(args, "shuffle_instance"):
args.shuffle_instance = False
# Prepare config
config = None
logger.info('No config file for ' + args.t5_task)
if args.t5_task == "pretrain":
dicts["hubert"] = [Dictionary.load(f"{args.hubert_label_dir}/dict.{label}.txt") for label in args.hubert_labels]
dicts["text"] = Dictionary.load(op.join(args.data, "dict.txt"))
else:
if config is None:
dicts["text"] = Dictionary.load(op.join(args.data, "dict.txt"))
else:
dicts["text"] = Dictionary.load(op.join(args.data, config.vocab_filename))
return cls(args, dicts, config)
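# Hedged usage sketch (paths are placeholders, for illustration only): this
# task is normally instantiated through fairseq's CLI, roughly
#   fairseq-train ${DATA_ROOT} --task speecht5 --t5-task pretrain \
#       --hubert-label-dir ${LABEL_DIR} --hubert-labels km
# which ends up calling SpeechT5Task.setup_task(args) defined above.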
def build_criterion(self, args):
from fairseq import criterions
return criterions.build_criterion(args, self)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
sample_ratios = []
if self.t5_task == "s2t":
## For speech to text task
bpe_tokenizer = self.build_bpe(self.args)
manifest = f"{self.args.data}/{split}.tsv"
procs = [LabelEncoder(self.dicts["text"])]
paths = [f"{self.args.hubert_label_dir}/{split}.txt"]
self.datasets[split] = SpeechToTextDataset(
manifest,
sample_rate=self.args.sample_rate,
label_paths=paths,
label_processors=procs,
max_keep_sample_size=self.max_pos[0] if self.args.max_speech_sample_size is None else self.args.max_speech_sample_size,
min_keep_sample_size=self.args.min_speech_sample_size,
normalize=self.args.normalize,
store_labels=False,
tgt_dict=self.dicts["text"],
tokenizer=bpe_tokenizer,
)
elif self.t5_task == "t2s":
## For text to speech task
from fairseq.data import ConcatDataset
bpe_tokenizer = self.build_bpe(self.args)
procs = [LabelEncoder(self.dicts["text"])]
t2s_datasets = [
TextToSpeechDataset(
manifest_path=f"{self.args.data}/{name}.tsv",
sample_rate=self.args.sample_rate,
label_paths=[f"{self.args.hubert_label_dir}/{name}.txt"],
label_processors=procs,
max_keep_sample_size=self.max_pos[0],
normalize=self.args.normalize,
store_labels=False,
src_dict=self.dicts["text"],
tokenizer=bpe_tokenizer,
reduction_factor=self.args.reduction_factor,
)
for name in split.split(",")
]
self.datasets[split] = ConcatDataset(t2s_datasets) if len(t2s_datasets) > 1 else t2s_datasets[0]
elif self.t5_task == "s2s":
manifest = f"{self.args.data}/{split}.tsv"
self.datasets[split] = SpeechToSpeechDataset(
manifest_path=manifest,
sample_rate=self.args.sample_rate,
max_keep_sample_size=self.max_pos[0] if self.args.max_speech_sample_size is None else self.args.max_speech_sample_size,
min_keep_sample_size=self.args.min_speech_sample_size,
normalize=self.args.normalize,
reduction_factor=self.args.reduction_factor,
)
elif self.t5_task == "pretrain":
is_train_split = ("train" in split)
pretrain_datasets = []
speech_split, text_split = split.split('|')
## Speech pre-train
manifest = f"{self.args.data}/{speech_split}.tsv"
dicts = self.dicts["hubert"]
pad_list = [dictionary.pad() for dictionary in dicts]
eos_list = [dictionary.eos() for dictionary in dicts]
procs = [LabelEncoder(dictionary) for dictionary in dicts]
paths = [
f"{self.args.hubert_label_dir}/{speech_split}.{l}" for l in self.args.hubert_labels
]
# hubert v1: pad_audio=True, random_crop=False;
self.args.dec_weight = getattr(self.args, "dec_weight", 1.0)
pretrain_datasets.append(
SpeechPretrainDataset(
manifest,
sample_rate=self.args.sample_rate,
label_paths=paths,
label_rates=self.args.label_rates,
pad_list=pad_list,
eos_list=eos_list,
label_processors=procs,
max_keep_sample_size=None,
min_keep_sample_size=32000,
max_sample_size=self.args.max_speech_sample_size,
pad_audio=self.args.pad_audio,
normalize=self.args.normalize,
store_labels=False,
random_crop=self.args.random_crop,
single_target=self.args.single_target,
reduction_factor=self.args.reduction_factor,
)
)
sample_ratios.append(sum([pretrain_datasets[0].size(i) for i in range(len(pretrain_datasets[0]))]))
## Text pre-train
paths = utils.split_paths(self.args.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = op.join(data_path, text_split)
bart_dataset = data_utils.load_indexed_dataset(
split_path,
self.dicts["text"],
self.args.dataset_impl,
combine=combine,
)
if bart_dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(text_split, split_path)
)
bart_dataset = StripTokenDataset(bart_dataset, self.dicts["text"].eos())
bart_dataset = maybe_shorten_dataset(
bart_dataset,
text_split,
self.args.shorten_data_split_list,
self.args.shorten_method,
self.args.tokens_per_sample,
self.args.seed,
)
# create continuous blocks of tokens
bart_dataset = TokenBlockDataset(
bart_dataset,
bart_dataset.sizes,
self.args.tokens_per_sample - 2, # one less for <s> and one for </s>
pad=self.dicts["text"].pad(),
eos=self.dicts["text"].eos(),
break_mode=self.args.sample_break_mode,
document_sep_len=0,
)
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
bart_dataset = PrependTokenDataset(bart_dataset, self.dicts["text"].bos())
bart_dataset = AppendTokenDataset(bart_dataset, self.dicts["text"].eos())
mask_whole_words = (
get_whole_word_mask(self.args, self.dicts["text"])
if self.args.mask_length != "subword"
else None
)
self.args.bert_weight = getattr(self.args, "bert_weight", 0.0)
pretrain_datasets.append(
TextPretrainDataset(
bart_dataset,
bart_dataset.sizes,
self.dicts["text"],
self.mask_idx,
mask_whole_words,
shuffle=self.args.shuffle_instance,
seed=self.seed,
args=self.args,
iid_noise_target=self.args.iid_noise_target,
uni_mask_idxs=self.uni_mask_idxs if self.args.iid_noise_target else None,
)
)
sample_ratios.append(sum(pretrain_datasets[1].sizes))
logger.info(
"Task: {0}, Loaded {1} samples of denoising_dataset".format(
'bart',
len(pretrain_datasets[1]),
)
)
logger.info('token ratio is ' + str(sample_ratios))
if self.args.batch_ratio is not None:
batch_ratio = eval(self.args.batch_ratio)
assert len(batch_ratio) == len(sample_ratios)
sample_ratios = [sample_ratios[i] / batch_ratio[i] for i in range(len(sample_ratios))]
else:
batch_ratio = None
max_size = max(sample_ratios)
sample_ratios = [max_size / r for r in sample_ratios]
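# Worked example (assumed token counts, for illustration only): token ratios
# [8000, 2000] with --batch-ratio "[1, 0.5]" become [8000, 4000]; dividing the
# max by each gives upsampling factors [1.0, 2.0], i.e. the second (text)
# dataset is sampled twice as often by MultitaskDataset.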
if hasattr(self.args, "sample_ratios") and self.args.sample_ratios is not None:
sample_ratios = eval(self.args.sample_ratios)
if is_train_split:
self.datasets[split] = MultitaskDataset(
pretrain_datasets, sample_ratios, batch_ratio
)
else:
self.datasets[split] = MultitaskDataset(
pretrain_datasets, batch_ratio=batch_ratio
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
model.train()
model.set_num_updates(update_num)
# Do not use sample_size here; instead, normalize the loss locally.
agg_loss, agg_sample_size, agg_logging_output = 0.0, 1.0, {}
agg_logging_output['sample_size'] = 1
def forward_backward(model, samples, weight=1.0):
nonlocal agg_loss, agg_logging_output
if samples is None or len(samples) == 0:
return
loss, sample_size, logging_output = criterion(model, samples)
if ignore_grad:
loss *= 0
else:
loss *= weight
loss = loss / sample_size
optimizer.backward(loss)
agg_loss += loss.detach().item()
# # TODO make summing of the sample sizes configurable
for k in logging_output:
if k == 'ntokens' or k == 'nsentences':
if k not in agg_logging_output:
agg_logging_output[k] = 0
agg_logging_output[k] += logging_output[k]
# continue
# agg_logging_output[k] += logging_output[k]
# agg_logging_output[task_name] += logging_output[k]
agg_logging_output[samples['task_name']] = logging_output
forward_backward(model, sample)
agg_logging_output["loss"] = agg_loss
return agg_loss, agg_sample_size, agg_logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
from collections import defaultdict
agg_loss, agg_sample_size, agg_logging_output = 0.0, 1.0, defaultdict(float)
agg_logging_output['sample_size'] = 1
loss, sample_size, logging_output = criterion(model, sample)
loss = loss / sample_size
# agg_loss += loss.data.item() if isinstance(loss, torch.Tensor) else loss
agg_loss += loss.item() if isinstance(loss, torch.Tensor) else loss
agg_logging_output[sample['task_name']] = logging_output
agg_logging_output["loss"] = agg_loss
return agg_loss, agg_sample_size, agg_logging_output
@property
def target_dictionary(self):
return self.dicts["text"]
@property
def source_dictionary(self):
return None
def build_model(self, args):
try:
args.input_feat_per_channel = self.config.input_feat_per_channel
args.input_channels = self.config.input_channels
except Exception as e:
args.input_feat_per_channel = 80
args.input_channels = 1
logger.info(f"Cannot set input_feat_per_channel, input_channels, since: ")
logger.warn(e)
logger.info(f"Set to: {args.input_feat_per_channel} and {args.input_channels}")
args.speech_odim = args.input_feat_per_channel * args.input_channels
args.label_rates = self.args.label_rates
args.sample_rate = self.args.sample_rate
self.args.reduction_factor = args.reduction_factor
return super(SpeechT5Task, self).build_model(args)
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
from speecht5.sequence_generator import SequenceGenerator
extra_gen_cls_kwargs = {
"ctc_weight": self.args.ctc_weight,
**extra_gen_cls_kwargs
}
return super().build_generator(
models, args, seq_gen_cls=SequenceGenerator, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_tokenizer(self, args):
if self.config is None:
logger.info(f"pre-tokenizer: None")
return encoders.build_tokenizer(Namespace(**{"tokenizer": None}))
else:
logger.info(f"pre-tokenizer: {self.config.pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.config.pre_tokenizer))
def build_bpe(self, args):
if self.config is not None:
logger.info(f"tokenizer: {self.config.bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.config.bpe_tokenizer))
else:
logger.info(f"tokenizer: {self.args.bpe_tokenizer}")
return encoders.build_bpe(Namespace(**{"bpe": "sentencepiece", "sentencepiece_model": self.args.bpe_tokenizer}))
def generate_speech(self, models, net_input, **kwargs):
with torch.no_grad():
encoder_input = {
k: v for k, v in net_input.items() if k != "prev_output_tokens" and k != "task_name"
}
encoder_input.update(kwargs)
return models[0].generate_speech(**encoder_input)
def inference_t2s(
self, models, sample
):
with torch.no_grad():
xs = sample['net_input']['src_tokens']
spkemb = sample['net_input']['spkembs']
return models[0].inference(xs, spkemb)
def inference_s2s(
self, models, sample, force_equal_length=False
):
with torch.no_grad():
x = sample['net_input']['src_tokens']
xlen = sample['net_input']['src_lengths']
spkemb = sample['net_input']['spkembs']
prev_output_tokens = sample['net_input']['prev_output_tokens']
padding_mask = sample['net_input']['padding_mask']
tgt_lengths = sample['net_input']['tgt_lengths']
return models[0].inference_s2s(x, xlen, spkemb, prev_output_tokens, tgt_lengths, force_equal_length=force_equal_length, padding_mask=padding_mask)
def inference_s2c(
self, models, sample
):
with torch.no_grad():
x = sample['net_input']['src_tokens']
xlen = sample['net_input']['src_lengths']
prev_output_tokens = sample['net_input']['prev_output_tokens']
padding_mask = sample['net_input']['padding_mask']
assert prev_output_tokens.size(1) == 1, prev_output_tokens.size()
return models[0].inference_s2c(x, xlen, prev_output_tokens, padding_mask=padding_mask)
def filter_indices_by_size(
self, indices, dataset, max_positions=None, ignore_invalid_inputs=False
):
"""
Filter examples that are too large
Args:
indices (np.array): original array of sample indices
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
Returns:
np.array: array of filtered sample indices
"""
indices, ignored = dataset.filter_indices_by_size(
indices,
self.max_pos
)
return indices
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/tasks/speecht5.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
from fairseq.models import (
register_model_architecture,
)
from fairseq.models.transformer_lm import base_lm_architecture
@register_model_architecture(model_name="transformer_lm", arch_name="transformer_lm_t5")
def transformer_lm_t5(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1280)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 6144)
args.decoder_layers = getattr(args, "decoder_layers", 20)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args)
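# Hedged usage sketch (for illustration only): this file only registers an
# architecture on top of fairseq's existing transformer_lm model, so it is
# selected with, e.g.,
#   fairseq-train ${DATA} --task language_modeling --arch transformer_lm_t5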
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/t5_transformer_lm.py |
from .speecht5 import * # noqa
from .t5_transformer_lm import * # noqa
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/__init__.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import logging
from ast import literal_eval
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from .modules.text_encoder_prenet import TextEncoderPrenet
from .modules.text_decoder_prenet import TextDecoderPrenet
from .modules.text_decoder_postnet import TextDecoderPostnet
from .modules.speech_encoder_prenet import SpeechEncoderPrenet
from .modules.speech_encoder_postnet import SpeechEncoderPostnet
from .modules.speech_decoder_prenet import SpeechDecoderPrenet
from .modules.speech_decoder_postnet import SpeechDecoderPostnet
from .modules.speaker_decoder_postnet import SpeakerDecoderPostnet
from .modules.encoder import TransformerEncoder
from .modules.decoder import TransformerDecoder
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.models.transformer import Embedding
from fairseq.modules import (
GumbelVectorQuantizer,
)
from torch import Tensor
logger = logging.getLogger(__name__)
DEFAULT_MAX_TEXT_POSITIONS = 450
DEFAULT_MAX_SPEECH_POSITIONS = 4000
@register_model("t5_transformer")
class T5TransformerModel(FairseqEncoderDecoderModel):
"""Adapted Transformer model (https://arxiv.org/abs/1706.03762) for
speech-to-text tasks. The Transformer encoder/decoder remains the same.
A trainable input subsampler is prepended to the Transformer encoder to
project inputs into the encoder dimension as well as downsample input
sequence for computational efficiency."""
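# High-level data flow implemented by this class (summary of forward below):
# a modality-specific prenet (text_encoder_prenet or speech_encoder_prenet)
# feeds the shared TransformerEncoder; its output is consumed by the shared
# TransformerDecoder through a modality-specific decoder prenet and finally
# mapped by a text, speech, or speaker decoder postnet.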
def __init__(
self,
args,
encoder, decoder,
text_encoder_prenet, speech_encoder_prenet,
text_decoder_prenet, speech_decoder_prenet,
text_decoder_postnet, speech_decoder_postnet,
speaker_decoder_postnet, speech_encoder_postnet,
):
super().__init__(encoder, decoder)
self.encoder = encoder
self.decoder = decoder
self.text_encoder_prenet = text_encoder_prenet
self.speech_encoder_prenet = speech_encoder_prenet
self.text_decoder_prenet = text_decoder_prenet
self.speech_decoder_prenet = speech_decoder_prenet
self.text_decoder_postnet = text_decoder_postnet
self.speech_decoder_postnet = speech_decoder_postnet
self.speaker_decoder_postnet = speaker_decoder_postnet
self.hubert_layer = speech_encoder_postnet
self.reduction_factor = args.reduction_factor
self.spk_embed_dim = args.spk_embed_dim
# define projection layer
self.spk_embed_integration_type = args.spk_embed_integration_type
if self.spk_embed_dim is not None and self.spk_embed_integration_type != 'pre':
if self.spk_embed_integration_type == "add":
self.projection = torch.nn.Linear(self.spk_embed_dim, args.decoder_embed_dim)
else:
self.projection = torch.nn.Linear(
args.decoder_embed_dim + self.spk_embed_dim, args.decoder_embed_dim
)
self.use_codebook = args.use_codebook
self.codebook_prob = getattr(args, "codebook_prob", 0.5) # args.codebook_prob
if self.use_codebook:
vq_dim = args.latent_dim if args.latent_dim > 0 else args.encoder_embed_dim
self.quantizer = GumbelVectorQuantizer(
dim=args.encoder_embed_dim,
num_vars=args.latent_vars,
temp=args.latent_temp,
groups=args.latent_groups,
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=args.quantizer_depth,
weight_proj_factor=args.quantizer_factor,
)
self.num_updates = 0
# # Follow BERT's random weight initialization (for BART)
if args.bert_init:
self.apply(init_bert_params)
self.args = args
self.prune_modules(args.modules_filter)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# Transformer
parser.add_argument(
"--activation-fn",
type=str,
choices=utils.get_available_activation_fns(),
help="activation function to use",
)
parser.add_argument(
"--dropout", type=float, metavar="D", help="dropout probability"
)
parser.add_argument(
"--attention-dropout",
type=float,
metavar="D",
help="dropout probability for attention weights",
)
parser.add_argument(
"--activation-dropout",
"--relu-dropout",
type=float,
metavar="D",
help="dropout probability after activation in FFN.",
)
parser.add_argument(
"--encoder-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension",
)
parser.add_argument(
"--encoder-ffn-embed-dim",
type=int,
metavar="N",
help="encoder embedding dimension for FFN",
)
parser.add_argument(
"--encoder-layers", type=int, metavar="N", help="num encoder layers"
)
parser.add_argument(
"--encoder-attention-heads",
type=int,
metavar="N",
help="num encoder attention heads",
)
parser.add_argument(
"--encoder-normalize-before",
action="store_true",
help="apply layernorm before each encoder block",
)
parser.add_argument(
"--decoder-normalize-before",
action="store_true",
help="apply layernorm before each decoder block",
)
parser.add_argument(
"--decoder-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension",
)
parser.add_argument(
"--decoder-ffn-embed-dim",
type=int,
metavar="N",
help="decoder embedding dimension for FFN",
)
parser.add_argument(
"--decoder-layers", type=int, metavar="N", help="num decoder layers"
)
parser.add_argument(
"--decoder-attention-heads",
type=int,
metavar="N",
help="num decoder attention heads",
)
parser.add_argument(
"--reduction-factor",
type=int,
help="reduction factor for decoder",
)
parser.add_argument(
"--spk-embed-dim",
type=int,
help="speaker embedding dimension",
)
parser.add_argument(
"--layernorm-embedding",
action="store_true",
help="add layernorm to embedding",
)
parser.add_argument(
"--load-pretrained-encoder-from",
type=str,
metavar="STR",
help="model to take encoder weights from (for initialization)",
)
parser.add_argument(
'--freeze-encoder-updates',
type=int,
help='number of steps to freeze encoder before finetune'
)
parser.add_argument(
'--freeze-decoder-updates',
type=int,
help='number of steps to freeze decoder before finetune'
)
parser.add_argument(
'--no-freeze-encoder-layer',
type=str,
help='which encoder layer not freeze during finetune'
)
parser.add_argument(
"--share-input-output-embed",
action="store_true",
help="share decoder input and output embeddings",
)
parser.add_argument(
"--share-ctc-embed",
action="store_true",
help="share ctc embed and decoder embed",
)
parser.add_argument(
"--encoder-sliding-window-attn",
default=None,
type=int,
help="If not None but a even number, set sliding window attention to encoder's attn_mask, e.g., 4, 10, and 20",
)
# Convolutional subsampler
parser.add_argument(
"--encoder-speech-prenet",
default="conv",
type=str,
choices=["conv", "linear"],
help="The type of encoder speech prenet, e.g., conv or linear."
)
parser.add_argument(
"--conv-kernel-sizes",
default="5,5",
type=str,
help="The layer of convolution of encoder speech prenet."
)
parser.add_argument(
"--conv-channels",
default=1024,
type=int,
help="The channels of encoder speech prenet."
)
parser.add_argument(
"--subsample-stride",
default="2,2",
type=str,
help="The subsample stride for conv1dsubsample."
)
parser.add_argument(
"--spk-embed-integration-type",
type=str,
choices=["pre", "add"],
help="speaker embedding integration type"
)
parser.add_argument(
"--dprenet-dropout-rate",
default=0.5,
type=float,
help="The dropout rate of decoder speech prenet."
)
## SE
parser.add_argument(
"--se-predict",
default=None,
choices=["masking", "target", "delta"],
help="If set, source speech inputs decoder to predict the masking/target/delta of corresponding inputs."
+ "masking is [0, 1], target is predicted output, delta is difference between inputs and outputs",
)
parser.add_argument(
"--se-decoder-input",
type=str,
default="previous_target",
choices=["previous_target", "source"],
)
## SID
parser.add_argument(
"--modules-filter",
default=None,
type=str,
help="Remove unused modules for, e.g., SID.",
)
parser.add_argument(
"--sid-pad-prenet",
action="store_true",
help="If set, the size of text dictionary is as small as for <pad> token.",
)
parser.add_argument(
"--encoder-attn-branch",
type=str,
default="identity,full",
help="encoder attention branch sliding window, e.g., 'identity,0,2,4,full'",
)
parser.add_argument(
"--encoder-block-branch",
type=str,
help="average the output of encoder, e.g., '4,5,6'",
)
parser.add_argument(
"--sid-encoder-cls",
default=None,
choices=["encoder"],
help="If set, add cls vector to the encoder input, e.g., constant vector.",
)
parser.add_argument(
"--sid-shuffle-encoder-input",
action="store_true",
help="If set, shuffle encoder input in time.",
)
parser.add_argument(
"--sid-decoder-speaker",
action="store_true",
help="If set, apply speaker decoder as transformer decoder.",
)
parser.add_argument(
"--sid-decoder-attn-dim",
default=128,
type=int,
help="Attention dimension in attensive statistics pooling of speaker decoder.",
)
parser.add_argument(
"--sid-t5-postnet",
action="store_true",
help="If set, apply TextDecoderPostnet as speaker classification.",
)
parser.add_argument(
"--sid-embed-dim",
default=128,
type=int,
help="Embedding dimension in speaker postnet for speaker identification if embed postnet.",
)
parser.add_argument(
"--sid-pooling-layer",
default="decoder",
type=str,
choices=["decoder-las", "decoder", "encoder", "encoder-cls", "encoder-speaker"],
help="The output of decoder or encoder uses as SID pooling layer over temporal dimension.",
)
parser.add_argument(
"--sid-no-pooling-bn",
action="store_true",
help="If set, not attention batchnorm.",
)
parser.add_argument(
"--sid-no-embed-postnet",
action="store_true",
help="If set, no layer between decoder output and classification layer.",
)
parser.add_argument(
"--sid-normalize-postnet",
action="store_true",
help="If set, normalize input and weight in postnet/classifier.",
)
parser.add_argument(
"--sid-softmax-type",
default="softmax",
choices=["softmax", "amsoftmax", "aamsoftmax"],
help="If using amsoftmax or aamsoftmax, the target should be given.",
)
parser.add_argument(
"--softmax-scale",
default=1.0,
type=float,
help="Scale for AMSoftmax or AAMSoftmax.",
)
parser.add_argument(
"--softmax-margin",
default=0.0,
type=float,
help="Margin for AMSoftmax or AAMSoftmax.",
)
parser.add_argument(
"--softmax-easy-margin",
action="store_true",
help="Enable easy margin for AAMSoftmax.",
)
parser.add_argument(
"--encoder-layerdrop",
type=float,
metavar="D",
help="LayerDrop probability for encoder",
)
parser.add_argument(
"--decoder-layerdrop",
type=float,
metavar="D",
help="LayerDrop probability for decoder",
)
## Hubert
parser.add_argument(
'--feature-grad-mult',
type=float,
help='multiply feature extractor var grads by this'
)
parser.add_argument(
'--logit-temp',
type=float,
help='temperature to divide logits by'
)
parser.add_argument(
'--final-dim',
type=int,
help="project final representations and targets to this many "
"dimensions. set to encoder_embed_dim is <= 0"
)
# mask
parser.add_argument(
'--hubert-mask-length',
type=int,
help='mask length'
)
parser.add_argument(
'--mask-prob',
type=float,
help='probability of replacing a token with mask'
)
parser.add_argument(
"--mask-selection",
choices=["static", "uniform", "normal", "poisson"],
help="how to choose mask length",
)
parser.add_argument(
'--mask-other',
type=float,
help="secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
)
parser.add_argument(
'--mask-min-space',
type=int,
help='min space between spans (if no overlap is enabled)'
)
# channel masking
parser.add_argument(
'--mask-channel-length',
type=int,
help='length of the mask for features (channels)'
)
parser.add_argument(
'--mask-channel-prob',
type=float,
help="probability of replacing a feature with 0"
)
parser.add_argument(
"--mask-channel-selection",
choices=["static", "uniform", "normal", "poisson"],
help="how to choose mask length for channel masking",
)
parser.add_argument(
'--mask-channel-other',
type=float,
help="secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
)
parser.add_argument(
'--mask-channel-min-space',
type=int,
help='min space between spans (if no overlap is enabled)'
)
# abs positional embeddings
parser.add_argument(
'--conv-pos',
type=int,
help='number of filters for convolutional positional embeddings'
)
parser.add_argument(
'--conv-pos-groups',
type=int,
help='number of groups for convolutional positional embedding'
)
# codebook related
parser.add_argument(
"--use-codebook",
action="store_true",
help="whether to use codebook",
)
parser.add_argument(
"--codebook-prob",
type=float,
help="probability to use codebook",
)
parser.add_argument(
"--latent-vars",
type=int,
help="number of latent variables V in each group of the codebook",
)
parser.add_argument(
"--latent-groups",
type=int,
help="number of groups G of latent variables in the codebook",
)
parser.add_argument(
"--latent-dim",
type=int,
help="if > 0, uses this dimensionality for latent variables. "
"otherwise uses final_dim / latent_groups",
)
parser.add_argument(
"--latent-temp",
type=literal_eval,
help="temperature for latent variable sampling. "
"can be tuple of 3 values (start, end, decay)",
)
parser.add_argument(
"--quantizer-depth",
type=int,
help="number of quantizer layers",
)
parser.add_argument(
"--quantizer-factor",
type=int,
help="number of quantizer layers",
)
parser.add_argument(
"--get-code-distribution",
action='store_true',
help="whether to get the code distribution (for test)",
)
# relative pos enc
parser.add_argument(
"--relative-position-embedding",
action='store_true',
help="whether to use relative position embedding",
)
parser.add_argument(
"--num-buckets",
type=int,
default=320,
help="num of buckets for relative position embedding",
)
parser.add_argument(
"--max-distance",
type=int,
default=1280,
help="max distance for relative position embedding",
)
parser.add_argument(
"--encoder-max-relative-position",
type=int,
help="max distance for relative position embedding in encoder",
)
parser.add_argument(
"--decoder-max-relative-position",
type=int,
help="max distance for relative position embedding in decoder",
)
# hubert feature extractor
parser.add_argument(
"--conv-feature-layers",
type=str,
help= "string describing convolutional feature extraction "
"layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]",
)
parser.add_argument(
"--conv-bias",
action='store_true',
help="include bias in conv encoder",
)
parser.add_argument(
"--extractor-mode",
choices=["default", "layer_norm"],
help="mode for feature extractor. default has a single group "
"norm with d groups in the first conv block, whereas layer_norm "
"has layer norms in every block (meant to use with normalize=True)"
)
# others
parser.add_argument(
"--bert-init",
action='store_true',
help="initilize as bert",
)
parser.add_argument(
"--unb-enc-layer",
type=int,
default=-1,
help="which layer's output is used as the input of decoder",
)
# Encoder, Decoder
@classmethod
def build_encoder(cls, args, dictionary=None, embed_tokens=None):
return TransformerEncoder(args, dictionary, embed_tokens)
@classmethod
def build_decoder(cls, args):
return TransformerDecoder(args)
# Encoder Prenet
@classmethod
def build_text_encoder_prenet(cls, embed_tokens, args):
return TextEncoderPrenet(embed_tokens, args)
@classmethod
def build_speech_encoder_prenet(cls, args):
return SpeechEncoderPrenet(args)
# Decoder Prenet
@classmethod
def build_text_decoder_prenet(cls, embed_tokens, args):
return TextDecoderPrenet(embed_tokens, args)
@classmethod
def build_speech_decoder_prenet(cls, odim, args):
return SpeechDecoderPrenet(odim, args)
# Decoder Postnet
@classmethod
def build_text_decoder_postnet(cls, embed_tokens, dictionary, args):
return TextDecoderPostnet(embed_tokens, dictionary, args)
@classmethod
def build_speaker_decoder_postnet(cls, embed_dim, class_num, args):
return SpeakerDecoderPostnet(embed_dim, class_num, args)
@classmethod
def build_speech_decoder_postnet(cls, odim, args):
return SpeechDecoderPostnet(odim, args)
@classmethod
def build_speech_encoder_postnet(cls, dictionaries, args):
return SpeechEncoderPostnet(dictionaries, args)
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
def build_embedding(dictionary, embed_dim, max_num_embeddings=None):
num_embeddings = len(dictionary)
if max_num_embeddings is not None and isinstance(max_num_embeddings, int):
num_embeddings = min(num_embeddings, max_num_embeddings)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
if hasattr(args, "sid_pad_prenet") and args.sid_pad_prenet:
max_num_embeddings = 3 # <pad> at index 2
else:
max_num_embeddings = None
text_decoder_embed_tokens = build_embedding(
task.dicts["text"], args.decoder_embed_dim, max_num_embeddings
)
if args.share_input_output_embed:
text_encoder_embed_tokens = text_decoder_embed_tokens
else:
text_encoder_embed_tokens = build_embedding(
task.dicts["text"], args.encoder_embed_dim
)
speech_odim = args.speech_odim
if "text" in task.dicts:
encoder = cls.build_encoder(args, task.dicts["text"], text_encoder_embed_tokens)
else:
encoder = cls.build_encoder(args)
decoder = cls.build_decoder(args)
text_encoder_prenet = cls.build_text_encoder_prenet(text_encoder_embed_tokens, args)
speech_encoder_prenet = cls.build_speech_encoder_prenet(args)
text_decoder_prenet = cls.build_text_decoder_prenet(text_decoder_embed_tokens, args)
if getattr(args, "sid_pooling_layer", None) == "decoder-las":
speech_decoder_prenet = cls.build_speech_encoder_prenet(args)
else:
speech_decoder_prenet = cls.build_speech_decoder_prenet(speech_odim, args)
text_decoder_postnet = cls.build_text_decoder_postnet(text_decoder_embed_tokens, task.dicts['text'], args)
speech_decoder_postnet = cls.build_speech_decoder_postnet(speech_odim, args)
if getattr(args, "sid_t5_postnet", False):
speaker_decoder_postnet = None
else:
if task.t5_task == "s2c":
speaker_decoder_postnet = cls.build_speaker_decoder_postnet(args.sid_embed_dim, len(task.dicts['text']), args)
else:
speaker_decoder_postnet = None
if "hubert" in task.dicts:
speech_encoder_postnet = cls.build_speech_encoder_postnet(task.dicts['hubert'], args)
else:
speech_encoder_postnet = None
return cls(
args,
encoder, decoder,
text_encoder_prenet, speech_encoder_prenet,
text_decoder_prenet, speech_decoder_prenet,
text_decoder_postnet, speech_decoder_postnet,
speaker_decoder_postnet, speech_encoder_postnet,
)
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
# net_output['encoder_out'] is a (B, T, D) tensor
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def get_normalized_probs_for_ctc(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output["encoder_out_for_ctc"][0]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def get_logits(self, net_output, is_masked=True):
if is_masked:
logits_list = net_output["logit_m_list"]
else:
logits_list = net_output["logit_u_list"]
logits_list = [x.float() for x in logits_list if x is not None]
return logits_list
def get_targets(self, sample, net_output, is_masked=True):
if "logit_m_list" in net_output:
logits_list = self.get_logits(net_output, is_masked)
targets_list = [
x.new_zeros(x.size(0), dtype=torch.long) for x in logits_list
]
return targets_list
else:
return sample["target"]
def get_extra_losses(self, net_output):
extra_losses = []
names = []
if "features_pen" in net_output:
extra_losses.append(net_output["features_pen"])
names.append("features_pen")
if "prob_perplexity" in net_output:
extra_losses.append(
(net_output["num_vars"] - net_output["prob_perplexity"])
/ net_output["num_vars"]
)
names.append("prob_perplexity")
return extra_losses, names
def forward(self, source=None, src_tokens=None, src_lengths=None, prev_output_tokens=None, tgt_lengths=None, spkembs=None, target_list=None, task_name=None, padding_mask=None, only_hubert=False, only_ctc=False, feature_only=False, tgt_enc_layer=None, mask=True):
"""
The forward method inherited from the base class has a **kwargs
argument in its input, which is not supported in torchscript. This
method overwrites the forward method definition without **kwargs.
"""
assert source is not None or src_tokens is not None
# padding_mask is not none only when input is waveform
if source is None and padding_mask is None and not feature_only:
input_type = 'text'
else:
input_type = 'speech'
if prev_output_tokens is not None and len(prev_output_tokens.size()) == 2:
output_type = 'text'
codebook_out = {}
else:
output_type = 'speech'
if task_name is not None and task_name == "s2c":
if target_list is not None and target_list.size(1) == 1 and not getattr(self.args, "sid_t5_postnet", False):
sid_target = F.one_hot(target_list.squeeze(1), num_classes=self.speaker_decoder_postnet.class_num)
else:
sid_target = None
target_list = None
# Encoder Prenet
if input_type == 'text':
encoder_input, encoder_padding_mask = self.text_encoder_prenet(src_tokens)
else:
if target_list is not None:
encoder_input, encoder_padding_mask = self.speech_encoder_prenet(source, require_feat_pen=True, target_list=target_list, padding_mask=padding_mask, mask=mask)
encoder_input, features_pen, mask_indices, target_list = encoder_input
else:
encoder_input, encoder_padding_mask = self.speech_encoder_prenet(source, padding_mask=padding_mask, mask=self.training)
# shuffle a batch of inputs of encoder
if self.training and hasattr(self.args, "sid_shuffle_encoder_input") and getattr(self.args, "sid_shuffle_encoder_input", False):
shuffle_index = torch.randperm(encoder_padding_mask.size(1), device=encoder_padding_mask.device)
encoder_input = torch.index_select(encoder_input, 1, shuffle_index)
encoder_padding_mask = torch.index_select(encoder_padding_mask, 1, shuffle_index)
if getattr(self.args, "sid_encoder_cls", None) == "encoder":
prev_output_tokens = torch.zeros_like(prev_output_tokens)
encoder_input, encoder_padding_mask = self._integrate_with_speaker_cls(prev_output_tokens, encoder_input, encoder_padding_mask)
# Encoder: T x B x C
encoder_output = self.encoder(encoder_input, encoder_padding_mask, tgt_layer=tgt_enc_layer)
if task_name is not None and task_name == 'speech_pretrain' and feature_only:
return encoder_output["encoder_out"][0].transpose(0, 1)
if task_name is not None and task_name == 's2c':
if self.args.sid_pooling_layer == "encoder":
return self.speaker_decoder_postnet(encoder_output["encoder_out"][0].transpose(0, 1).mean(1), sid_target), None
elif self.args.sid_pooling_layer == "encoder-cls":
return self.speaker_decoder_postnet(encoder_output["encoder_out"][0].transpose(0, 1)[:,0], sid_target), None
elif self.args.sid_pooling_layer == "encoder-speaker" or getattr(self.args, "sid_decoder_speaker", False):
return self.speaker_decoder_postnet(encoder_output["encoder_out"][0].transpose(0, 1), sid_target), None
if target_list is not None:
hubert_results = self.hubert_layer(
encoder_output["encoder_out"][0].transpose(0, 1),
encoder_padding_mask,
mask_indices,
target_list
)
hubert_results['features_pen'] = features_pen
if "decoder_input" in encoder_output and encoder_output["decoder_input"][0] is not None:
# Change the encoder output to decoder input once set unb-enc-layer
encoder_output["encoder_out"] = encoder_output["decoder_input"]
if self.use_codebook:
q = self.quantizer(encoder_output["encoder_out"][0].transpose(0, 1))
# q["x"]: B x T x C
# Sample indexs according to the codebook prob
random_idx = torch.randperm(q["x"].size(1))[:int(q["x"].size(1) * self.codebook_prob)]
# Make weight for q
q_w = q["x"].new_zeros(q["x"].size(1))
q_w[random_idx] = 1.0
# Combine quantized codes and encoder output
encoder_output["encoder_out"][0] = (
q_w.view(-1, 1) * q["x"] + (- q_w + 1).view(-1, 1) * encoder_output["encoder_out"][0].transpose(0, 1)
).transpose(0, 1)
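# For illustration: with codebook_prob=0.5 and T encoder frames, roughly T/2
# randomly chosen time steps are replaced by their quantized codebook entries
# q["x"], while the remaining steps keep the original encoder output.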
# encoder_output["encoder_out"][0] = q["x"].transpose(0, 1)
if output_type == 'speech':
hubert_results["prob_perplexity"] = q["prob_perplexity"]
hubert_results["code_perplexity"] = q["code_perplexity"]
hubert_results["num_vars"] = q["num_vars"]
hubert_results["temp"] = q["temp"]
elif output_type == 'text':
codebook_out["prob_perplexity"] = q["prob_perplexity"]
codebook_out["code_perplexity"] = q["code_perplexity"]
codebook_out["num_vars"] = q["num_vars"]
codebook_out["temp"] = q["temp"]
if only_hubert and target_list is not None:
return hubert_results, None
if only_ctc and task_name is not None and task_name == "s2t":
return None, encoder_output
elif not self.training and prev_output_tokens is None and task_name == "s2t" and task_name is not None:
return encoder_output
# Decoder Prenet
if output_type == 'text':
# _ is the incremental state
prev_output_tokens, tgt_mask, _ = self.text_decoder_prenet(prev_output_tokens)
if task_name is not None and task_name == 's2c':
prev_output_tokens = torch.zeros_like(prev_output_tokens)
else:
# integrate speaker embedding
if self.spk_embed_integration_type == "pre" and self.spk_embed_dim is not None:
# Decoder Prenet
prev_output_tokens, tgt_mask = self.speech_decoder_prenet(prev_output_tokens, tgt_lengths, spkembs)
else:
if self.spk_embed_dim is not None:
encoder_output["encoder_out"] = [self._integrate_with_spk_embed(
encoder_output["encoder_out"][0].transpose(0, 1), spkembs
).transpose(0, 1)]
prev_output_tokens, tgt_mask = self.speech_decoder_prenet(prev_output_tokens, tgt_lengths)
# BART Sequence Classification: cat <pad> + feature before decoder
if task_name is not None and task_name == 's2c' and self.args.sid_pooling_layer == "decoder-las":
decoder_feat_input, decoder_feat_mask = self.speech_decoder_prenet(src_tokens, src_lengths)
prev_output_tokens, tgt_mask = self._integrate_with_speaker_cls((prev_output_tokens, tgt_mask), decoder_feat_input, decoder_feat_mask, cls_first=False)
# Speech enhancement: predict a mask over the inputs; the source speech replaces prev_output_tokens as the decoder input
if task_name is not None and task_name == "s2s" and getattr(self.args, "se_decoder_input", "previous_target") == "source":
prev_output_tokens, tgt_mask = self.speech_decoder_prenet(src_tokens, src_lengths)
# Decoder
decoder_output, extra = self.decoder(prev_output_tokens, tgt_mask, encoder_output,
full_context_alignment=getattr(self.args, "decoder_full_context_alignment", False),
alignment_layer=(-1 if target_list is None and output_type == 'speech' else None))
# Decoder Postnet
if task_name is not None and task_name == 's2c':
if not getattr(self.args, "sid_t5_postnet", False):
if self.args.sid_pooling_layer == "decoder":
return self.speaker_decoder_postnet(decoder_output.mean(1), sid_target), None
elif self.args.sid_pooling_layer == "decoder-las":
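# pool by taking the decoder state at the last non-padded position of each sequence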
indices = (tgt_mask.eq(False).float().sum(1) - 1.0).type(torch.int64)
indices = indices.unsqueeze(1).unsqueeze(2).expand(-1, -1, decoder_output.size(2))
return self.speaker_decoder_postnet(decoder_output.gather(1, indices), sid_target), None
else:
return (self.text_decoder_postnet(decoder_output), None), encoder_output
# SE predict: masking, target, delta. Ensure reduction factor 1
if task_name is not None and task_name == 's2s' and getattr(self.args, "se_predict", None) is not None:
assert self.reduction_factor == 1, f"{self.reduction_factor} != 1"
before_outs, after_outs, logits = self.speech_decoder_postnet(decoder_output)
se_predict = getattr(self.args, "se_predict")
if se_predict == "masking":
before_outs = torch.sigmoid(before_outs) * src_tokens
after_outs = torch.sigmoid(after_outs) * src_tokens
return before_outs, after_outs, logits, extra['attn'][0]
elif se_predict == "target":
return before_outs, after_outs, logits, extra['attn'][0]
elif se_predict == "delta":
before_outs = before_outs - src_tokens
after_outs = after_outs - src_tokens
return before_outs, after_outs, logits, extra['attn'][0]
else:
raise ValueError(f"{se_predict} not in [masking, target, delta]")
if task_name is not None and task_name == 's2t':
#return self.text_decoder_postnet(decoder_output), None
return (self.text_decoder_postnet(decoder_output), None), encoder_output
if output_type == 'text':
return (self.text_decoder_postnet(decoder_output), None), codebook_out, encoder_output
else:
if target_list is not None:
return hubert_results, (self.speech_decoder_postnet(decoder_output) + (extra['attn'][0],))
else:
return self.speech_decoder_postnet(decoder_output) + (extra['attn'][0],)
def _integrate_with_speaker_cls(self, pad_input, encoder_input, encoder_padding_mask=None, cls_first=True):
"""
encoder_input: [B, T, C]
encoder_padding_mask: [B, T]
"""
if hasattr(self, "text_decoder_prenet"):
if isinstance(pad_input, tuple):
repeat_cls_vector, repeat_cls_mask = pad_input
else:
repeat_cls_vector, repeat_cls_mask, _ = self.text_decoder_prenet(pad_input)
if encoder_padding_mask is not None:
bsz = encoder_input.size(0)
tsz = encoder_input.size(1)
encoder_padding_mask = encoder_input.new_zeros((bsz, tsz)) == 1.0
if repeat_cls_mask is None:
mask_size = (encoder_padding_mask.size(0), 1)
mask_type = encoder_padding_mask.dtype
repeat_cls_mask = encoder_padding_mask.new_zeros(mask_size) == 1.0
ret_encoder_padding_mask = torch.cat([repeat_cls_mask, encoder_padding_mask], dim=1)
if cls_first:
ret_encoder_input = torch.cat([repeat_cls_vector, encoder_input], dim=1)
else:
ret_encoder_input = torch.cat([encoder_input, encoder_input[:,-1:,:]], dim=1)
mask_size = (encoder_padding_mask.size(0), 1)
mask_type = encoder_padding_mask.dtype
repeat_cls_mask_ = encoder_padding_mask.new_ones(mask_size) == 1.0
encoder_padding_mask_ = torch.cat([encoder_padding_mask, repeat_cls_mask_], dim=1)
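# write the repeated CLS vector at the first padded slot (right after the last valid frame) and zero out the remaining padding positions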
indices = encoder_padding_mask.eq(False).float().sum(1).type(torch.int64).unsqueeze(1)
indices_mask = torch.zeros_like(ret_encoder_padding_mask).scatter(1, indices, 1.0)
ret_encoder_input = ret_encoder_input * (1.0 - encoder_padding_mask_.type(ret_encoder_input.dtype).unsqueeze(2)) \
+ repeat_cls_vector * indices_mask.type(repeat_cls_vector.dtype).unsqueeze(2)
return ret_encoder_input, ret_encoder_padding_mask
def _integrate_with_spk_embed(self, hs, spembs):
"""Integrate speaker embedding with hidden states.
Args:
hs (Tensor): Batch of hidden state sequences (B, Tmax, adim).
spembs (Tensor): Batch of speaker embeddings (B, spk_embed_dim).
Returns:
Tensor: Batch of integrated hidden state sequences (B, Tmax, adim)
"""
if self.spk_embed_integration_type == "add":
# apply projection and then add to hidden states
spembs = self.projection(F.normalize(spembs))
hs = hs + spembs.unsqueeze(1)
elif self.spk_embed_integration_type == "concat":
# concat hidden states with spk embeds and then apply projection
spembs = F.normalize(spembs).unsqueeze(1).expand(-1, hs.size(1), -1)
hs = self.projection(torch.cat([hs, spembs], dim=-1))
else:
raise NotImplementedError("support only add or concat.")
return hs
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg=None,
args=None,
):
"""NOT STRICT Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
# self.prune_modules(model_cfg.modules_filter)
model_dict_size = self.text_decoder_postnet.output_projection.out_features
ckpt_dict_size = state_dict["text_decoder_postnet.output_projection.weight"].size(0)
if model_dict_size != ckpt_dict_size:
# reset dictionary-related modules, such as embedding table and encoder ctc embed
logger.warn(f"not equal dictionary between model and checkpoint: {model_dict_size} vs {ckpt_dict_size}")
logger.info(f"reset model dictionary with size of {model_dict_size}")
removed_keys = [
key for key in state_dict.keys() if any(
key.startswith(previ) for previ in [
"encoder.proj", "text_encoder_prenet", "text_decoder_prenet", "text_decoder_postnet"
]
)
]
for key in removed_keys:
state_dict.pop(key, None)
logger.info(f"removed loaded checkpoint: {key}")
for m in self._modules.keys():
m_state_dict = {
key.replace(f"{m}.", ""): value for key, value in state_dict.items() if key.startswith(f"{m}.")
}
if hasattr(self, m):
self._modules[m].load_state_dict(m_state_dict, False)
return self
def prune_modules(self, modules_filter=None):
"""Prune unused modules for specific tasks."""
if modules_filter is None:
return
elif modules_filter == "s2c":
if hasattr(self, "text_encoder_prenet"): del self.text_encoder_prenet
if hasattr(self, "speech_decoder_prenet") and getattr(self.args, "sid_pooling_layer", None) != "decoder-las":
del self.speech_decoder_prenet
if hasattr(self, "speech_decoder_postnet"): del self.speech_decoder_postnet
if hasattr(self, "text_decoder_postnet"): del self.text_decoder_postnet
if hasattr(self, "speech_encoder_postnet"): del self.speech_encoder_postnet
if hasattr(self.encoder, "proj"): self.encoder.proj = None
if hasattr(self, "projection"): del self.projection
if hasattr(self, "quantizer"): del self.quantizer
if getattr(self.args, "sid_pooling_layer", "decoder").startswith("encoder") or getattr(self.args, "sid_decoder_speaker", False):
if hasattr(self.decoder, "dropout_module"): del self.decoder.dropout_module
if hasattr(self.decoder, "layers"): del self.decoder.layers
if hasattr(self.decoder, "layer_norm"): del self.decoder.layer_norm
if hasattr(self, "text_decoder_prenet"): del self.text_decoder_prenet
elif modules_filter == "s2s":
if hasattr(self, "speaker_decoder_postnet"): del self.speaker_decoder_postnet
if hasattr(self, "text_encoder_prenet"): del self.text_encoder_prenet
if hasattr(self, "text_decoder_prenet"): del self.text_decoder_prenet
if hasattr(self, "text_decoder_postnet"): del self.text_decoder_postnet
if hasattr(self, "speech_encoder_postnet"): del self.speech_encoder_postnet
if hasattr(self.encoder, "proj"): self.encoder.proj = None
if hasattr(self, "projection"): del self.projection
if hasattr(self, "quantizer"): del self.quantizer
elif modules_filter == "t2s":
if hasattr(self, "speaker_decoder_postnet"): del self.speaker_decoder_postnet
if hasattr(self, "speech_encoder_prenet"): del self.speech_encoder_prenet
if hasattr(self, "text_decoder_prenet"): del self.text_decoder_prenet
if hasattr(self, "text_decoder_postnet"): del self.text_decoder_postnet
if hasattr(self, "speech_encoder_postnet"): del self.speech_encoder_postnet
if hasattr(self.encoder, "proj"): self.encoder.proj = None
if hasattr(self, "projection"): del self.projection
if hasattr(self, "quantizer"): del self.quantizer
elif modules_filter == "s3prl":
# keep only the encoder and its speech prenet (for s3prl feature extraction)
if hasattr(self.decoder, "dropout_module"): del self.decoder.dropout_module
if hasattr(self.decoder, "layers"): del self.decoder.layers
if hasattr(self.decoder, "layer_norm"): del self.decoder.layer_norm
if hasattr(self, "speaker_decoder_postnet"): del self.speaker_decoder_postnet
if hasattr(self, "text_decoder_prenet"): del self.text_decoder_prenet
if hasattr(self, "text_decoder_postnet"): del self.text_decoder_postnet
if hasattr(self, "speech_decoder_prenet"): del self.speech_decoder_prenet
if hasattr(self, "speech_decoder_postnet"): del self.speech_decoder_postnet
if hasattr(self, "speech_encoder_postnet"): del self.speech_encoder_postnet
if hasattr(self.encoder, "proj"): self.encoder.proj = None
if hasattr(self, "projection"): del self.projection
if hasattr(self, "quantizer"): del self.quantizer
def forward_encoder_torchscript(self, net_input: Dict[str, Tensor]):
"""A TorchScript-compatible version of forward.
Encoders which use additional arguments may want to override
this method for TorchScript compatibility.
"""
if torch.jit.is_scripting():
return self.forward_encoder(
source=net_input["source"],
padding_mask=net_input["padding_mask"]
)
else:
return self.forward_encoder_non_torchscript(net_input)
@torch.jit.unused
def forward_encoder_non_torchscript(self, net_input: Dict[str, Tensor]):
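# drop decoder-side inputs and bookkeeping fields that forward_encoder does not accept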
encoder_input = {
k: v for k, v in net_input.items() if k != "prev_output_tokens" and k != "task_name"
}
return self.forward_encoder(**encoder_input)
def forward_encoder(self, source, padding_mask=None):
# Encoder Prenet
encoder_input, encoder_padding_mask = self.speech_encoder_prenet(source, padding_mask=padding_mask, mask=False)
# Encoder
encoder_output = self.encoder(encoder_input, encoder_padding_mask)
return encoder_output
def forward_text_encoder(self, src_tokens):
# Text Encoder Prenet
encoder_input, encoder_padding_mask = self.text_encoder_prenet(src_tokens)
# Encoder
encoder_output = self.encoder(encoder_input, encoder_padding_mask)
return encoder_output
def forward_decoder(self, tokens, encoder_out, incremental_state):
# Decoder Prenet
prev_output_tokens, tgt_mask, incremental_state = self.text_decoder_prenet(tokens, incremental_state)
# Decoder
decoder_output, extra = self.decoder(
prev_output_tokens,
tgt_mask,
encoder_out=encoder_out,
incremental_state=incremental_state,
)
# Decoder Postnet
return self.text_decoder_postnet(decoder_output), extra
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def generate_speech(self, source=None, src_tokens=None, spkembs=None, **kwargs):
assert source is not None or src_tokens is not None
threshold = kwargs.get("threshold", 0.5)
minlenratio = kwargs.get("minlenratio", 0.0)
if source is None:
assert src_tokens.size(0) == 1
encoder_out = self.forward_text_encoder(src_tokens)
maxlenratio = kwargs.get("maxlenratio", 20.0)
else:
assert source.size(0) == 1
encoder_out = self.forward_encoder(source, padding_mask=kwargs["padding_mask"])
maxlenratio = kwargs.get("maxlenratio", 10.0)
if spkembs is not None and self.spk_embed_integration_type != "pre":
encoder_out["encoder_out"] = [self._integrate_with_spk_embed(
encoder_out["encoder_out"][0].transpose(0, 1), spkembs
).transpose(0, 1)]
spkembs = None
maxlen = int(encoder_out["encoder_out"][0].size(0) * maxlenratio / self.reduction_factor)
minlen = int(encoder_out["encoder_out"][0].size(0) * minlenratio / self.reduction_factor)
idx = 0
ys = encoder_out["encoder_out"][0].new_zeros(1, 1, self.speech_decoder_postnet.odim)
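# ys holds the spectrogram frames generated so far, starting from a single all-zero "go" frame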
outs, probs = [], []
# forward decoder step-by-step
if isinstance(self.decoder, FairseqIncrementalDecoder):
incremental_states = {}
else:
incremental_states = None
attns = []
while True:
# update index
idx += 1
# calculate output and stop prob at idx-th step
decoder_in, _ = self.speech_decoder_prenet(ys, spkembs=spkembs)
z, extra = self.decoder(decoder_in[:,-1:], None, encoder_out, incremental_states, alignment_layer=-1)
outs += [self.speech_decoder_postnet.feat_out(z[0, -1]).view(self.reduction_factor, self.speech_decoder_postnet.odim)] # [(r, odim), ...]
probs += [torch.sigmoid(self.speech_decoder_postnet.prob_out(z[0, -1]))] # [(r), ...]
# update next inputs
ys = torch.cat((ys, outs[-1][-1].view(1, 1, self.speech_decoder_postnet.odim)), dim=1) # (1, idx + 1, odim)
attns.append(torch.stack([att_l[0] for att_l in extra['attn'][0]], dim=0))
# check whether to finish generation
if int(sum(probs[-1] >= threshold)) > 0 or idx >= maxlen:
# check minimum length
if idx < minlen:
continue
outs = (torch.cat(outs, dim=0).unsqueeze(0).transpose(1, 2)) # (L, odim) -> (1, L, odim) -> (1, odim, L)
if self.speech_decoder_postnet.postnet is not None:
outs = outs + self.speech_decoder_postnet.postnet(outs) # (1, odim, L)
outs = outs.transpose(2, 1).squeeze(0) # (L, odim)
probs = torch.cat(probs, dim=0)
attn = torch.cat(attns, dim=2)
break
if outs.size(0) == maxlen:
logging.warning("output length reaches maximum length")
return outs, probs, attn
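# Example (illustrative sketch, not part of the original source): text-to-speech
# generation for a single utterance with an optional speaker embedding could look like
#   outs, probs, attn = model.generate_speech(
#       src_tokens=src_tokens,  # (1, T_text) token ids
#       spkembs=spkembs,        # (1, spk_embed_dim) or None
#       threshold=0.5, minlenratio=0.0, maxlenratio=20.0,
#   )
# which returns the predicted spectrogram (L, odim), per-step stop probabilities and the attention.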
@register_model_architecture(model_name="t5_transformer", arch_name="t5_transformer")
def base_architecture(args):
# Transformer
args.bert_init = getattr(args, "bert_init", False)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768 * 4)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.max_text_positions = getattr(args, "max_text_positions", DEFAULT_MAX_TEXT_POSITIONS)
args.max_speech_positions = getattr(args, "max_speech_positions", DEFAULT_MAX_SPEECH_POSITIONS)
# Espnet related, including prenet, postnet
args.eprenet_conv_layers = getattr(args, "eprenet_conv_layers", 0)
args.eprenet_conv_filts = getattr(args, "eprenet_conv_filts", 0)
args.eprenet_conv_chans = getattr(args, "eprenet_conv_chans", 0)
args.use_batch_norm = getattr(args, "use_batch_norm", True)
args.eprenet_dropout_rate = getattr(args, "eprenet_dropout_rate", 0.0)
args.enc_use_scaled_pos_enc = getattr(args, "enc_use_scaled_pos_enc", True)
args.dec_use_scaled_pos_enc = getattr(args, "dec_use_scaled_pos_enc", True)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_chans = getattr(args, "postnet_chans", 256)
args.postnet_filts = getattr(args, "postnet_filts", 5)
args.postnet_dropout_rate = getattr(args, "postnet_dropout_rate", 0.5)
args.dprenet_dropout_rate = getattr(args, "dprenet_dropout_rate", 0.5)
args.dprenet_layers = getattr(args, "dprenet_layers", 2)
args.dprenet_units = getattr(args, "dprenet_units", 256)
args.initial_encoder_alpha = getattr(args, "initial_encoder_alpha", 1.0)
args.initial_decoder_alpha = getattr(args, "initial_decoder_alpha", 1.0)
args.spk_embed_integration_type = getattr(args, "spk_embed_integration_type", "pre")
args.spk_embed_dim = getattr(args, "spk_embed_dim", 512)
args.encoder_reduction_factor = getattr(args, "encoder_reduction_factor", 1)
args.reduction_factor = getattr(args, "reduction_factor", 2)
args.transformer_enc_positional_dropout_rate = getattr(args, "transformer_enc_positional_dropout_rate", 0.1)
args.transformer_dec_positional_dropout_rate = getattr(args, "transformer_dec_positional_dropout_rate", 0.1)
args.layer_norm_eps = getattr(args, "layer_norm_eps", 1e-5)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
# Convolutional subsampler
args.encoder_speech_prenet = getattr(args, "encoder_speech_prenet", "conv")
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.share_input_output_embed = getattr(args, "share_input_output_embed", False)
args.share_ctc_embed = getattr(args, "share_ctc_embed", False)
args.freeze_encoder_updates = getattr(args, "freeze_encoder_updates", 0)
args.freeze_decoder_updates = getattr(args, "freeze_decoder_updates", 0)
args.no_freeze_encoder_layer = getattr(args, "no_freeze_encoder_layer", None)
## sid
args.sid_embed_dim = getattr(args, "sid_embed_dim", 128)
args.sid_pooling_layer = getattr(args, "sid_pooling_layer", "decoder")
args.softmax_scale = getattr(args, "softmax_scale", 1)
args.softmax_margin = getattr(args, "softmax_margin", 0)
args.softmax_easy_margin = getattr(args, "softmax_easy_margin", False)
args.modules_filter = getattr(args, "modules_filter", None)
## Hubert
args.conv_pos = getattr(args, "conv_pos", 128)
args.conv_pos_groups = getattr(args, "conv_pos_groups", 16)
args.target_glu = getattr(args, "target_glu", False)
args.logit_temp = getattr(args, "logit_temp", 0.1)
args.final_dim = getattr(args, "final_dim", 256)
args.untie_final_proj = getattr(args, "untie_final_proj", True)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 0.1)
args.use_sent_enc_layer = getattr(args, "use_sent_enc_layer", True)
# hubert feature extractor
args.extractor_mode = getattr(args, "extractor_mode", "default")
args.conv_feature_layers = getattr(args, "conv_feature_layers", "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2")
args.conv_bias = getattr(args, "conv_bias", False)
# mask
args.hubert_mask_length = getattr(args, "hubert_mask_length", 10)
args.mask_prob = getattr(args, "mask_prob", 0.0)
args.mask_selection = getattr(args, "mask_selection", "static")
args.mask_other = getattr(args, "mask_other", 0)
args.no_mask_overlap = getattr(args, "no_mask_overlap", False)
args.mask_min_space = getattr(args, "mask_min_space", 1)
# channel mask
args.mask_channel_length = getattr(args, "mask_channel_length", 10)
args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.0)
args.mask_channel_selection = getattr(args, "mask_channel_selection", "static")
args.mask_channel_other = getattr(args, "mask_channel_other", 0)
args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False)
args.mask_channel_min_space = getattr(args, "mask_channel_min_space", 1)
# loss computation
args.skip_masked = getattr(args, "skip_masked", False)
args.skip_nomask = getattr(args, "skip_nomask", False)
# conv Pos
args.use_conv_pos = getattr(args, "use_conv_pos", False)
args.use_sinc_pos = getattr(args, "use_sinc_pos", False)
# codebook
args.use_codebook = getattr(args, "use_codebook", False)
args.latent_vars = getattr(args, "latent_vars", 100)
args.latent_groups = getattr(args, "latent_groups", 2)
args.latent_dim = getattr(args, "latent_dim", 0)
args.latent_temp = getattr(args, "latent_temp", (2, 0.5, 0.999995))
args.quantizer_depth = getattr(args, "quantizer_depth", 1)
args.quantizer_factor = getattr(args, "quantizer_factor", 3)
args.codebook_prob = getattr(args, "codebook_prob", 0.5)
# Relative pos embed
args.relative_position_embedding = getattr(args, "relative_position_embedding", False)
args.num_buckets = getattr(args, "num_buckets", 320)
args.max_distance = getattr(args, "max_distance", 1280)
args.encoder_max_relative_position = getattr(args, "encoder_max_relative_position", 160)
args.decoder_max_relative_position = getattr(args, "decoder_max_relative_position", 160)
@register_model_architecture("t5_transformer", "t5_transformer_base")
def t5_transformer_base(args):
args.use_conv_pos = getattr(args, "use_conv_pos", True)
args.use_sinc_pos = getattr(args, "use_sinc_pos", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.layer_norm_first = getattr(args, "layer_norm_first", False)
args.relative_position_embedding = getattr(args, "relative_position_embedding", True)
args.dropout = getattr(args, "dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.05)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.05)
args.mask_prob = getattr(args, "mask_prob", 0.80)
base_architecture(args)
@register_model_architecture("t5_transformer", "t5_transformer_large")
def t5_transformer_large(args):
args.use_conv_pos = getattr(args, "use_conv_pos", True)
args.use_sinc_pos = getattr(args, "use_sinc_pos", True)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.layer_norm_first = getattr(args, "layer_norm_first", True)
args.relative_position_embedding = getattr(args, "relative_position_embedding", True)
args.dropout = getattr(args, "dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_layers = getattr(args, "encoder_layers", 24)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 1.0)
args.extractor_mode = getattr(args, "extractor_mode", "layer_norm")
args.final_dim = getattr(args, "final_dim", 768)
args.mask_prob = getattr(args, "mask_prob", 0.80)
base_architecture(args)
@register_model_architecture("t5_transformer", "t5_transformer_base_asr")
def t5_transformer_base_asr(args):
args.use_conv_pos = getattr(args, "use_conv_pos", True)
args.use_sinc_pos = getattr(args, "use_sinc_pos", True)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.layer_norm_first = getattr(args, "layer_norm_first", False)
args.relative_position_embedding = getattr(args, "relative_position_embedding", True)
args.dropout = getattr(args, "dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 0.0)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.1)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.1)
args.mask_prob = getattr(args, "mask_prob", 0.75)
args.mask_selection = getattr(args, "mask_selection", "static")
args.mask_channel_length = getattr(args, "mask_channel_length", 64)
args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5)
args.mask_channel_selection = getattr(args, "mask_channel_selection", "static")
args.max_text_positions = getattr(args, "max_text_positions", 600)
base_architecture(args)
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/speecht5.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import (
FairseqIncrementalDecoder,
)
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from torch import Tensor
from .encoder import RelativePositionalEncoding
from .transformer_layer import TransformerDecoderLayer
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
class TransformerDecoder(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *args.decoder_layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
args,
no_encoder_attn=False,
):
self.args = args
super().__init__(None)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.decoder_layerdrop = args.decoder_layerdrop
# self.max_s_positions = args.max_target_positions
export = getattr(args, "export", False)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(args.decoder_embed_dim, eps=args.layer_norm_eps, export=export)
else:
self.layer_norm = None
if args.relative_position_embedding:
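# relative positional encoding with per-head dimensionality, computed once per forward pass and shared by all decoder layers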
self.pos_emb = RelativePositionalEncoding(args.encoder_embed_dim//args.encoder_attention_heads, args.decoder_max_relative_position)
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = TransformerDecoderLayer(args, no_encoder_attn=no_encoder_attn, has_relative_attention_bias=args.relative_position_embedding)
checkpoint = getattr(args, "checkpoint_activations", False)
if checkpoint:
offload_to_cpu = getattr(args, "offload_activations", False)
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = (
getattr(args, "min_params_to_wrap", DEFAULT_MIN_PARAMS_TO_WRAP)
if not checkpoint
else 0
)
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward(
self,
prev_output_tokens,
tgt_mask,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
tgt_mask,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
return x, extra
def extract_features(
self,
prev_output_tokens,
tgt_mask,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
tgt_mask,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
tgt_mask,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
bs = prev_output_tokens.size(0)
if alignment_layer is None:
alignment_layer = self.num_layers - 1
enc: Optional[Tensor] = None
padding_mask: Optional[Tensor] = None
if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
enc = encoder_out["encoder_out"][0]
assert (
enc.size()[1] == bs
), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}"
if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
padding_mask = encoder_out["encoder_padding_mask"][0]
# B x T x C -> T x B x C
x = prev_output_tokens.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or tgt_mask is not None:
self_attn_padding_mask = tgt_mask
## relative position embedding
if self.args.relative_position_embedding:
x_len = x.shape[0]
pos_seq = torch.arange(0, x_len).long().to(x.device)
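# pos_seq[i, j] = i - j: the signed relative distance between decoder positions i and j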
pos_seq = pos_seq[:, None] - pos_seq[None, :]
pos_k, pos_v = self.pos_emb(pos_seq)
else:
pos_k = None
# decoder layers
attn_list = []
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
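# apply the causal (future-masking) self-attention mask only when decoding the whole sequence without cached incremental state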
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
enc,
padding_mask,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer or alignment_layer == -1)),
need_head_weights=bool((idx == alignment_layer or alignment_layer == -1)),
pos_bias=pos_k,
)
inner_states.append(x)
if layer_attn is not None and (idx == alignment_layer or alignment_layer == -1):
attn = layer_attn.float().to(x)
attn_list.append(attn.transpose(0, 1))
if attn is not None and len(attn_list) == 1:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": [attn if len(attn_list) <= 1 else attn_list], "inner_states": inner_states}
# def max_positions(self):
# """Maximum output length supported by the decoder."""
# return self.max_target_positions
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim], device=tensor.device)), 1,
)
else:
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
def _apply(m):
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
self.apply(_apply)
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/decoder.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
@with_incremental_state
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
has_relative_attention_bias=False,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.has_relative_attention_bias = has_relative_attention_bias
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(
nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.v_proj = quant_noise(
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.q_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.out_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
position_bias: Optional[Tensor] = None
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
assert (src_len, bsz) == value.shape[:2]
if (
not self.onnx_trace
and not is_tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
and not self.has_relative_attention_bias
):
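# fast path: delegate to PyTorch's fused multi_head_attention_forward when neither incremental decoding nor relative position bias is involved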
assert key is not None and value is not None
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
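# scaled dot-product attention: pre-scale the queries by 1 / sqrt(head_dim)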
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
if position_bias is not None and self.has_relative_attention_bias:
# first-order relative position bias
# position_bias: (tgt_len, src_len, head_dim)
# reshape q to (tgt_len, bsz * num_heads, head_dim) so each query can be matched against its relative-position keys
reshape_q = q.contiguous().view(bsz * self.num_heads, -1, self.head_dim).transpose(0, 1)
# B: (tgt_len, bsz * num_heads, src_len)
B = torch.matmul(reshape_q, position_bias.transpose(-2, -1))
# -> (bsz * num_heads, tgt_len, src_len), matching attn_weights
B = B.transpose(0, 1).view(bsz * self.num_heads, position_bias.size(0), position_bias.size(1))
attn_weights += B
else:
position_bias = None
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
if src_len > prev_key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask.float()
elif key_padding_mask is not None:
if src_len > key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = key_padding_mask.float()
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/multihead_attention.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import torch.nn as nn
import math
import torch
import torch.nn.functional as F
class AngularMargin(nn.Module):
"""
An implementation of Angular Margin (AM) proposed in the following
paper: '''Margin Matters: Towards More Discriminative Deep Neural Network
Embeddings for Speaker Recognition''' (https://arxiv.org/abs/1906.07317)
Arguments
---------
margin : float
The margin for cosine similarity
scale : float
The scale for cosine similarity
Return
---------
predictions : torch.Tensor
Example
-------
>>> pred = AngularMargin()
>>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])
>>> targets = torch.tensor([ [1., 0.], [0., 1.], [ 1., 0.], [0., 1.] ])
>>> predictions = pred(outputs, targets)
>>> predictions[:,0] > predictions[:,1]
tensor([ True, False, True, False])
"""
def __init__(self, margin=0.0, scale=1.0):
super(AngularMargin, self).__init__()
self.margin = margin
self.scale = scale
def forward(self, outputs, targets):
"""Compute AM between two tensors
Arguments
---------
outputs : torch.Tensor
The outputs of shape [N, C], cosine similarity is required.
targets : torch.Tensor
The targets of shape [N, C], where the margin is applied for.
Return
---------
predictions : torch.Tensor
"""
outputs = outputs - self.margin * targets
return self.scale * outputs
class AdditiveAngularMargin(AngularMargin):
"""
An implementation of Additive Angular Margin (AAM) proposed
in the following paper: '''Margin Matters: Towards More Discriminative Deep
Neural Network Embeddings for Speaker Recognition'''
(https://arxiv.org/abs/1906.07317)
Arguments
---------
margin : float
The margin for cosine similarity, usually 0.2.
scale: float
The scale for cosine similarity, usually 30.
Returns
-------
predictions : torch.Tensor
Tensor.
Example
-------
>>> outputs = torch.tensor([ [1., -1.], [-1., 1.], [0.9, 0.1], [0.1, 0.9] ])
>>> targets = torch.tensor([ [1., 0.], [0., 1.], [ 1., 0.], [0., 1.] ])
>>> pred = AdditiveAngularMargin()
>>> predictions = pred(outputs, targets)
>>> predictions[:,0] > predictions[:,1]
tensor([ True, False, True, False])
"""
def __init__(self, margin=0.0, scale=1.0, easy_margin=False):
super(AdditiveAngularMargin, self).__init__(margin, scale)
self.easy_margin = easy_margin
self.cos_m = math.cos(self.margin)
self.sin_m = math.sin(self.margin)
self.th = math.cos(math.pi - self.margin)
self.mm = math.sin(math.pi - self.margin) * self.margin
def forward(self, outputs, targets):
"""
Compute AAM between two tensors
Arguments
---------
outputs : torch.Tensor
The outputs of shape [N, C], cosine similarity is required.
targets : torch.Tensor
The targets of shape [N, C], where the margin is applied for.
Return
---------
predictions : torch.Tensor
"""
cosine = outputs.float()
sine = torch.sqrt((1.0 - torch.pow(cosine, 2)).clamp(0, 1))
phi = cosine * self.cos_m - sine * self.sin_m # cos(theta + m)
if self.easy_margin:
phi = torch.where(cosine > 0, phi, cosine)
else:
phi = torch.where(cosine > self.th, phi, cosine - self.mm)
outputs = (targets * phi) + ((1.0 - targets) * cosine)
return self.scale * outputs
class SpeakerDecoderPostnet(nn.Module):
"""Speaker Identification Postnet.
Arguments
---------
embed_dim : int
The size of embedding.
class_num: int
The number of classes.
args : Namespace
Return
---------
embed : torch.Tensor
output : torch.Tensor
"""
def __init__(self, embed_dim, class_num, args):
super(SpeakerDecoderPostnet, self).__init__()
self.embed_dim = embed_dim
self.class_num = class_num
self.no_pooling_bn = getattr(args, "sid_no_pooling_bn", False)
self.no_embed_postnet = getattr(args, "sid_no_embed_postnet", False)
self.normalize_postnet = getattr(args, "sid_normalize_postnet", False)
self.softmax_head = getattr(args, "sid_softmax_type", "softmax")
if not self.no_pooling_bn:
self.bn_pooling = nn.BatchNorm1d(args.decoder_output_dim)
else:
self.bn_pooling = None
if not self.no_embed_postnet:
self.output_embedding = nn.Linear(args.decoder_output_dim, embed_dim, bias=False)
self.bn_embedding = nn.BatchNorm1d(embed_dim)
else:
self.output_embedding = None
self.bn_embedding = None
self.embed_dim = args.decoder_output_dim
self.output_projection = nn.Linear(self.embed_dim, class_num, bias=False)
if self.softmax_head == "amsoftmax":
self.output_layer = AngularMargin(args.softmax_margin, args.softmax_scale)
elif self.softmax_head == "aamsoftmax":
self.output_layer = AdditiveAngularMargin(args.softmax_margin, args.softmax_scale, args.softmax_easy_margin)
else:
self.output_layer = None
if self.output_embedding is not None:
nn.init.normal_(self.output_embedding.weight, mean=0, std=embed_dim ** -0.5)
nn.init.normal_(self.output_projection.weight, mean=0, std=class_num ** -0.5)
def forward(self, x, target=None):
"""
Parameters
----------
x : torch.Tensor of shape [batch, channel] or [batch, time, channel]
target : torch.Tensor of shape [batch, channel]
"""
if self.bn_pooling is not None:
x = self.bn_pooling(x)
if self.output_embedding is not None and self.bn_embedding is not None:
embed = self.bn_embedding(self.output_embedding(x))
else:
embed = x
if self.output_layer is not None or self.normalize_postnet:
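# cosine-similarity logits: L2-normalize the embedding and the classifier weights before the linear projection, as required by the AM/AAM softmax heads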
x_norm = F.normalize(embed, p=2, dim=1)
w_norm = F.normalize(self.output_projection.weight, p=2, dim=1) # [out_dim, in_dim]
output = F.linear(x_norm, w_norm)
if self.training and target is not None and self.output_layer is not None:
output = self.output_layer(output, target)
else:
output = self.output_projection(embed)
return output, embed
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speaker_decoder_postnet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import logging
import torch.nn as nn
import torch
logger = logging.getLogger(__name__)
class SpeechEncoderPostnet(nn.Module):
"""
    Postnet that maps encoder outputs to HuBERT-style masked/unmasked prediction logits.
    Args:
        dictionaries (List[Dictionary]): target dictionaries, one per label set
        args (Namespace): model arguments (e.g. final_dim, logit_temp, untie_final_proj)
"""
def __init__(self, dictionaries, args):
super(SpeechEncoderPostnet, self).__init__()
# modules below are not needed during fine-tuning
self.target_glu = args.target_glu
self.skip_masked = args.skip_masked
self.skip_nomask = args.skip_nomask
self.logit_temp = args.logit_temp
final_dim = (
args.final_dim if args.final_dim > 0 else args.encoder_embed_dim
)
if any([d is None for d in dictionaries]):
logger.info(
"cannot find dictionary. assume will be used for fine-tuning"
)
else:
self.num_classes = [len(d) for d in dictionaries]
self.label_embs_concat = nn.Parameter(
torch.FloatTensor(sum(self.num_classes), final_dim)
)
nn.init.uniform_(self.label_embs_concat)
self.untie_final_proj = args.untie_final_proj
if self.untie_final_proj:
self.final_proj = nn.Linear(
args.encoder_embed_dim, final_dim * len(dictionaries)
)
else:
self.final_proj = nn.Linear(args.encoder_embed_dim, final_dim)
def compute_nce(self, x, pos, negs):
neg_is_pos = (pos == negs).all(-1)
pos = pos.unsqueeze(0)
targets = torch.cat([pos, negs], dim=0)
logits = torch.cosine_similarity(
x.float(), targets.float(), dim=-1
).type_as(x)
logits /= self.logit_temp
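        # Negatives identical to the positive are masked out with -inf so they
        # do not act as false negatives in the contrastive loss.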
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
logits = logits.transpose(0, 1) # (num_x, num_cls+1)
return logits
def forward(self, x, padding_mask, mask_indices, target_list):
def compute_pred(proj_x, target, label_embs):
# compute logits for the i-th label set
y = torch.index_select(label_embs, 0, target.long())
negs = label_embs.unsqueeze(1).expand(-1, proj_x.size(0), -1)
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
# proj_x: (S, D)
# y: (S, D)
# negs: (Neg, S, D)
return self.compute_nce(proj_x, y, negs)
label_embs_list = self.label_embs_concat.split(self.num_classes, 0)
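        # Compute prediction logits separately over masked and unmasked frames,
        # skipping either set when the corresponding skip_* flag is enabled.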
if not self.skip_masked:
masked_indices = torch.logical_and(~padding_mask, mask_indices)
proj_x_m = self.final_proj(x[masked_indices])
if self.untie_final_proj:
proj_x_m_list = proj_x_m.chunk(len(target_list), dim=-1)
else:
proj_x_m_list = [proj_x_m for _ in range(len(target_list))]
logit_m_list = [
compute_pred(proj_x_m, t[masked_indices], label_embs_list[i])
for i, (proj_x_m, t) in enumerate(
zip(proj_x_m_list, target_list)
)
]
else:
logit_m_list = [None for _ in target_list]
if not self.skip_nomask:
nomask_indices = torch.logical_and(~padding_mask, ~mask_indices)
proj_x_u = self.final_proj(x[nomask_indices])
if self.untie_final_proj:
proj_x_u_list = proj_x_u.chunk(len(target_list), dim=-1)
else:
proj_x_u_list = [proj_x_u for _ in range(len(target_list))]
logit_u_list = [
compute_pred(proj_x_u, t[nomask_indices], label_embs_list[i])
for i, (proj_x_u, t) in enumerate(
zip(proj_x_u_list, target_list)
)
]
else:
logit_u_list = [None for _ in target_list]
result = {
"logit_m_list": logit_m_list,
"logit_u_list": logit_u_list,
"padding_mask": padding_mask,
}
return result
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speech_encoder_postnet.py |
EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/__init__.py |
|
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
from typing import Dict, List, Optional
import torch
import torch.nn as nn
import contextlib
from fairseq import utils
from fairseq.modules import LayerNorm
from .multihead_attention import MultiheadAttention
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
has_relative_attention_bias: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
has_relative_attention_bias=has_relative_attention_bias,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
if has_relative_attention_bias:
self.norm_k = LayerNorm(self.embedding_dim//num_attention_heads)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
att_args=None,
pos_bias=None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
"""
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
if pos_bias is not None:
pos_bias = self.norm_k(pos_bias)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
attn_mask=self_attn_mask,
position_bias=pos_bias,
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
position_bias=pos_bias,
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
class TransformerDecoderLayer(nn.Module):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*args.decoder_normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, has_relative_attention_bias=False
):
super().__init__()
self.embed_dim = args.decoder_embed_dim
self.num_updates = 0
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.freeze_decoder_updates = getattr(args, "freeze_decoder_updates", 0)
self.self_attn = self.build_self_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = utils.get_activation_fn(
activation=str(args.activation_fn)
if getattr(args, "activation_fn", None) is not None
else "relu"
)
activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.decoder_normalize_before
export = getattr(args, "export", False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = self.build_fc1(
self.embed_dim,
args.decoder_ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
args.decoder_ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
self.has_relative_attention_bias = has_relative_attention_bias
if self.has_relative_attention_bias:
self.norm_k = LayerNorm(self.embed_dim//args.decoder_attention_heads)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(nn.Linear(input_dim, output_dim), q_noise, qn_block_size)
def build_self_attention(
self, embed_dim, args, add_bias_kv=False, add_zero_attn=False
):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
dropout=args.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not getattr(args, "cross_self_attention", False),
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
#has_relative_attention_bias=args.has_relative_attention_bias,
)
def build_encoder_attention(self, embed_dim, args):
return MultiheadAttention(
embed_dim,
args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def residual_connection(self, x, residual):
return residual + x
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
pos_bias=None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
ft = self.freeze_decoder_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if pos_bias is not None:
pos_bias = self.norm_k(pos_bias)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
position_bias=pos_bias,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
with torch.no_grad() if not ft else contextlib.ExitStack():
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self.num_updates = num_updates
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/transformer_layer.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
from typing import Dict, List
import numpy as np
import torch
import torch.nn as nn
import contextlib
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
)
from fairseq.modules import (
FairseqDropout,
LayerNorm,
TransformerEncoderLayer,
)
from torch import Tensor
from .transformer_layer import TransformerSentenceEncoderLayer
DEFAULT_MIN_PARAMS_TO_WRAP = int(1e8)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
class RelativePositionalEncoding(torch.nn.Module):
def __init__(self, d_model, maxlen=1000, embed_v=False):
super(RelativePositionalEncoding, self).__init__()
self.d_model = d_model
self.maxlen = maxlen
self.pe_k = torch.nn.Embedding(2*maxlen, d_model)
if embed_v:
self.pe_v = torch.nn.Embedding(2*maxlen, d_model)
self.embed_v = embed_v
def forward(self, pos_seq):
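        # Clip relative distances to [-maxlen, maxlen - 1] and shift them by maxlen
        # so they index into the (2 * maxlen)-entry embedding table.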
pos_seq[pos_seq < -self.maxlen] = -self.maxlen
pos_seq[pos_seq >= self.maxlen] = self.maxlen - 1
pos_seq = pos_seq + self.maxlen
if self.embed_v:
return self.pe_k(pos_seq), self.pe_v(pos_seq)
else:
return self.pe_k(pos_seq), None
class TransformerEncoder(FairseqEncoder):
"""
Transformer encoder consisting of *args.encoder_layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, args, tgt_dict=None, embed_tokens=None):
self.args = args
super().__init__(None)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.encoder_layerdrop = args.encoder_layerdrop
self.freeze_encoder_updates = args.freeze_encoder_updates
if args.no_freeze_encoder_layer is not None:
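            # no_freeze_encoder_layer is passed as a string (e.g. "[0, 1]") and
            # evaluated into the collection of layer indices that stay trainable.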
self.no_freeze_encoder_layer = eval(args.no_freeze_encoder_layer)
else:
self.no_freeze_encoder_layer = None
self.num_updates = 0
export = getattr(args, "export", False)
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(args) for i in range(args.encoder_layers)]
)
self.num_layers = len(self.layers)
self.use_sent_enc_layer = args.use_sent_enc_layer
self.unb_enc_layer = getattr(args, "unb_enc_layer", -1)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(args.encoder_embed_dim, eps=args.layer_norm_eps, export=export)
if args.share_ctc_embed and embed_tokens is not None:
self.proj = nn.Linear(
embed_tokens.weight.shape[1],
embed_tokens.weight.shape[0],
bias=False,
)
self.proj.weight = embed_tokens.weight
elif tgt_dict is not None:
self.proj = Linear(args.encoder_embed_dim, len(tgt_dict))
else:
self.proj = None
if args.relative_position_embedding:
self.pos_emb = RelativePositionalEncoding(args.encoder_embed_dim//args.encoder_attention_heads, args.encoder_max_relative_position)
def build_encoder_layer(self, args):
if args.use_sent_enc_layer:
layer = TransformerSentenceEncoderLayer(
embedding_dim=args.encoder_embed_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=args.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
has_relative_attention_bias=args.relative_position_embedding,
)
else:
layer = TransformerEncoderLayer(args)
return layer
def forward(
self,
encoder_in,
encoder_padding_mask,
return_all_hiddens: bool = False,
tgt_layer=None,
):
"""
        Args:
            encoder_in (Tensor): encoder input features of shape
                `(batch, src_len, embed_dim)`
            encoder_padding_mask (ByteTensor): positions of padding elements of
                shape `(batch, src_len)`
            return_all_hiddens (bool, optional): also return all of the
                intermediate hidden states (default: False).
            tgt_layer (int, optional): if set, stop after this layer and
                return its output as the encoder output.
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
if self.no_freeze_encoder_layer is None:
ft = self.freeze_encoder_updates <= self.num_updates
else:
ft = True
with torch.no_grad() if not ft else contextlib.ExitStack():
encoder_out = self.forward_scriptable(
encoder_in, encoder_padding_mask, return_all_hiddens, tgt_layer=tgt_layer,
)
# CTC and bert
if self.proj:
x_for_ctc = self.proj(self.dropout_module(encoder_out["encoder_out"][0]))
else:
x_for_ctc = None
encoder_out["encoder_out_for_ctc"] = [x_for_ctc] # T x B x C
return encoder_out
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def forward_scriptable(
self,
encoder_in,
encoder_padding_mask,
return_all_hiddens: bool = False,
tgt_layer=None,
):
"""
        Args:
            encoder_in (Tensor): encoder input features of shape
                `(batch, src_len, embed_dim)`
            encoder_padding_mask (ByteTensor): positions of padding elements of
                shape `(batch, src_len)`
            return_all_hiddens (bool, optional): also return all of the
                intermediate hidden states (default: False).
            tgt_layer (int, optional): if set, stop after this layer and
                return its output as the encoder output.
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
if self.no_freeze_encoder_layer is not None:
ft = self.freeze_encoder_updates <= self.num_updates
else:
ft = True
with torch.no_grad() if not ft else contextlib.ExitStack():
# compute padding mask
if not self.use_sent_enc_layer:
has_pads = encoder_in.device.type == "xla" or encoder_padding_mask.any()
if not self.layer_norm_first:
encoder_in = self.layer_norm(encoder_in)
encoder_in = self.dropout_module(encoder_in)
# B x T x C -> T x B x C
x = encoder_in.transpose(0, 1)
encoder_states = []
if return_all_hiddens:
encoder_states.append(x)
## relative position embedding
if self.args.relative_position_embedding:
x_len = x.shape[0]
pos_seq = torch.arange(0, x_len).long().to(x.device)
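                # pos_seq[i, j] = i - j: signed distance from key position j to query position i.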
pos_seq = pos_seq[:, None] - pos_seq[None, :]
pos_k, pos_v = self.pos_emb(pos_seq)
else:
pos_k = None
# encoder layers
r = None
d = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
with torch.no_grad() if (not ft) and i not in self.no_freeze_encoder_layer else contextlib.ExitStack():
if not self.training or (dropout_probability > self.encoder_layerdrop) or i == self.unb_enc_layer:
if self.use_sent_enc_layer:
x, _ = layer(x, self_attn_padding_mask=encoder_padding_mask, self_attn_mask=None, need_weights=False, pos_bias=pos_k)
# x, _ = layer(x, self_attn_padding_mask=encoder_padding_mask, need_weights=False, pos_bias=pos_k)
else:
x = layer(x, encoder_padding_mask=encoder_padding_mask if has_pads else None, attn_mask=None)
# x = layer(x, encoder_padding_mask=encoder_padding_mask if has_pads else None)
if i == self.unb_enc_layer:
d = x
if i == tgt_layer:
r = x
break
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
with torch.no_grad() if not ft else contextlib.ExitStack():
# Finally T x B x C
if self.layer_norm_first:
x = self.layer_norm(x.transpose(0, 1)).transpose(0, 1)
if r is not None:
x = r
# The Pytorch Mobile lite interpreter does not supports returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": [],
"decoder_input": [d],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_out_for_ctc"]) == 0:
new_x_for_ctc = []
else:
new_x_for_ctc = [encoder_out["encoder_out_for_ctc"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["decoder_input"]) == 0 or encoder_out["decoder_input"][0] is None:
new_decoder_input = []
else:
new_decoder_input = [
encoder_out["decoder_input"][0].index_select(0, new_order)
]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"encoder_out_for_ctc": new_x_for_ctc, # T x B x C
"decoder_input": new_decoder_input,
}
# def max_positions(self):
# """Maximum input length supported by the encoder."""
# return self.max_source_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
# if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
# weights_key = "{}.embed_positions.weights".format(name)
# if weights_key in state_dict:
# print("deleting {0}".format(weights_key))
# del state_dict[weights_key]
# state_dict[
# "{}.embed_positions._float_tensor".format(name)
# ] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
if not isinstance(self.layers[i], TransformerSentenceEncoderLayer):
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/encoder.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import torch.nn as nn
import torch
import contextlib
from fairseq import utils
from fairseq.modules import (
AdaptiveSoftmax,
)
class TextDecoderPostnet(nn.Module):
"""
    Postnet that projects text decoder hidden states to vocabulary logits.
    Args:
        embed_tokens (nn.Embedding): decoder token embedding (shared with the output projection when enabled)
        dictionary (~fairseq.data.Dictionary): output vocabulary
        args (Namespace): model arguments
"""
def __init__(self, embed_tokens, dictionary, args, output_projection=None,):
super(TextDecoderPostnet, self).__init__()
self.output_embed_dim = args.decoder_output_dim
self.output_projection = output_projection
self.adaptive_softmax = None
self.share_input_output_embed = args.share_input_output_embed
if self.output_projection is None:
self.build_output_projection(args, dictionary, embed_tokens)
self.freeze_decoder_updates = args.freeze_decoder_updates
self.num_updates = 0
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def build_output_projection(self, args, dictionary, embed_tokens):
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
utils.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
elif self.share_input_output_embed:
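            # Tie the output projection to the input embedding matrix (weight sharing).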
self.output_projection = nn.Linear(
embed_tokens.weight.shape[1],
embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
)
# num_base_layers = getattr(args, "base_layers", 0)
# for i in range(num_base_layers):
# self.layers.insert(
# ((i + 1) * args.decoder_layers) // (num_base_layers + 1),
# BaseLayer(args),
# )
def forward(self, x):
ft = self.freeze_decoder_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
return self._forward(x)
def _forward(self, x):
# embed positions
x = self.output_layer(x)
return x
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self.num_updates = num_updates
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/text_decoder_postnet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import logging
import math
import torch
import contextlib
from typing import List, Tuple
import torch.nn as nn
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.data.data_utils import compute_mask_indices
from fairseq.modules import (
PositionalEmbedding,
Fp32GroupNorm,
FairseqDropout,
SamePad,
GradMultiply,
LayerNorm,
Fp32LayerNorm,
TransposeLast,
)
import numpy as np
logger = logging.getLogger(__name__)
class LinearLayer(nn.Module):
    def __init__(self, idim, odim, dropout=0):
        super(LinearLayer, self).__init__()
        self.linear = nn.Sequential(
            nn.Linear(idim, odim),
            nn.LayerNorm(odim),
            nn.Dropout(dropout),
            nn.ReLU(),
        )
def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
out = in_seq_lens_tensor.clone()
return out
def forward(self, src_tokens, src_lengths):
"""
src_tokens: [B, T, C]
src_lengths: [B]
"""
x = self.linear(src_tokens)
x = x.transpose(0, 1).contiguous() # -> T x B x C
return x, src_lengths
class SpeechEncoderPrenet(nn.Module):
"""
    Waveform prenet for the speech encoder: a convolutional feature extractor
    (wav2vec 2.0 / HuBERT style) followed by optional positional embeddings and span masking.
    Args:
        args (Namespace): model arguments (conv_feature_layers, mask_* options, positional-embedding options, ...)
"""
def __init__(self, args):
super(SpeechEncoderPrenet, self).__init__()
self.dropout_module = FairseqDropout(
p=args.dropout, module_name=self.__class__.__name__
)
self.embed_scale = math.sqrt(args.encoder_embed_dim)
if args.no_scale_embedding:
self.embed_scale = 1.0
self.padding_idx = 1
self.freeze_encoder_updates = args.freeze_encoder_updates
self.num_updates = 0
assert args.encoder_speech_prenet in ["conv", "linear"], args.encoder_speech_prenet
feature_enc_layers = eval(args.conv_feature_layers) # noqa
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=args.extractor_mode,
conv_bias=args.conv_bias,
)
feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers])
self.feat2tar_ratio = (
args.label_rates * feature_ds_rate / args.sample_rate
)
self.post_extract_proj = (
nn.Linear(self.embed, args.encoder_embed_dim)
if self.embed != args.encoder_embed_dim
else None
)
self.use_conv_pos = args.use_conv_pos
self.use_sinc_pos = args.use_sinc_pos
self.use_abs_pos = getattr(args, "use_abs_pos", False)
self.feature_grad_mult = args.feature_grad_mult
if self.use_conv_pos:
self.layer_norm = LayerNorm(self.embed)
self.pos_conv = nn.Conv1d(
args.encoder_embed_dim,
args.encoder_embed_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * args.encoder_embed_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
assert not (self.use_sinc_pos and self.use_abs_pos), f"sinc pos: {self.use_sinc_pos} abs pos: {self.use_abs_pos}"
if self.use_sinc_pos:
self.embed_positions = PositionalEmbedding(
args.max_speech_positions, args.encoder_embed_dim, self.padding_idx
)
if self.use_abs_pos:
self.embed_positions = PositionalEmbedding(
args.max_speech_positions, args.encoder_embed_dim, self.padding_idx, learned=True
)
# Hubert
self.mask_prob = args.mask_prob
self.mask_selection = args.mask_selection
self.mask_other = args.mask_other
self.hubert_mask_length = args.hubert_mask_length
self.no_mask_overlap = args.no_mask_overlap
self.mask_min_space = args.mask_min_space
self.mask_channel_prob = args.mask_channel_prob
self.mask_channel_selection = args.mask_channel_selection
self.mask_channel_other = args.mask_channel_other
self.mask_channel_length = args.mask_channel_length
self.no_mask_channel_overlap = args.no_mask_channel_overlap
self.mask_channel_min_space = args.mask_channel_min_space
self.mask_emb = nn.Parameter(
torch.FloatTensor(args.encoder_embed_dim).uniform_()
)
def forward(self, src_tokens, require_feat_pen=False, target_list=None, padding_mask=None, mask=True):
ft = self.freeze_encoder_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
return self._forward(src_tokens, require_feat_pen, target_list, padding_mask, mask)
def _forward(self, src_tokens, require_feat_pen=False, target_list=None, padding_mask=None, mask=True):
if self.feature_grad_mult > 0:
x = self.feature_extractor(src_tokens)
x = x.transpose(1, 2).transpose(0, 1) # [length, batch, hidden_size]
if self.feature_grad_mult != 1.0:
x = GradMultiply.apply(x, self.feature_grad_mult)
else:
with torch.no_grad():
x = self.feature_extractor(src_tokens)
x = x.transpose(1, 2).transpose(0, 1) # [length, batch, hidden_size]
x = x.transpose(0, 1) # [batch, length, hidden_size]
encoder_padding_mask = padding_mask
x = x.transpose(1, 2) # [batch, hidden_size, length]
if target_list is not None:
x, target_list = self.forward_targets(x, target_list)
features_pen = x.float().pow(2).mean()
x = x.transpose(1, 2) # [batch, length, hidden_size]
x = self.layer_norm(x)
encoder_padding_mask = self.forward_padding_mask(x, encoder_padding_mask)
if self.post_extract_proj is not None:
x = self.post_extract_proj(x)
x = self.dropout_module(x)
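        # Optionally apply HuBERT-style span masking over time (and channels);
        # masked time steps are replaced by the learned mask embedding.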
if mask:
x, mask_indices = self.apply_hubert_mask(
x, encoder_padding_mask
)
else:
x = x
mask_indices = None
if self.use_conv_pos:
positions = self.pos_conv(x.transpose(1, 2))
positions = positions.transpose(1, 2)
#else:
# positions = self.embed_positions(encoder_padding_mask)
x = x + positions
if self.use_sinc_pos:
positions = self.embed_positions(encoder_padding_mask)
x = x + positions
# x = self.dropout_module(x)
if require_feat_pen:
return (x, features_pen, mask_indices, target_list), encoder_padding_mask
else:
# For consistence with encoder
return x, encoder_padding_mask
def forward_targets(
self, features: torch.Tensor, target_list: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
# Trim features to ensure labels exist and then get aligned labels
feat_tsz = features.size(2)
targ_tsz = min([t.size(1) for t in target_list])
if self.feat2tar_ratio * feat_tsz > targ_tsz:
feat_tsz = int(targ_tsz / self.feat2tar_ratio)
features = features[..., :feat_tsz]
target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio
target_list = [t[:, target_inds.long()] for t in target_list]
return features, target_list
def forward_padding_mask(
self, features: torch.Tensor, padding_mask: torch.Tensor,
) -> torch.Tensor:
extra = padding_mask.size(1) % features.size(1)
if extra > 0:
padding_mask = padding_mask[:, :-extra]
padding_mask = padding_mask.view(
padding_mask.size(0), features.size(1), -1
)
padding_mask = padding_mask.all(-1)
return padding_mask
def get_src_lengths(self, src_lengths):
return self.feature_extractor.get_out_seq_lens_tensor(src_lengths)
def apply_hubert_mask(self, x, padding_mask):
B, T, C = x.shape
if self.mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.hubert_mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self.num_updates = num_updates
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
assert (
is_layer_norm and is_group_norm
) == False, "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=True),
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(dim, dim, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
in_d = 1
self.conv_layers = nn.ModuleList()
self.conv_layers_infos = conv_layers
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
x = conv(x)
return x
def get_out_seq_lens_nonmask_after_a_layer(self, in_seq_lens_tensor, i):
"""Returns the out_seq_lens_nonmask 0/1 tensor after a layer.
Args:
in_seq_lens_tensor (LongTensor): length
Returns:
LongTensor: length
"""
out_lengths = in_seq_lens_tensor.clone()
out_lengths = ((out_lengths.float() - (self.conv_layers_infos[i][1] - 1) - 1) / self.conv_layers_infos[i][-1] + 1).floor().long()
out_nonmask = (~lengths_to_padding_mask(out_lengths)).float()
return out_nonmask, out_lengths
def get_out_seq_lens_tensor(self, in_seq_lens_tensor):
out = in_seq_lens_tensor.clone()
for i in range(len(self.conv_layers)):
out = ((out.float() - (self.conv_layers_infos[i][1] - 1) - 1) / self.conv_layers_infos[i][-1] + 1).floor().long()
return out
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speech_encoder_prenet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import contextlib
import torch
import torch.nn as nn
from espnet.nets.pytorch_backend.tacotron2.decoder import Postnet
class SpeechDecoderPostnet(nn.Module):
"""
    Postnet that converts decoder states into mel-spectrogram frames and stop-token logits.
    Args:
        odim (int): output feature dimension (number of mel bins)
        args (Namespace): model arguments (reduction_factor, postnet_* options, ...)
"""
def __init__(
self,
odim,
args,
):
super(SpeechDecoderPostnet, self).__init__()
# define decoder postnet
# define final projection
self.feat_out = torch.nn.Linear(args.decoder_embed_dim, odim * args.reduction_factor)
self.prob_out = torch.nn.Linear(args.decoder_embed_dim, args.reduction_factor)
# define postnet
self.postnet = (
None
if args.postnet_layers == 0
else Postnet(
idim=0,
odim=odim,
n_layers=args.postnet_layers,
n_chans=args.postnet_chans,
n_filts=args.postnet_filts,
use_batch_norm=args.use_batch_norm,
dropout_rate=args.postnet_dropout_rate,
)
)
self.odim = odim
self.num_updates = 0
self.freeze_decoder_updates = args.freeze_decoder_updates
def forward(self, zs):
ft = self.freeze_decoder_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
# (B, Lmax//r, odim * r) -> (B, Lmax//r * r, odim)
before_outs = self.feat_out(zs).view(zs.size(0), -1, self.odim)
# (B, Lmax//r, r) -> (B, Lmax//r * r)
logits = self.prob_out(zs).view(zs.size(0), -1)
# postnet -> (B, Lmax//r * r, odim)
if self.postnet is None:
after_outs = before_outs
else:
after_outs = before_outs + self.postnet(
before_outs.transpose(1, 2)
).transpose(1, 2)
return before_outs, after_outs, logits
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self.num_updates = num_updates
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speech_decoder_postnet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import contextlib
import torch
import torch.nn as nn
import torch.nn.functional as F
from espnet.nets.pytorch_backend.tacotron2.decoder import Prenet as TacotronDecoderPrenet
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.embedding import ScaledPositionalEncoding
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
class SpeechDecoderPrenet(nn.Module):
"""
    Prenet that embeds previous mel-spectrogram frames, adds positional encodings,
    and optionally fuses a speaker embedding, before the speech decoder.
    Args:
        odim (int): input feature dimension (number of mel bins)
        args (Namespace): model arguments (dprenet_* and spk_embed_* options, ...)
"""
def __init__(
self,
odim,
args,
):
super(SpeechDecoderPrenet, self).__init__()
# define decoder prenet
if args.dprenet_layers != 0:
# decoder prenet
decoder_input_layer = torch.nn.Sequential(
TacotronDecoderPrenet(
idim=odim,
n_layers=args.dprenet_layers,
n_units=args.dprenet_units,
dropout_rate=args.dprenet_dropout_rate,
),
torch.nn.Linear(args.dprenet_units, args.decoder_embed_dim),
)
else:
decoder_input_layer = "linear"
pos_enc_class = (
ScaledPositionalEncoding if args.dec_use_scaled_pos_enc else PositionalEncoding
)
if decoder_input_layer == "linear":
self.decoder_prenet = torch.nn.Sequential(
torch.nn.Linear(odim, args.decoder_embed_dim),
torch.nn.LayerNorm(args.decoder_embed_dim),
torch.nn.Dropout(args.transformer_dec_dropout_rate),
torch.nn.ReLU(),
pos_enc_class(args.decoder_embed_dim, args.transformer_dec_positional_dropout_rate),
)
elif isinstance(decoder_input_layer, torch.nn.Module):
self.decoder_prenet = torch.nn.Sequential(
decoder_input_layer, pos_enc_class(args.decoder_embed_dim, args.transformer_dec_positional_dropout_rate, max_len=args.max_speech_positions)
)
if args.spk_embed_integration_type == 'pre':
self.spkembs_layer = torch.nn.Sequential(
torch.nn.Linear(args.spk_embed_dim + args.decoder_embed_dim, args.decoder_embed_dim), torch.nn.ReLU()
)
self.num_updates = 0
self.freeze_decoder_updates = args.freeze_decoder_updates
def forward(self, prev_output_tokens, tgt_lengths_in=None, spkembs=None):
ft = self.freeze_decoder_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
prev_output_tokens = self.decoder_prenet(prev_output_tokens)
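            # "pre" speaker integration: L2-normalize the speaker embedding, broadcast it
            # over time, and fuse it with the prenet output through a linear + ReLU layer.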
if spkembs is not None:
spkembs = F.normalize(spkembs).unsqueeze(1).expand(-1, prev_output_tokens.size(1), -1)
prev_output_tokens = self.spkembs_layer(torch.cat([prev_output_tokens, spkembs], dim=-1))
if tgt_lengths_in is not None:
tgt_frames_mask = ~(self._source_mask(tgt_lengths_in).squeeze(1))
else:
tgt_frames_mask = None
return prev_output_tokens, tgt_frames_mask
def _source_mask(self, ilens):
"""Make masks for self-attention.
Args:
ilens (LongTensor or List): Batch of lengths (B,).
Returns:
Tensor: Mask tensor for self-attention.
dtype=torch.uint8 in PyTorch 1.2-
dtype=torch.bool in PyTorch 1.2+ (including 1.2)
Examples:
>>> ilens = [5, 3]
>>> self._source_mask(ilens)
            tensor([[[1, 1, 1, 1, 1]],
                    [[1, 1, 1, 0, 0]]], dtype=torch.uint8)
"""
x_masks = make_non_pad_mask(ilens).to(next(self.parameters()).device)
return x_masks.unsqueeze(-2)
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self.num_updates = num_updates
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/speech_decoder_prenet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import math
import torch.nn as nn
import torch
import contextlib
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from fairseq.models.transformer import Linear, LayerNorm
from fairseq.modules import (
PositionalEmbedding,
FairseqDropout,
)
class TextDecoderPrenet(nn.Module):
"""
    Prenet that embeds previous output tokens and adds positional embeddings for the text decoder.
    Args:
        embed_tokens (nn.Embedding): decoder token embedding
        args (Namespace): model arguments
"""
def __init__(self, embed_tokens, args):
super(TextDecoderPrenet, self).__init__()
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.decoder_layerdrop = args.decoder_layerdrop
self.num_updates = 0
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
args.max_text_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
export = getattr(args, "export", False)
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim, export=export)
else:
self.layernorm_embedding = None
self.freeze_decoder_updates = args.freeze_decoder_updates
def forward(self, prev_output_tokens, incremental_state=None):
ft = self.freeze_decoder_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
return self._forward(prev_output_tokens, incremental_state)
def _forward(self, prev_output_tokens, incremental_state=None):
if prev_output_tokens.eq(self.padding_idx).any():
x_mask = prev_output_tokens.eq(self.padding_idx)
else:
x_mask = None
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
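        # During incremental decoding only the newest token (and its position) is embedded.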
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
return x, x_mask, incremental_state
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self.num_updates = num_updates
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/text_decoder_prenet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import torch.nn as nn
from espnet.nets.pytorch_backend.transformer.embedding import PositionalEncoding
from espnet.nets.pytorch_backend.transformer.embedding import ScaledPositionalEncoding
class TextEncoderPrenet(nn.Module):
"""
    Prenet that embeds input tokens and adds positional encodings for the text encoder.
    Args:
        embed_tokens (nn.Embedding): encoder token embedding
        args (Namespace): model arguments
"""
def __init__(
self,
embed_tokens,
args,
):
super(TextEncoderPrenet, self).__init__()
self.padding_idx = embed_tokens.padding_idx
# define encoder prenet
# get positional encoding class
pos_enc_class = (
ScaledPositionalEncoding if args.enc_use_scaled_pos_enc else PositionalEncoding
)
self.encoder_prenet = nn.Sequential(
embed_tokens,
pos_enc_class(args.encoder_embed_dim, args.transformer_enc_positional_dropout_rate, max_len=args.max_text_positions),
)
def forward(self, src_tokens):
return self.encoder_prenet(src_tokens), src_tokens.eq(self.padding_idx)
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/models/modules/text_encoder_prenet.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import logging
import os
from typing import Any, List, Optional
import librosa
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
):
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out
def load_audio(manifest_path, max_keep, min_keep):
"""manifest tsv: src_wav, src_nframe, tgt_wav, tgt_nframe, tgt_spkemb"""
n_long, n_short = 0, 0
src_names, tgt_names, inds, sizes, tgt_sizes, spk_embeds = [], [], [], [], [], []
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) >= 2, line
sz = int(items[1])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
src_names.append(items[0])
tgt_names.append(items[2])
tgt_sizes.append(items[3])
spk_embeds.append(items[4])
inds.append(ind)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(src_names)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, src_names, inds, tot, sizes, tgt_names, tgt_sizes, spk_embeds
def logmelfilterbank(
audio,
sampling_rate,
fft_size=1024,
hop_size=256,
win_length=None,
window="hann",
num_mels=80,
fmin=80,
fmax=7600,
eps=1e-10,
):
"""Compute log-Mel filterbank feature.
(https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py)
Args:
audio (ndarray): Audio signal (T,).
sampling_rate (int): Sampling rate.
fft_size (int): FFT size.
hop_size (int): Hop size.
win_length (int): Window length. If set to None, it will be the same as fft_size.
window (str): Window function type.
num_mels (int): Number of mel basis.
fmin (int): Minimum frequency in mel basis calculation.
fmax (int): Maximum frequency in mel basis calculation.
eps (float): Epsilon value to avoid inf in log calculation.
Returns:
ndarray: Log Mel filterbank feature (#frames, num_mels).
"""
# get amplitude spectrogram
x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,
win_length=win_length, window=window, pad_mode="reflect")
spc = np.abs(x_stft).T # (#frames, #bins)
# get mel basis
fmin = 0 if fmin is None else fmin
fmax = sampling_rate / 2 if fmax is None else fmax
mel_basis = librosa.filters.mel(sr=sampling_rate, n_fft=fft_size, n_mels=num_mels, fmin=fmin, fmax=fmax)
return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))
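# A minimal usage sketch for logmelfilterbank (illustrative only; "example.wav" is a
# hypothetical mono 16 kHz file, not part of this repository):
#
#   import soundfile as sf
#   wav, sr = sf.read("example.wav")   # wav: (T,), sr: 16000
#   feats = logmelfilterbank(wav, sr)  # feats: (#frames, 80) log-Mel features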
class SpeechToSpeechDataset(FairseqDataset):
def __init__(
self,
manifest_path: str,
sample_rate: float,
max_keep_sample_size: Optional[int] = None,
min_keep_sample_size: Optional[int] = None,
shuffle: bool = True,
normalize: bool = False,
reduction_factor: int = 1,
):
self.audio_root, self.audio_names, inds, tot, self.wav_sizes, self.tgt_audios, self.tgt_sizes, self.tgt_spkembs = load_audio(
manifest_path, max_keep_sample_size, min_keep_sample_size
)
self.sample_rate = sample_rate
self.shuffle = shuffle
self.normalize = normalize
self.reduction_factor = reduction_factor
logger.info(
f"reduction_factor={reduction_factor}, normalize={normalize}"
)
def get_audio(self, index):
import soundfile as sf
wav_fbank = []
for name in [self.audio_names[index], self.tgt_audios[index]]:
wav_path = os.path.join(self.audio_root, name)
wav, cur_sample_rate = sf.read(wav_path)
wav = torch.from_numpy(wav).float()
fbank = logmelfilterbank(
wav.view(-1).cpu().numpy(), 16000
)
fbank = torch.from_numpy(fbank).float()
wav = self.postprocess(wav, cur_sample_rate)
wav_fbank.append(wav)
wav_fbank.append(fbank)
src_wav, src_fbank, tgt_wav, tgt_fbank = wav_fbank
return src_wav, src_fbank, tgt_wav, tgt_fbank
def __getitem__(self, index):
src_wav, src_fbank, tgt_wav, tgt_fbank = self.get_audio(index)
spkembs = np.load(os.path.join(self.audio_root, self.tgt_spkembs[index]))
spkembs = torch.from_numpy(spkembs).float()
name = self.audio_names[index].replace("/", ".").replace(".wav", "") + "-" + self.tgt_audios[index].replace("/", ".").replace(".wav", "") + ".wav"
return {"id": index, "source": src_wav, "target": tgt_fbank, "spkembs": spkembs, "audio_name": name, "tgt_name": self.tgt_audios[index]}
def __len__(self):
return len(self.wav_sizes)
def collater(self, samples):
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
audios = [s["source"] for s in samples]
audio_sizes = [len(s) for s in audios]
audio_size = max(audio_sizes)
collated_audios, padding_mask = self.collater_audio(
audios, audio_size
)
fbanks = [s["target"] for s in samples]
fbank_sizes = [len(s) for s in fbanks]
collated_fbanks = _collate_frames(fbanks)
collated_fbanks_size = torch.tensor(fbank_sizes, dtype=torch.long)
# thin out frames for reduction factor (B, Lmax, odim) -> (B, Lmax//r, odim)
if self.reduction_factor > 1:
collated_fbanks_in = collated_fbanks[:, self.reduction_factor - 1 :: self.reduction_factor]
collated_fbanks_size_in = collated_fbanks_size.new([torch.div(olen, self.reduction_factor, rounding_mode='floor') for olen in collated_fbanks_size])
else:
collated_fbanks_in, collated_fbanks_size_in = collated_fbanks, collated_fbanks_size
prev_output_tokens = torch.cat(
[collated_fbanks_in.new_zeros((collated_fbanks_in.shape[0], 1, collated_fbanks_in.shape[2])), collated_fbanks_in[:, :-1]], dim=1
)
# make labels for stop prediction
labels = collated_fbanks.new_zeros(collated_fbanks.size(0), collated_fbanks.size(1))
for i, l in enumerate(fbank_sizes):
labels[i, l - 1 :] = 1.0
spkembs = _collate_frames([s["spkembs"] for s in samples], is_audio_input=True)
net_input = {
"source": collated_audios,
"padding_mask": padding_mask,
"prev_output_tokens": prev_output_tokens,
"tgt_lengths": collated_fbanks_size_in,
"spkembs": spkembs,
"task_name": "s2s",
}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"name": [s["audio_name"] for s in samples],
"tgt_name": [s["tgt_name"] for s in samples],
"net_input": net_input,
"labels": labels,
"dec_target": collated_fbanks,
"dec_target_lengths": collated_fbanks_size,
"src_lengths": torch.LongTensor(audio_sizes),
"task_name": "s2s",
"ntokens": sum(audio_sizes),
"target": collated_fbanks,
}
return batch
def collater_audio(self, audios, audio_size):
collated_audios = audios[0].new_zeros(len(audios), audio_size)
padding_mask = (
torch.BoolTensor(collated_audios.shape).fill_(False)
)
for i, audio in enumerate(audios):
diff = len(audio) - audio_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
collated_audios[i] = torch.cat([audio, audio.new_full((-diff,), 0.0)])
padding_mask[i, diff:] = True
else:
raise Exception("Diff should not be larger than 0")
return collated_audios, padding_mask
def num_tokens(self, index):
return self.wav_sizes[index]
def size(self, index):
return self.wav_sizes[index], self.tgt_sizes[index]
@property
def sizes(self):
return np.array(self.wav_sizes)
@property
def can_reuse_epoch_itr_across_epochs(self):
"""No cache dataset if dataset is large-scale. Cache dataset for small dataset."""
return True
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.wav_sizes)
return np.lexsort(order)[::-1]
def postprocess(self, wav, cur_sample_rate):
if wav.dim() == 2:
wav = wav.mean(-1)
assert wav.dim() == 1, wav.dim()
if cur_sample_rate != self.sample_rate:
raise Exception(f"sr {cur_sample_rate} != {self.sample_rate}")
if self.normalize:
with torch.no_grad():
wav = F.layer_norm(wav, wav.shape)
return wav
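# Illustrative sketch (not part of the original code): how the collater thins the
# target spectrogram by reduction_factor and builds the shifted decoder input
# (a zero frame prepended, last frame dropped). Shapes are made up.
def _demo_reduction_and_shift(reduction_factor=2):
    fbanks = torch.randn(3, 100, 80)                                     # (B, Lmax, odim)
    fbanks_in = fbanks[:, reduction_factor - 1 :: reduction_factor]      # (B, Lmax//r, odim)
    prev_output_tokens = torch.cat(
        [fbanks_in.new_zeros((fbanks_in.shape[0], 1, fbanks_in.shape[2])), fbanks_in[:, :-1]],
        dim=1,
    )
    return fbanks_in.shape, prev_output_tokens.shape                     # both (3, 50, 80)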
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/speech_to_speech_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import bisect
import logging
import numpy as np
from torch.utils.data.dataloader import default_collate
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
class MultitaskDataset(FairseqDataset):
@staticmethod
def cumsum(sequence):
r, s = [], 0
for e in sequence:
curr_len = len(e)
r.append(curr_len + s)
s += curr_len
return r
def __init__(self, datasets, sample_ratios=1, batch_ratio=None):
super(MultitaskDataset, self).__init__()
assert len(datasets) > 0, "datasets should not be an empty iterable"
self.datasets = list(datasets)
        if isinstance(sample_ratios, int):
            sample_ratios = [sample_ratios] * len(self.datasets)
        else:
            logger.info('set sample ratio to ' + str(sample_ratios))
        if batch_ratio is not None:
            logger.info('batch ratio is ' + str(batch_ratio))
            self.batch_ratio = batch_ratio
        else:
            self.batch_ratio = None
self.sample_ratios = sample_ratios
self._ordered_indices = None
self._update_size()
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
sample = self.datasets[dataset_idx][sample_idx]
if isinstance(sample, dict):
sample["dataset_idx"] = dataset_idx
else:
sample = sample + (dataset_idx,)
return sample
def _update_size(self):
self.cumulative_sizes = self.cumsum(self.datasets)
self.real_sizes = [len(d) for d in self.datasets]
def _get_dataset_and_sample_index(self, idx: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
sample_idx = sample_idx % self.real_sizes[dataset_idx]
return dataset_idx, sample_idx
def collater(self, samples, **extra_args):
# For now only supports datasets with same underlying collater implementations
if samples is not None and len(samples) > 0:
if isinstance(samples[0], dict):
dataset_idx = samples[0]["dataset_idx"]
else:
dataset_idx = samples[0][-1]
samples = [sample[:-1] for sample in samples]
else:
dataset_idx = 0
if hasattr(self.datasets[dataset_idx], "collater"):
return self.datasets[dataset_idx].collater(samples, **extra_args)
else:
return default_collate(samples, **extra_args)
def size(self, idx: int):
"""
Return an example's size as a float or tuple.
"""
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx].size(sample_idx)
def num_tokens(self, index: int):
return np.max(self.size(index))
def attr(self, attr: str, index: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
return getattr(self.datasets[dataset_idx], attr, None)
@property
def sizes(self):
_dataset_sizes = []
for ds in self.datasets:
if isinstance(ds.sizes, np.ndarray):
_dataset_sizes.append(ds.sizes)
else:
# Only support underlying dataset with single size array.
assert isinstance(ds.sizes, list)
_dataset_sizes.append(ds.sizes[0])
return np.concatenate(_dataset_sizes)
@property
def supports_prefetch(self):
return all(d.supports_prefetch for d in self.datasets)
def ordered_indices(self):
# ordered_indices = []
# for i, dataset in enumerate(self.datasets):
# indice = dataset.ordered_indices()
# ordered_indices.append(indice)
if self._ordered_indices is None:
# Call the underlying dataset's ordered_indices() here, so that we
# get the same random ordering as we would have from using the
# underlying sub-datasets directly.
self._ordered_indices = [
dataset.ordered_indices()
for dataset in self.datasets
]
return np.arange(len(self))
def prefetch(self, indices):
frm = 0
for to, ds in zip(self.cumulative_sizes, self.datasets):
real_size = len(ds)
if getattr(ds, "supports_prefetch", False):
ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to])
frm = to
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
if not hasattr(self, "max_tokens"):
self.max_tokens = max_tokens
if not hasattr(self, "max_sentences"):
self.max_sentences = max_sentences
if not hasattr(self, "required_batch_size_multiple"):
self.required_batch_size_multiple = required_batch_size_multiple
batch_samplers = []
for i, dataset in enumerate(self.datasets):
batch_sampler = dataset.batch_by_size(
self._ordered_indices[i],
max_tokens=max_tokens if self.batch_ratio is None else max_tokens * self.batch_ratio[i],
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
if i > 0:
for batch in batch_sampler:
batch += self.cumulative_sizes[i - 1]
if self.sample_ratios[i] != 1.0:
batch_sampler = np.array(batch_sampler)
batch_sampler = np.random.choice(batch_sampler, int(len(batch_sampler) * self.sample_ratios[i]))
batch_sampler = list(batch_sampler)
logger.info('Adjust batch by ratio ' + str(self.sample_ratios[i]) + ' and the number of batch is ' + str(int(len(batch_sampler))) + ' for dataset ' + str(i))
batch_samplers.extend(batch_sampler)
return batch_samplers
def filter_indices_by_size(self, indices, max_positions):
"""
Filter each sub-dataset independently, then update the round robin to work
on the filtered sub-datasets.
"""
if not hasattr(self, "max_positions"):
self.max_positions = max_positions
ignored_some = False
for i in range(len(self.datasets)):
# ignored = []
self._ordered_indices[i], ignored = self.datasets[i].filter_indices_by_size(
self._ordered_indices[i], self.max_positions[i]
)
if len(ignored) > 0:
ignored_some = True
logger.warning(
f"{len(ignored)} samples from {i} have invalid sizes and will be skipped, "
f"max_positions={self.max_positions[i]}, first few sample ids={ignored[:10]}"
)
logger.info('update dataset size')
self._update_size()
# Since we are modifying in place the _ordered_indices,
# it's not possible anymore to return valid ignored indices.
# Hopefully the extra debug information print above should be enough to debug.
# Ideally we would receive ignore_invalid_inputs so that we could have
# a proper error message.
return (np.arange(len(self)), [0] if ignored_some else [])
@property
def can_reuse_epoch_itr_across_epochs(self):
return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch)
def shuffle_batches(self, batches, seed):
logger.info("shuffle batches")
new_batches_fromlist = []
new_batches_notlist = []
new_batches = []
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
for batch in batches:
if isinstance(batch, list):
# np.random.shuffle(batch)
new_batches_fromlist.append(batch)
else:
new_batches_notlist.append(batch)
logger.info("Get " + str(len(new_batches_fromlist)) + " chunk from speech sides")
logger.info("Get " + str(sum([len(batch_list) for batch_list in new_batches_fromlist])) + " batches from speech sides")
logger.info("Get " + str(len(new_batches_notlist)) + " batches from text sides")
if len(new_batches_fromlist) == 0:
return new_batches_notlist
st_ratio = int(len(new_batches_notlist) / len(new_batches_fromlist))
logger.info("Get st_ratio " + str(st_ratio))
last_idx = 0
for i in range(len(new_batches_fromlist)):
if i == len(new_batches_fromlist) - 1:
new_batches_fromlist[i].extend(new_batches_notlist[last_idx:])
else:
new_batches_fromlist[i].extend(new_batches_notlist[last_idx : last_idx + st_ratio])
np.random.shuffle(new_batches_fromlist[i])
new_batches.extend(new_batches_fromlist[i])
last_idx = last_idx + st_ratio
logger.info("Finish shuffle")
return new_batches
def reset_batch_sampler(self):
logger.info("reset batch sampler")
self._ordered_indices = [
self.datasets[i].ordered_indices()
for i in range(len(self.datasets))
]
self.filter_indices_by_size(None, None)
batch_samplers = self.batch_by_size(
None,
self.max_tokens,
self.max_sentences,
self.required_batch_size_multiple
)
return batch_samplers
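# Illustrative sketch (not part of the original SpeechT5 code): how a flat sample
# index is mapped back to (dataset_idx, sample_idx) with bisect over the cumulative
# sizes, mirroring MultitaskDataset._get_dataset_and_sample_index. Sizes are made up.
def _demo_flat_index_mapping(idx=7, sizes=(5, 4, 6)):
    cumulative, s = [], 0
    for n in sizes:                      # mirrors MultitaskDataset.cumsum -> [5, 9, 15]
        s += n
        cumulative.append(s)
    dataset_idx = bisect.bisect_right(cumulative, idx)
    sample_idx = idx if dataset_idx == 0 else idx - cumulative[dataset_idx - 1]
    return dataset_idx, sample_idx       # idx=7 -> (1, 2): third sample of the second dataset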
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/multitask_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
):
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out
def add_first_frame_and_remove_last_frame(ys):
ys_in = torch.cat(
[ys.new_zeros((ys.shape[0], 1, ys.shape[2])), ys[:, :-1]], dim=1
)
return ys_in
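# Illustrative sketch (not part of the original code): the decoder-input shift done
# by add_first_frame_and_remove_last_frame (prepend an all-zero frame, drop the last
# frame), keeping the sequence length unchanged.
def _demo_shift_decoder_input():
    ys = torch.arange(2 * 3 * 4, dtype=torch.float).view(2, 3, 4)   # (B, L, odim)
    ys_in = add_first_frame_and_remove_last_frame(ys)
    assert ys_in.shape == ys.shape
    assert torch.equal(ys_in[:, 0], torch.zeros(2, 4))
    assert torch.equal(ys_in[:, 1:], ys[:, :-1])
    return ys_in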
def load_audio(manifest_path, max_keep, min_keep):
n_long, n_short = 0, 0
names, inds, sizes, spk_embeds = [], [], [], []
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 3, line
sz = int(items[1])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
names.append(items[0])
spk_embeds.append(items[2])
inds.append(ind)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, names, inds, tot, sizes, spk_embeds
def load_label(label_path, inds, tot):
with open(label_path) as f:
labels = [line.rstrip() for line in f]
assert (
len(labels) == tot
), f"number of labels does not match ({len(labels)} != {tot})"
labels = [labels[i] for i in inds]
return labels
def load_label_offset(label_path, inds, tot):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets
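# Illustrative sketch (not part of the original code): reading one label line via
# the byte offsets produced by load_label_offset, as done when store_labels=False.
# The file path and contents below are hypothetical.
def _demo_offset_label_read(path="/tmp/labels.txt", index=1):
    with open(path, "w") as f:
        f.write("HELLO WORLD\nGOOD MORNING\nBYE\n")
    offsets = load_label_offset(path, inds=[0, 1, 2], tot=3)
    offset_s, offset_e = offsets[index]
    with open(path) as f:
        f.seek(offset_s)
        return f.read(offset_e - offset_s)   # "GOOD MORNING\n"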
def verify_label_lengths(
audio_sizes,
audio_rate,
label_path,
label_rate,
inds,
tot,
tol=0.1, # tolerance in seconds
):
if label_rate < 0:
logger.info(f"{label_path} is sequence label. skipped")
return
with open(label_path) as f:
lengths = [len(line.rstrip().split()) for line in f]
assert len(lengths) == tot
lengths = [lengths[i] for i in inds]
num_invalid = 0
for i, ind in enumerate(inds):
dur_from_audio = audio_sizes[i] / audio_rate
dur_from_label = lengths[i] / label_rate
if abs(dur_from_audio - dur_from_label) > tol:
logger.warning(
(
f"audio and label duration differ too much "
f"(|{dur_from_audio} - {dur_from_label}| > {tol}) "
f"in line {ind+1} of {label_path}. Check if `label_rate` "
f"is correctly set (currently {label_rate}). "
f"num. of samples = {audio_sizes[i]}; "
f"label length = {lengths[i]}"
)
)
num_invalid += 1
if num_invalid > 0:
logger.warning(
f"total {num_invalid} (audio, label) pairs with mismatched lengths"
)
def logmelfilterbank(
audio,
sampling_rate,
fft_size=1024,
hop_size=256,
win_length=None,
window="hann",
num_mels=80,
fmin=80,
fmax=7600,
eps=1e-10,
):
"""Compute log-Mel filterbank feature.
(https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py)
Args:
audio (ndarray): Audio signal (T,).
sampling_rate (int): Sampling rate.
fft_size (int): FFT size.
hop_size (int): Hop size.
win_length (int): Window length. If set to None, it will be the same as fft_size.
window (str): Window function type.
num_mels (int): Number of mel basis.
fmin (int): Minimum frequency in mel basis calculation.
fmax (int): Maximum frequency in mel basis calculation.
eps (float): Epsilon value to avoid inf in log calculation.
Returns:
ndarray: Log Mel filterbank feature (#frames, num_mels).
"""
# get amplitude spectrogram
x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,
win_length=win_length, window=window, pad_mode="reflect")
spc = np.abs(x_stft).T # (#frames, #bins)
# get mel basis
fmin = 0 if fmin is None else fmin
fmax = sampling_rate / 2 if fmax is None else fmax
mel_basis = librosa.filters.mel(sr=sampling_rate, n_fft=fft_size, n_mels=num_mels, fmin=fmin, fmax=fmax)
return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))
class SpeechPretrainDataset(FairseqDataset):
def __init__(
self,
manifest_path: str,
sample_rate: float,
label_paths: List[str],
label_rates: Union[List[float], float], # -1 for sequence labels
pad_list: List[str],
eos_list: List[str],
label_processors: Optional[List[Any]] = None,
max_keep_sample_size: Optional[int] = None,
min_keep_sample_size: Optional[int] = None,
max_sample_size: Optional[int] = None,
shuffle: bool = True,
pad_audio: bool = False,
normalize: bool = False,
store_labels: bool = True,
random_crop: bool = False,
single_target: bool = False,
reduction_factor: int = 1,
):
self.audio_root, self.audio_names, inds, tot, self.sizes, self.spk_embeds = load_audio(
manifest_path, max_keep_sample_size, min_keep_sample_size
)
self.sample_rate = sample_rate
self.shuffle = shuffle
self.random_crop = random_crop
self.num_labels = len(label_paths)
self.pad_list = pad_list
self.eos_list = eos_list
self.label_processors = label_processors
self.single_target = single_target
self.label_rates = (
[label_rates for _ in range(len(label_paths))]
if isinstance(label_rates, float)
else label_rates
)
self.store_labels = store_labels
if store_labels:
self.label_list = [load_label(p, inds, tot) for p in label_paths]
else:
self.label_paths = label_paths
self.label_offsets_list = [
load_label_offset(p, inds, tot) for p in label_paths
]
assert label_processors is None or len(label_processors) == self.num_labels
for label_path, label_rate in zip(label_paths, self.label_rates):
verify_label_lengths(
self.sizes, sample_rate, label_path, label_rate, inds, tot
)
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.pad_audio = pad_audio
self.normalize = normalize
self.reduction_factor = reduction_factor
logger.info(
f"pad_audio={pad_audio}, random_crop={random_crop}, reduction_factor={reduction_factor}, "
f"normalize={normalize}, max_sample_size={self.max_sample_size}"
)
def get_audio(self, index):
import soundfile as sf
wav_path = os.path.join(self.audio_root, self.audio_names[index])
wav, cur_sample_rate = sf.read(wav_path)
wav = torch.from_numpy(wav).float()
fbank = logmelfilterbank(
wav.view(-1).cpu().numpy(), 16000
)
fbank = torch.from_numpy(fbank).float()
wav = self.postprocess(wav, cur_sample_rate)
return wav, fbank
def get_label(self, index, label_idx):
if self.store_labels:
label = self.label_list[label_idx][index]
else:
with open(self.label_paths[label_idx]) as f:
offset_s, offset_e = self.label_offsets_list[label_idx][index]
f.seek(offset_s)
label = f.read(offset_e - offset_s)
if self.label_processors is not None:
label = self.label_processors[label_idx](label)
return label
def get_labels(self, index):
return [self.get_label(index, i) for i in range(self.num_labels)]
def __getitem__(self, index):
wav, fbank = self.get_audio(index)
labels = self.get_labels(index)
spkembs = get_features_or_waveform(
os.path.join(self.audio_root, self.spk_embeds[index])
)
spkembs = torch.from_numpy(spkembs).float()
return {"id": index, "source": wav, "target": fbank, "label_list": labels, 'spkembs': spkembs}
def __len__(self):
return len(self.sizes)
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav, 0
start, end = 0, target_size
if self.random_crop:
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end], start
def collater(self, samples):
# target = max(sizes) -> random_crop not used
# target = max_sample_size -> random_crop used for long
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
audios = [s["source"] for s in samples]
audio_sizes = [len(s) for s in audios]
fbanks = [s["target"] for s in samples]
fbank_sizes = [len(s) for s in fbanks]
if self.pad_audio:
audio_size = min(max(audio_sizes), self.max_sample_size)
else:
audio_size = min(min(audio_sizes), self.max_sample_size)
collated_audios, padding_mask, audio_starts = self.collater_audio(
audios, audio_size
)
collated_fbanks = []
collated_audios_size = []
for i in range(len(fbanks)):
fbank_start = int(audio_starts[i] / (audio_sizes[i] / fbank_sizes[i]))
fbank_size = int(audio_size / (audio_sizes[i] / fbank_sizes[i]))
fbank_end = min(fbank_start + fbank_size, fbank_sizes[i])
collated_fbanks.append(fbanks[i][fbank_start : fbank_end])
collated_audios_size.append(audio_size)
collated_fbanks_size = [len(s) for s in collated_fbanks]
collated_fbanks = _collate_frames(collated_fbanks)
collated_fbanks_size = torch.tensor(collated_fbanks_size, dtype=torch.long)
# thin out frames for reduction factor (B, Lmax, odim) -> (B, Lmax//r, odim)
if self.reduction_factor > 1:
collated_fbanks_in = collated_fbanks[:, self.reduction_factor - 1 :: self.reduction_factor]
collated_fbanks_size_in = collated_fbanks_size.new([torch.div(olen, self.reduction_factor, rounding_mode='floor') for olen in collated_fbanks_size])
else:
collated_fbanks_in, collated_fbanks_size_in = collated_fbanks, collated_fbanks_size
prev_output_tokens = torch.cat(
[collated_fbanks_in.new_zeros((collated_fbanks_in.shape[0], 1, collated_fbanks_in.shape[2])), collated_fbanks_in[:, :-1]], dim=1
)
# make labels for stop prediction
labels = collated_fbanks.new_zeros(collated_fbanks.size(0), collated_fbanks.size(1))
for i, l in enumerate(fbank_sizes):
labels[i, l - 1 :] = 1.0
spkembs = _collate_frames([s["spkembs"] for s in samples], is_audio_input=True)
targets_by_label = [
[s["label_list"][i] for s in samples] for i in range(self.num_labels)
]
targets_list, lengths_list, ntokens_list = self.collater_label(
targets_by_label, audio_size, audio_starts
)
net_input = {
"source": collated_audios,
"padding_mask": padding_mask,
"prev_output_tokens": prev_output_tokens,
"spkembs": spkembs,
"tgt_lengths": collated_fbanks_size_in,
}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": net_input,
"labels": labels,
"dec_target": collated_fbanks,
"dec_target_lengths": collated_fbanks_size,
"src_lengths": collated_audios_size,
"task_name": 'speech_pretrain',
}
if self.single_target:
batch["target_lengths"] = lengths_list[0]
batch["ntokens"] = ntokens_list[0]
batch["target"] = targets_list[0]
else:
batch["target_lengths_list"] = lengths_list
batch["ntokens_list"] = ntokens_list
batch["target_list"] = targets_list
return batch
def collater_audio(self, audios, audio_size):
collated_audios = audios[0].new_zeros(len(audios), audio_size)
padding_mask = (
torch.BoolTensor(collated_audios.shape).fill_(False)
# if self.pad_audio else None
)
audio_starts = [0 for _ in audios]
for i, audio in enumerate(audios):
diff = len(audio) - audio_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
assert self.pad_audio
collated_audios[i] = torch.cat([audio, audio.new_full((-diff,), 0.0)])
padding_mask[i, diff:] = True
else:
collated_audios[i], audio_starts[i] = self.crop_to_max_size(
audio, audio_size
)
return collated_audios, padding_mask, audio_starts
def collater_frm_label(self, targets, audio_size, audio_starts, label_rate, pad):
assert label_rate > 0
s2f = label_rate / self.sample_rate
frm_starts = [int(round(s * s2f)) for s in audio_starts]
frm_size = int(round(audio_size * s2f))
if not self.pad_audio:
rem_size = [len(t) - s for t, s in zip(targets, frm_starts)]
frm_size = min(frm_size, *rem_size)
targets = [t[s : s + frm_size] for t, s in zip(targets, frm_starts)]
logger.debug(f"audio_starts={audio_starts}")
logger.debug(f"frame_starts={frm_starts}")
logger.debug(f"frame_size={frm_size}")
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
return targets, lengths, ntokens
def collater_seq_label(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
return targets, lengths, ntokens
def collater_label(self, targets_by_label, audio_size, audio_starts):
targets_list, lengths_list, ntokens_list = [], [], []
itr = zip(targets_by_label, self.label_rates, self.pad_list)
for targets, label_rate, pad in itr:
if label_rate == -1.0:
targets, lengths, ntokens = self.collater_seq_label(targets, pad)
else:
targets, lengths, ntokens = self.collater_frm_label(
targets, audio_size, audio_starts, label_rate, pad
)
targets_list.append(targets)
lengths_list.append(lengths)
ntokens_list.append(ntokens)
return targets_list, lengths_list, ntokens_list
def num_tokens(self, index):
return self.size(index)
def size(self, index):
if self.pad_audio:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)[::-1]
def postprocess(self, wav, cur_sample_rate):
if wav.dim() == 2:
wav = wav.mean(-1)
assert wav.dim() == 1, wav.dim()
if cur_sample_rate != self.sample_rate:
raise Exception(f"sr {cur_sample_rate} != {self.sample_rate}")
if self.normalize:
with torch.no_grad():
wav = F.layer_norm(wav, wav.shape)
return wav
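# Illustrative sketch (not part of the original code): how collater() maps a cropped
# waveform window back onto log-Mel frames, approximating one frame per
# (audio_len / n_frames) samples. All sizes here are made up.
def _demo_crop_alignment(audio_len=48000, n_frames=188, crop_start=16000, crop_len=16000):
    samples_per_frame = audio_len / n_frames
    fbank_start = int(crop_start / samples_per_frame)
    fbank_size = int(crop_len / samples_per_frame)
    fbank_end = min(fbank_start + fbank_size, n_frames)
    return fbank_start, fbank_end   # roughly the frames covering the cropped window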
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/speech_dataset.py |
EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/__init__.py |
|
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import math
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
def collate(
samples,
pad_idx,
eos_idx,
vocab,
left_pad_source=False,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
):
assert input_feeding
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx=None, # use eos_idx of each sample instead of vocab.eos()
left_pad=left_pad,
move_eos_to_beginning=move_eos_to_beginning,
pad_to_length=pad_to_length,
)
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
src_lengths = torch.LongTensor([s["source"].numel() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
ntokens = sum(len(s["target"]) for s in samples)
if input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
"target": target,
"nsentences": samples[0]["source"].size(0),
"sort_order": sort_order,
"task_name": 'text_pretrain',
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
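# Illustrative sketch (not part of the original code): how move_eos_to_beginning
# builds teacher-forcing inputs for the text targets. pad=1 and eos=2 follow common
# fairseq dictionary defaults but are assumptions here.
def _demo_prev_output_tokens(pad_idx=1, eos_idx=2):
    targets = [torch.LongTensor([4, 5, 6, eos_idx]), torch.LongTensor([7, 8, eos_idx])]
    target = data_utils.collate_tokens(targets, pad_idx, left_pad=False)
    prev = data_utils.collate_tokens(
        targets, pad_idx, left_pad=False, move_eos_to_beginning=True
    )
    return target, prev   # prev rows start with eos: [[2, 4, 5, 6], [2, 7, 8, 1]]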
class TextPretrainDataset(FairseqDataset):
"""
A wrapper around TokenBlockDataset for BART dataset.
Args:
dataset (TokenBlockDataset): dataset to wrap
sizes (List[int]): sentence lengths
vocab (~fairseq.data.Dictionary): vocabulary
mask_idx (int): dictionary index used for masked token
mask_whole_words: only mask whole words. This should be a byte mask
over vocab indices, indicating whether it is the beginning of a
word. We will extend any mask to encompass the whole word.
shuffle (bool, optional): shuffle the elements before batching.
Default: ``True``
seed: Seed for random number generator for reproducibility.
args: argparse arguments.
"""
def __init__(
self,
dataset,
sizes,
vocab,
mask_idx,
mask_whole_words,
shuffle,
seed,
args,
eos=None,
item_transform_func=None,
iid_noise_target=False,
uni_mask_idxs=None,
):
self.dataset = dataset
self.sizes = sizes
self.vocab = vocab
self.shuffle = shuffle
self.seed = seed
if iid_noise_target:
assert isinstance(uni_mask_idxs, torch.Tensor), "if use iid_noise_target, the uni_mask_idxs must be a tensor which contain the mask indexs"
self.iid_noise_target = iid_noise_target
self.uni_mask_idxs = uni_mask_idxs
self.mask_idx = mask_idx
self.mask_whole_word = mask_whole_words
self.mask_ratio = args.mask
self.random_ratio = args.mask_random
self.insert_ratio = args.insert
self.rotate_ratio = args.rotate
self.permute_sentence_ratio = args.permute_sentences
self.eos = eos if eos is not None else vocab.eos()
self.item_transform_func = item_transform_func
if args.bpe != "gpt2":
self.full_stop_index = self.vocab.eos()
else:
assert args.bpe == "gpt2"
self.full_stop_index = self.vocab.index("13")
self.replace_length = args.replace_length
if self.replace_length not in [-1, 0, 1]:
raise ValueError(f"invalid arg: replace_length={self.replace_length}")
if args.mask_length not in ["subword", "word", "span-poisson"]:
raise ValueError(f"invalid arg: mask-length={args.mask_length}")
if args.mask_length == "subword" and args.replace_length not in [0, 1]:
raise ValueError(f"if using subwords, use replace-length=1 or 0")
self.mask_span_distribution = None
if args.mask_length == "span-poisson":
_lambda = args.poisson_lambda
lambda_to_the_k = 1
e_to_the_minus_lambda = math.exp(-_lambda)
k_factorial = 1
ps = []
for k in range(0, 128):
ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
lambda_to_the_k *= _lambda
k_factorial *= k + 1
if ps[-1] < 0.0000001:
break
ps = torch.FloatTensor(ps)
self.mask_span_distribution = torch.distributions.Categorical(ps)
self.epoch = 0
@property
def can_reuse_epoch_itr_across_epochs(self):
return True # only the noise changes, not item sizes
def set_epoch(self, epoch, **unused):
self.epoch = epoch
def __getitem__(self, index):
with data_utils.numpy_seed(self.seed, self.epoch, index):
tokens = self.dataset[index]
assert tokens[-1] == self.eos
source, target = tokens, tokens.clone()
if self.permute_sentence_ratio > 0.0:
source = self.permute_sentences(source, self.permute_sentence_ratio)
if self.mask_ratio > 0:
source, new_target = self.add_whole_word_mask(source, self.mask_ratio)
if new_target is not None:
target = new_target
if self.insert_ratio > 0:
source = self.add_insertion_noise(source, self.insert_ratio)
if self.rotate_ratio > 0.0 and np.random.random() < self.rotate_ratio:
source = self.add_rolling_noise(source)
            # there can be additional changes to make:
if self.item_transform_func is not None:
source, target = self.item_transform_func(source, target)
assert (source >= 0).all()
assert (source[1:-1] >= 1).all()
assert (source <= len(self.vocab)).all()
assert source[0] == self.vocab.bos()
assert source[-1] == self.eos
return {
"id": index,
"source": source,
"target": target,
}
def __len__(self):
return len(self.dataset)
def permute_sentences(self, source, p=1.0):
full_stops = source == self.full_stop_index
# Pretend it ends with a full stop so last span is a sentence
full_stops[-2] = 1
# Tokens that are full stops, where the previous token is not
sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2
result = source.clone()
num_sentences = sentence_ends.size(0)
num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0)
substitutions = torch.randperm(num_sentences)[:num_to_permute]
ordering = torch.arange(0, num_sentences)
ordering[substitutions] = substitutions[torch.randperm(num_to_permute)]
# Ignore <bos> at start
index = 1
for i in ordering:
sentence = source[(sentence_ends[i - 1] if i > 0 else 1) : sentence_ends[i]]
result[index : index + sentence.size(0)] = sentence
index += sentence.size(0)
return result
def word_starts(self, source):
if self.mask_whole_word is not None:
is_word_start = self.mask_whole_word.gather(0, source)
else:
is_word_start = torch.ones(source.size())
is_word_start[0] = 0
is_word_start[-1] = 0
return is_word_start
def add_whole_word_mask(self, source, p):
source_ori = source.clone()
is_word_start = self.word_starts(source)
num_to_mask = int(math.ceil(is_word_start.float().sum() * p))
num_inserts = 0
if num_to_mask == 0:
            return source, None
if self.mask_span_distribution is not None:
lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))
# Make sure we have enough to mask
cum_length = torch.cumsum(lengths, 0)
while cum_length[-1] < num_to_mask:
lengths = torch.cat(
[
lengths,
self.mask_span_distribution.sample(sample_shape=(num_to_mask,)),
],
dim=0,
)
cum_length = torch.cumsum(lengths, 0)
# Trim to masking budget
i = 0
while cum_length[i] < num_to_mask:
i += 1
lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])
num_to_mask = i + 1
lengths = lengths[:num_to_mask]
# Handle 0-length mask (inserts) separately
lengths = lengths[lengths > 0]
num_inserts = num_to_mask - lengths.size(0)
num_to_mask -= num_inserts
if num_to_mask == 0:
                return self.add_insertion_noise(source, num_inserts / source.size(0)), None
assert (lengths > 0).all()
else:
lengths = torch.ones((num_to_mask,)).long()
assert is_word_start[-1] == 0
word_starts = is_word_start.nonzero(as_tuple=False)
indices = word_starts[
torch.randperm(word_starts.size(0))[:num_to_mask]
].squeeze(1)
mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio
source_length = source.size(0)
assert source_length - 1 not in indices
to_keep = torch.ones(source_length, dtype=torch.bool)
        is_word_start[-1] = 255  # acts as a long length, so spans don't go over the end of doc
if self.replace_length == 0:
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(
1, len(self.vocab), size=(mask_random.sum(),)
)
if self.mask_span_distribution is not None:
assert len(lengths.size()) == 1
assert lengths.size() == indices.size()
lengths -= 1
while indices.size(0) > 0:
assert lengths.size() == indices.size()
lengths -= is_word_start[indices + 1].long()
uncompleted = lengths >= 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
lengths = lengths[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(
1, len(self.vocab), size=(mask_random.sum(),)
)
else:
# A bit faster when all lengths are 1
while indices.size(0) > 0:
uncompleted = is_word_start[indices + 1] == 0
indices = indices[uncompleted] + 1
mask_random = mask_random[uncompleted]
if self.replace_length != -1:
# delete token
to_keep[indices] = 0
else:
# keep index, but replace it with [MASK]
source[indices] = self.mask_idx
source[indices[mask_random]] = torch.randint(
1, len(self.vocab), size=(mask_random.sum(),)
)
assert source_length - 1 not in indices
if not self.iid_noise_target:
source = source[to_keep]
target = None
else:
## Prepare source
source_mask_idx = (source == self.mask_idx).nonzero().view(-1)
source[source_mask_idx] = self.uni_mask_idxs[:source_mask_idx.size(0)]
source = source[to_keep]
## Prepare target
to_keep[source_mask_idx] = 0
# source_mask_idx: from [a, b, c, ...] to [a, b + 1, c + 2, ...]
source_mask_idx = source_mask_idx + torch.arange(source_mask_idx.size(0))
# target: source_length + mask_length
target = source_ori.new_zeros(source_mask_idx.size(0) + source_ori.size(0))
# target: [0, 0, 0, X, 0, 0, Y, ....]
target[source_mask_idx] = self.uni_mask_idxs[:source_mask_idx.size(0)]
target_to_keep = to_keep.new_zeros(source_mask_idx.size(0) + source_ori.size(0))
# Copy original value to target and target_to_keep
target_to_keep[target == 0] = to_keep
target_to_keep[-1] = 0
target[target == 0] = source_ori
target = target[~target_to_keep]
if num_inserts > 0:
source = self.add_insertion_noise(source, num_inserts / source.size(0))
return source, target
def add_permuted_noise(self, tokens, p):
num_words = len(tokens)
num_to_permute = math.ceil(((num_words * 2) * p) / 2.0)
substitutions = torch.randperm(num_words - 2)[:num_to_permute] + 1
tokens[substitutions] = tokens[substitutions[torch.randperm(num_to_permute)]]
return tokens
def add_rolling_noise(self, tokens):
offset = np.random.randint(1, max(1, tokens.size(-1) - 1) + 1)
tokens = torch.cat(
(tokens[0:1], tokens[offset:-1], tokens[1:offset], tokens[-1:]),
dim=0,
)
return tokens
def add_insertion_noise(self, tokens, p):
if p == 0.0:
return tokens
num_tokens = len(tokens)
n = int(math.ceil(num_tokens * p))
noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
noise_mask[noise_indices] = 1
result = torch.LongTensor(n + len(tokens)).fill_(-1)
num_random = int(math.ceil(n * self.random_ratio))
result[noise_indices[num_random:]] = self.mask_idx
result[noise_indices[:num_random]] = torch.randint(
low=1, high=len(self.vocab), size=(num_random,)
)
result[~noise_mask] = tokens
assert (result >= 0).all()
return result
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch of data
"""
return collate(
samples, self.vocab.pad(), self.eos, self.vocab, pad_to_length=pad_to_length
)
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return self.sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return self.sizes[index]
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self))
else:
indices = np.arange(len(self))
return indices[np.argsort(self.sizes[indices], kind="mergesort")]
    def prefetch(self, indices):
        if getattr(self.dataset, "supports_prefetch", False):
            self.dataset.prefetch(indices)
    @property
    def supports_prefetch(self):
        return getattr(self.dataset, "supports_prefetch", False)
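# Illustrative sketch (not part of the original code): the truncated Poisson used for
# "span-poisson" mask lengths, built the same way as in __init__ (terms are dropped
# once they fall below 1e-7). _lambda=3.5 is only an example value.
def _demo_span_poisson(_lambda=3.5, n_samples=5):
    lambda_to_the_k, e_to_the_minus_lambda, k_factorial = 1, math.exp(-_lambda), 1
    ps = []
    for k in range(0, 128):
        ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)
        lambda_to_the_k *= _lambda
        k_factorial *= k + 1
        if ps[-1] < 0.0000001:
            break
    dist = torch.distributions.Categorical(torch.FloatTensor(ps))
    return dist.sample(sample_shape=(n_samples,))   # span lengths in [0, len(ps) - 1]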
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/text_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def load_audio(manifest_path, max_keep, min_keep):
n_long, n_short = 0, 0
names, inds, sizes = [], [], []
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) >= 2, line
sz = int(items[1])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
names.append(items[0])
inds.append(ind)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, names, inds, tot, sizes
def load_label(label_path, inds, tot):
with open(label_path) as f:
labels = [line.rstrip() for line in f]
assert (
len(labels) == tot
), f"number of labels does not match ({len(labels)} != {tot})"
labels = [labels[i] for i in inds]
return labels
def load_label_offset(label_path, inds, tot):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets
class SpeechToTextDataset(FairseqDataset):
def __init__(
self,
manifest_path: str,
sample_rate: float,
label_paths: List[str],
label_processors: Optional[List[Any]] = None,
max_keep_sample_size: Optional[int] = None,
min_keep_sample_size: Optional[int] = None,
shuffle: bool = True,
normalize: bool = False,
store_labels: bool = True,
tgt_dict: Optional[Dictionary] = None,
tokenizer = None,
):
self.audio_root, self.audio_names, inds, tot, self.wav_sizes = load_audio(
manifest_path, max_keep_sample_size, min_keep_sample_size
)
self.sample_rate = sample_rate
self.shuffle = shuffle
self.tgt_dict = tgt_dict
self.tokenizer = tokenizer
self.num_labels = len(label_paths)
self.label_processors = label_processors
self.store_labels = store_labels
if store_labels:
self.label_list = [load_label(p, inds, tot) for p in label_paths]
else:
self.label_paths = label_paths
self.label_offsets_list = [
load_label_offset(p, inds, tot) for p in label_paths
]
assert label_processors is None or len(label_processors) == self.num_labels
self.normalize = normalize
logger.info(
f"normalize={normalize}"
)
def get_audio(self, index):
import soundfile as sf
wav_path = os.path.join(self.audio_root, self.audio_names[index])
wav, cur_sample_rate = sf.read(wav_path)
wav = torch.from_numpy(wav).float()
wav = self.postprocess(wav, cur_sample_rate)
return wav
def get_label(self, index, label_idx):
if self.store_labels:
label = self.label_list[label_idx][index]
else:
with open(self.label_paths[label_idx]) as f:
offset_s, offset_e = self.label_offsets_list[label_idx][index]
f.seek(offset_s)
label = f.read(offset_e - offset_s)
if self.tokenizer is not None:
label = self.tokenizer.encode(label)
if self.label_processors is not None:
label = self.label_processors[label_idx](label)
return label
def get_labels(self, index):
return [self.get_label(index, i) for i in range(self.num_labels)]
def __getitem__(self, index):
wav = self.get_audio(index)
labels = self.get_labels(index)
return {"id": index, "source": wav, "label_list": labels}
def __len__(self):
return len(self.wav_sizes)
def collater(self, samples):
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
audios = [s["source"] for s in samples]
audio_sizes = [len(s) for s in audios]
audio_size = max(audio_sizes)
collated_audios, padding_mask = self.collater_audio(
audios, audio_size
)
targets_by_label = [
[s["label_list"][i] for s in samples] for i in range(self.num_labels)
]
targets_list, lengths_list, ntokens_list = self.collater_label(targets_by_label)
decoder_label = [
torch.cat((targets_list[0][i, :lengths_list[0][i]], torch.tensor([self.tgt_dict.eos()])), 0).long()
for i in range(targets_list[0].size(0))
]
decoder_target = data_utils.collate_tokens(
decoder_label,
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=False,
)
decoder_target_lengths = torch.tensor(
[x.size(0) for x in decoder_label], dtype=torch.long
)
prev_output_tokens = data_utils.collate_tokens(
decoder_label,
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=True,
)
net_input = {
"source": collated_audios,
"padding_mask": padding_mask,
"prev_output_tokens": prev_output_tokens,
"task_name": "s2t",
}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": net_input,
"target": decoder_target,
"target_lengths": decoder_target_lengths,
"task_name": "s2t",
"ntokens": ntokens_list[0]
}
return batch
def collater_audio(self, audios, audio_size):
collated_audios = audios[0].new_zeros(len(audios), audio_size)
padding_mask = (
torch.BoolTensor(collated_audios.shape).fill_(False)
)
for i, audio in enumerate(audios):
diff = len(audio) - audio_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
collated_audios[i] = torch.cat([audio, audio.new_full((-diff,), 0.0)])
padding_mask[i, diff:] = True
else:
raise Exception("Diff should not be larger than 0")
return collated_audios, padding_mask
def collater_seq_label(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
return targets, lengths, ntokens
def collater_label(self, targets_by_label):
targets_list, lengths_list, ntokens_list = [], [], []
itr = zip(targets_by_label, [self.tgt_dict.pad()])
for targets, pad in itr:
targets, lengths, ntokens = self.collater_seq_label(targets, pad)
targets_list.append(targets)
lengths_list.append(lengths)
ntokens_list.append(ntokens)
return targets_list, lengths_list, ntokens_list
def num_tokens(self, index):
return self.size(index)
def size(self, index):
return self.wav_sizes[index]
@property
def sizes(self):
return np.array(self.wav_sizes)
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.wav_sizes)
return np.lexsort(order)[::-1]
def postprocess(self, wav, cur_sample_rate):
if wav.dim() == 2:
wav = wav.mean(-1)
assert wav.dim() == 1, wav.dim()
if cur_sample_rate != self.sample_rate:
raise Exception(f"sr {cur_sample_rate} != {self.sample_rate}")
if self.normalize:
with torch.no_grad():
wav = F.layer_norm(wav, wav.shape)
return wav
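# Illustrative sketch (not part of the original code): appending EOS to each target
# and building the shifted decoder inputs, as in SpeechToTextDataset.collater().
# pad=1 and eos=2 follow common fairseq dictionary defaults but are assumptions here.
def _demo_s2t_targets(pad_idx=1, eos_idx=2):
    targets = [torch.LongTensor([10, 11, 12]), torch.LongTensor([13, 14])]
    labels = [torch.cat((t, torch.tensor([eos_idx])), 0).long() for t in targets]
    decoder_target = data_utils.collate_tokens(labels, pad_idx, eos_idx, left_pad=False)
    prev_output_tokens = data_utils.collate_tokens(
        labels, pad_idx, eos_idx, left_pad=False, move_eos_to_beginning=True
    )
    return decoder_target, prev_output_tokens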
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/speech_to_text_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
):
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out
def load_audio(manifest_path, max_keep, min_keep):
n_long, n_short = 0, 0
names, inds, sizes, spk_embeds = [], [], [], []
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 3, line
sz = int(items[1])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
names.append(items[0])
spk_embeds.append(items[2])
inds.append(ind)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, names, inds, tot, sizes, spk_embeds
def load_label(label_path, inds, tot):
with open(label_path) as f:
labels = [line.rstrip() for line in f]
assert (
len(labels) == tot
), f"number of labels does not match ({len(labels)} != {tot})"
labels = [labels[i] for i in inds]
return labels
def load_label_offset(label_path, inds, tot):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets
def logmelfilterbank(
audio,
sampling_rate,
fft_size=1024,
hop_size=256,
win_length=None,
window="hann",
num_mels=80,
fmin=80,
fmax=7600,
eps=1e-10,
):
"""Compute log-Mel filterbank feature.
(https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py)
Args:
audio (ndarray): Audio signal (T,).
sampling_rate (int): Sampling rate.
fft_size (int): FFT size.
hop_size (int): Hop size.
win_length (int): Window length. If set to None, it will be the same as fft_size.
window (str): Window function type.
num_mels (int): Number of mel basis.
fmin (int): Minimum frequency in mel basis calculation.
fmax (int): Maximum frequency in mel basis calculation.
eps (float): Epsilon value to avoid inf in log calculation.
Returns:
ndarray: Log Mel filterbank feature (#frames, num_mels).
"""
# get amplitude spectrogram
x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,
win_length=win_length, window=window, pad_mode="reflect")
spc = np.abs(x_stft).T # (#frames, #bins)
# get mel basis
fmin = 0 if fmin is None else fmin
fmax = sampling_rate / 2 if fmax is None else fmax
mel_basis = librosa.filters.mel(sr=sampling_rate, n_fft=fft_size, n_mels=num_mels, fmin=fmin, fmax=fmax)
return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))
class TextToSpeechDataset(FairseqDataset):
def __init__(
self,
manifest_path: str,
sample_rate: float,
label_paths: List[str],
label_processors: Optional[List[Any]] = None,
max_keep_sample_size: Optional[int] = None,
min_keep_sample_size: Optional[int] = None,
shuffle: bool = True,
normalize: bool = False,
store_labels: bool = True,
src_dict: Optional[Dictionary] = None,
tokenizer = None,
reduction_factor: int = 1,
):
self.audio_root, self.audio_names, inds, tot, self.wav_sizes, self.spk_embeds = load_audio(
manifest_path, max_keep_sample_size, min_keep_sample_size
)
self.sample_rate = sample_rate
self.shuffle = shuffle
self.src_dict = src_dict
self.tokenizer = tokenizer
self.num_labels = len(label_paths)
self.label_processors = label_processors
self.store_labels = store_labels
if store_labels:
self.label_list = [load_label(p, inds, tot) for p in label_paths]
else:
self.label_paths = label_paths
self.label_offsets_list = [
load_label_offset(p, inds, tot) for p in label_paths
]
assert label_processors is None or len(label_processors) == self.num_labels
self.normalize = normalize
self.reduction_factor = reduction_factor
logger.info(
f"reduction_factor={reduction_factor}, normalize={normalize}"
)
def get_audio(self, index):
import soundfile as sf
wav_path = os.path.join(self.audio_root, self.audio_names[index])
wav, cur_sample_rate = sf.read(wav_path)
wav = torch.from_numpy(wav).float()
fbank = logmelfilterbank(
wav.view(-1).cpu().numpy(), 16000
)
fbank = torch.from_numpy(fbank).float()
wav = self.postprocess(wav, cur_sample_rate)
return wav, fbank
def get_label(self, index, label_idx):
if self.store_labels:
label = self.label_list[label_idx][index]
else:
with open(self.label_paths[label_idx]) as f:
offset_s, offset_e = self.label_offsets_list[label_idx][index]
f.seek(offset_s)
label = f.read(offset_e - offset_s)
if self.tokenizer is not None:
label = self.tokenizer.encode(label)
if self.label_processors is not None:
label = self.label_processors[label_idx](label)
return label
def get_labels(self, index):
return [self.get_label(index, i) for i in range(self.num_labels)]
def __getitem__(self, index):
wav, fbank = self.get_audio(index)
labels = self.get_labels(index)
spkembs = get_features_or_waveform(
os.path.join(self.audio_root, self.spk_embeds[index])
)
spkembs = torch.from_numpy(spkembs).float()
return {"id": index, "source": labels, "target": fbank, "spkembs": spkembs, "audio_name": self.audio_names[index]}
def __len__(self):
return len(self.wav_sizes)
def collater(self, samples):
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
fbanks = [s["target"] for s in samples]
fbank_sizes = [len(s) for s in fbanks]
collated_fbanks = _collate_frames(fbanks)
collated_fbanks_size = torch.tensor(fbank_sizes, dtype=torch.long)
# thin out frames for reduction factor (B, Lmax, odim) -> (B, Lmax//r, odim)
if self.reduction_factor > 1:
collated_fbanks_in = collated_fbanks[:, self.reduction_factor - 1 :: self.reduction_factor]
collated_fbanks_size_in = collated_fbanks_size.new([torch.div(olen, self.reduction_factor, rounding_mode='floor') for olen in collated_fbanks_size])
else:
collated_fbanks_in, collated_fbanks_size_in = collated_fbanks, collated_fbanks_size
prev_output_tokens = torch.cat(
[collated_fbanks_in.new_zeros((collated_fbanks_in.shape[0], 1, collated_fbanks_in.shape[2])), collated_fbanks_in[:, :-1]], dim=1
)
# make labels for stop prediction
labels = collated_fbanks.new_zeros(collated_fbanks.size(0), collated_fbanks.size(1))
for i, l in enumerate(fbank_sizes):
labels[i, l - 1 :] = 1.0
spkembs = _collate_frames([s["spkembs"] for s in samples], is_audio_input=True)
sources_by_label = [
[s["source"][i] for s in samples] for i in range(self.num_labels)
]
sources_list, lengths_list, ntokens_list = self.collater_label(sources_by_label)
net_input = {
"src_tokens": sources_list[0],
"src_lengths": lengths_list[0],
"prev_output_tokens": prev_output_tokens,
"tgt_lengths": collated_fbanks_size_in,
"spkembs": spkembs,
"task_name": "t2s",
}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"name": [s["audio_name"] for s in samples],
"net_input": net_input,
"labels": labels,
"dec_target": collated_fbanks,
"dec_target_lengths": collated_fbanks_size,
"src_lengths": lengths_list[0],
"task_name": "t2s",
"ntokens": ntokens_list[0],
"target": collated_fbanks,
}
return batch
def collater_seq_label(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
return targets, lengths, ntokens
def collater_label(self, targets_by_label):
targets_list, lengths_list, ntokens_list = [], [], []
itr = zip(targets_by_label, [self.src_dict.pad()])
for targets, pad in itr:
targets, lengths, ntokens = self.collater_seq_label(targets, pad)
targets_list.append(targets)
lengths_list.append(lengths)
ntokens_list.append(ntokens)
return targets_list, lengths_list, ntokens_list
def num_tokens(self, index):
return self.size(index)
def size(self, index):
return self.wav_sizes[index]
@property
def sizes(self):
return np.array(self.wav_sizes)
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.wav_sizes)
return np.lexsort(order)[::-1]
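    # lexsort's last key dominates, so indices come out sorted by ascending waveform
    # size and the trailing [::-1] makes the order descending; the random permutation
    # (when shuffling) only breaks ties between equal sizes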
def postprocess(self, wav, cur_sample_rate):
if wav.dim() == 2:
wav = wav.mean(-1)
assert wav.dim() == 1, wav.dim()
if cur_sample_rate != self.sample_rate:
raise Exception(f"sr {cur_sample_rate} != {self.sample_rate}")
if self.normalize:
with torch.no_grad():
wav = F.layer_norm(wav, wav.shape)
return wav
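# Batch layout produced by TextToSpeechDataset.collater, for reference:
#   net_input: src_tokens, src_lengths, prev_output_tokens, tgt_lengths, spkembs, task_name="t2s"
#   top level: id, name, net_input, labels (stop-token targets), dec_target,
#              dec_target_lengths, src_lengths, task_name, ntokens, target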
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/data/text_to_speech_dataset.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import math
from argparse import Namespace
from dataclasses import dataclass, field
from omegaconf import II
from typing import Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import post_process
from fairseq.tasks import FairseqTask
from fairseq.logging.meters import safe_round
import logging
logger = logging.getLogger(__name__)
@dataclass
class SpeechtoTextLossConfig(FairseqDataclass):
zero_infinity: bool = field(
default=False,
metadata={"help": "zero inf loss when source length <= target length"},
)
sentence_avg: bool = II("optimization.sentence_avg")
post_process: Optional[str] = field(
default="sentencepiece",
metadata={
"help": "how to post process predictions into words. can be letter, "
"wordpiece, BPE symbols, etc. "
"See fairseq.data.data_utils.post_process() for full list of options"
},
)
wer_kenlm_model: Optional[str] = field(
default=None,
metadata={
"help": "if this is provided, use kenlm to compute wer (along with other wer_* args)"
},
)
wer_lexicon: Optional[str] = field(
default=None,
metadata={"help": "lexicon to use with wer_kenlm_model"},
)
wer_lm_weight: float = field(
default=2.0,
metadata={"help": "lm weight to use with wer_kenlm_model"},
)
wer_word_score: float = field(
default=-1.0,
metadata={"help": "lm word score to use with wer_kenlm_model"},
)
wer_args: Optional[str] = field(
default=None,
metadata={
"help": "DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)"
},
)
label_smoothing: float = field(
default=0.0,
metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
)
report_accuracy: bool = field(
default=False,
metadata={"help": "report accuracy metric"},
)
ignore_prefix_size: int = field(
default=0,
metadata={"help": "Ignore first N tokens"},
)
#: bool = II("optimization.sentence_avg")
ce_weight: float = field(
default=1.0,
metadata={"help": "loss weight for cross entropy"},
)
ctc_weight: float = field(
default=0.0,
metadata={"help": "loss weiehgt for ctc in ASR"},
)
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
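# Note on the smoothing above (derived from the code, added for clarity): with
# vocabulary size V and eps_i = epsilon / (V - 1), the target class keeps total weight
# (1 - epsilon - eps_i) + eps_i = 1 - epsilon and every other class receives eps_i,
# because smooth_loss sums -lprobs over all V classes including the target.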
class SpeechtoTextLoss(FairseqCriterion):
def __init__(
self,
cfg: SpeechtoTextLossConfig,
task: FairseqTask,
sentence_avg=True,
label_smoothing=0.1,
ignore_prefix_size=0,
report_accuracy=False,
ce_weight=1.0,
ctc_weight=0.0,
):
super().__init__(task)
self.blank_idx = (
task.target_dictionary.index(task.blank_symbol)
if hasattr(task, "blank_symbol")
else 0
)
#print ("self.blank_idx: ", self.blank_idx)
self.pad_idx = task.target_dictionary.pad()
self.eos_idx = task.target_dictionary.eos()
self.post_process = cfg.post_process
self.ce_weight = ce_weight
self.ctc_weight = ctc_weight
## for ce
self.sentence_avg = sentence_avg
self.eps = label_smoothing
self.ignore_prefix_size = ignore_prefix_size
self.report_accuracy = report_accuracy
if cfg.wer_args is not None:
(
cfg.wer_kenlm_model,
cfg.wer_lexicon,
cfg.wer_lm_weight,
cfg.wer_word_score,
) = eval(cfg.wer_args)
if cfg.wer_kenlm_model is not None:
from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
dec_args = Namespace()
dec_args.nbest = 1
dec_args.criterion = "ctc"
dec_args.kenlm_model = cfg.wer_kenlm_model
dec_args.lexicon = cfg.wer_lexicon
dec_args.beam = 50
dec_args.beam_size_token = min(50, len(task.target_dictionary))
dec_args.beam_threshold = min(50, len(task.target_dictionary))
dec_args.lm_weight = cfg.wer_lm_weight
dec_args.word_score = cfg.wer_word_score
dec_args.unk_weight = -math.inf
dec_args.sil_weight = 0
self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary)
else:
self.w2l_decoder = None
self.zero_infinity = cfg.zero_infinity
#self.sentence_avg = cfg.sentence_avg
if self.ce_weight > 0 and self.ctc_weight > 0:
logger.info("Using cross entropy loss and CTC loss for ASR")
elif self.ce_weight > 0:
logger.info("Only using CE loss")
elif self.ctc_weight > 0:
logger.info("Only using CTC loss for ASR")
else:
logger.info("ERROR")
def forward(self, model, sample, reduce=True):
if self.ce_weight == 0 and self.ctc_weight > 0:
sample["only_ctc"] = True
net_output_decoder, net_output = model(**sample["net_input"])
if self.ce_weight > 0:
loss_ce, nll_loss_ce = self.compute_loss(model, net_output_decoder, sample, reduce=reduce)
#print ("loss_ce: ", loss_ce)
else:
nll_loss_ce = None
if self.ctc_weight > 0:
loss_ctc, lprobs, input_lengths = self.compute_loss_ctc(model, net_output, sample)
if self.ce_weight > 0 and self.ctc_weight > 0:
loss = self.ce_weight * loss_ce + self.ctc_weight * loss_ctc
elif self.ce_weight > 0:
loss = loss_ce
elif self.ctc_weight > 0:
loss = loss_ctc
else:
logger.info("ERROR: must ce_weight > 0 or ctc_weight > 0")
ntokens = (
sample["ntokens"] if "ntokens" in sample else sample["target_lengths"].sum().item()
)
sample_size = sample["target"].size(0) if self.sentence_avg else ntokens
logging_output = {
"loss": loss.item(),
"ce_loss": loss_ce.item() if self.ce_weight > 0 else 0,
"ctc_loss": loss_ctc.item() if self.ctc_weight > 0 else 0,
"nll_loss": nll_loss_ce.item() if nll_loss_ce is not None else 0,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
if self.ce_weight > 0 and self.report_accuracy:
n_correct, total = self.compute_accuracy(model, net_output_decoder, sample)
logging_output["n_correct"] = utils.item(n_correct.item())
logging_output["total"] = utils.item(total.data)
if self.ctc_weight > 0 and not model.training:
import editdistance
with torch.no_grad():
lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu()
c_err = 0
c_len = 0
w_errs = 0
w_len = 0
wv_errs = 0
for lp, t, inp_l in zip(
lprobs_t,
sample["target_label"]
if "target_label" in sample
else sample["target"],
input_lengths,
):
lp = lp[:inp_l].unsqueeze(0)
decoded = None
if self.w2l_decoder is not None:
decoded = self.w2l_decoder.decode(lp)
if len(decoded) < 1:
decoded = None
else:
decoded = decoded[0]
if len(decoded) < 1:
decoded = None
else:
decoded = decoded[0]
p = (t != self.task.target_dictionary.pad()) & (
t != self.task.target_dictionary.eos()
)
targ = t[p]
targ_units = self.task.target_dictionary.string(targ)
targ_units_arr = targ.tolist()
toks = lp.argmax(dim=-1).unique_consecutive()
pred_units_arr = toks[toks != self.blank_idx].tolist()
c_err += editdistance.eval(pred_units_arr, targ_units_arr)
c_len += len(targ_units_arr)
targ_words = post_process(targ_units, self.post_process).split()
pred_units = self.task.target_dictionary.string(pred_units_arr)
pred_words_raw = post_process(pred_units, self.post_process).split()
if decoded is not None and "words" in decoded:
pred_words = decoded["words"]
w_errs += editdistance.eval(pred_words, targ_words)
wv_errs += editdistance.eval(pred_words_raw, targ_words)
else:
dist = editdistance.eval(pred_words_raw, targ_words)
w_errs += dist
wv_errs += dist
w_len += len(targ_words)
logging_output["wv_errors"] = wv_errs
logging_output["w_errors"] = w_errs
logging_output["w_total"] = w_len
logging_output["c_errors"] = c_err
logging_output["c_total"] = c_len
return loss, sample_size, logging_output
def compute_loss_ctc(self, model, net_output, sample):
lprobs = model.get_normalized_probs_for_ctc(
net_output, log_probs=True
).contiguous() # (T, B, C) from the encoder
if net_output["encoder_padding_mask"] is not None:
non_padding_mask = ~net_output["encoder_padding_mask"][0]
input_lengths = non_padding_mask.long().sum(-1)
else:
input_lengths = lprobs.new_full(
(lprobs.size(1),), lprobs.size(0), dtype=torch.long
)
pad_mask = (sample["target"] != self.pad_idx) & (
sample["target"] != self.eos_idx
)
targets_flat = sample["target"].masked_select(pad_mask)
if "target_lengths" in sample:
target_lengths = sample["target_lengths"]
else:
target_lengths = pad_mask.sum(-1)
        # drop one token (the final EOS) from the target lengths to match targets_flat above
target_lengths = target_lengths - 1
with torch.backends.cudnn.flags(enabled=False):
loss_ctc = F.ctc_loss(
lprobs,
targets_flat,
input_lengths,
target_lengths,
blank=self.blank_idx,
reduction="sum",
zero_infinity=self.zero_infinity,
)
return loss_ctc, lprobs, input_lengths
## for ce
def get_lprobs_and_target(self, model, net_output, sample):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
target = model.get_targets(sample, net_output)
if self.ignore_prefix_size > 0:
if getattr(lprobs, "batch_first", False):
lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
target = target[:, self.ignore_prefix_size :].contiguous()
else:
lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
target = target[self.ignore_prefix_size :, :].contiguous()
return lprobs.view(-1, lprobs.size(-1)), target.view(-1)
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
def compute_accuracy(self, model, net_output, sample):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
mask = target.ne(self.padding_idx)
n_correct = torch.sum(
lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
)
total = torch.sum(mask)
return n_correct, total
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs))
nll_loss_sum = sum(log.get("nll_loss", 0) for log in logging_outputs)
ce_loss_sum = sum(log.get("ce_loss", 0) for log in logging_outputs)
ctc_loss_sum = sum(log.get("ctc_loss", 0) for log in logging_outputs)
ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs))
nsentences = utils.item(
sum(log.get("nsentences", 0) for log in logging_outputs)
)
sample_size = utils.item(
sum(log.get("sample_size", 0) for log in logging_outputs)
)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"ctc_loss", ctc_loss_sum / sample_size / math.log(2), ntokens, 2, round=3
)
metrics.log_scalar(
"ce_loss", ce_loss_sum / ntokens, ntokens, 2, round=3
)
metrics.log_scalar(
"nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, 2, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg, 2)
)
total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
if total > 0:
metrics.log_scalar("total", total)
n_correct = utils.item(
sum(log.get("n_correct", 0) for log in logging_outputs)
)
metrics.log_scalar("n_correct", n_correct)
metrics.log_derived(
"accuracy",
lambda meters: round(
meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
)
if meters["total"].sum > 0
else float("nan"),
2
)
metrics.log_scalar("ntokens", ntokens)
metrics.log_scalar("nsentences", nsentences)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
c_errors = sum(log.get("c_errors", 0) for log in logging_outputs)
metrics.log_scalar("_c_errors", c_errors)
c_total = sum(log.get("c_total", 0) for log in logging_outputs)
metrics.log_scalar("_c_total", c_total)
w_errors = sum(log.get("w_errors", 0) for log in logging_outputs)
metrics.log_scalar("_w_errors", w_errors)
wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs)
metrics.log_scalar("_wv_errors", wv_errors)
w_total = sum(log.get("w_total", 0) for log in logging_outputs)
metrics.log_scalar("_w_total", w_total)
if c_total > 0:
metrics.log_derived(
"uer",
lambda meters: safe_round(
meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3
)
if meters["_c_total"].sum > 0
else float("nan"),
)
if w_total > 0:
metrics.log_derived(
"wer",
lambda meters: safe_round(
meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
metrics.log_derived(
"raw_wer",
lambda meters: safe_round(
meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/speech_to_text_loss.py |
import importlib
import os
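# Automatically import every criterion module in this directory so that any
# @register_criterion decorators they contain are executed and the criterions
# become visible to fairseq.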
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
criterion_name = file[: file.find(".py")]
importlib.import_module(
"speecht5.criterions." + criterion_name
) | EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/__init__.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import re
from dataclasses import dataclass
import math
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from speecht5.criterions.text_to_speech_loss import TexttoSpeechLoss
from speecht5.criterions.text_pretrain_criterion import TextPretrainCriterion, TextPretrainCriterionConfig
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterionConfig
from speecht5.criterions.speech_pretrain_criterion import SpeechPretrainCriterion, SpeechPretrainCriterionConfig
from speecht5.criterions.speech_to_text_loss import SpeechtoTextLoss, SpeechtoTextLossConfig
from fairseq.logging.meters import safe_round
@dataclass
class SpeechT5CriterionConfig(
LabelSmoothedCrossEntropyCriterionConfig,
TextPretrainCriterionConfig,
SpeechPretrainCriterionConfig,
SpeechtoTextLossConfig
):
pass
@register_criterion(
"speecht5", dataclass=SpeechT5CriterionConfig
)
class SpeechT5Criterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
label_smoothing,
pred_masked_weight,
pred_nomask_weight,
loss_weights=None,
log_keys=None,
ignore_prefix_size=0,
report_accuracy=False,
use_masking=True,
use_weighted_masking=False,
loss_type="L1",
bce_pos_weight=5.0,
bce_loss_lambda=1.0,
use_guided_attn_loss=False,
num_heads_applied_guided_attn=2,
ce_weight=1.0,
ctc_weight=0.0,
hubert_weight=1.0,
dec_weight=1.0,
bart_weight=1.0,
):
super().__init__(task)
self.speech_criterion = TexttoSpeechLoss(
task,
sentence_avg,
use_masking,
use_weighted_masking,
loss_type,
bce_pos_weight,
bce_loss_lambda,
use_guided_attn_loss,
num_heads_applied_guided_attn=num_heads_applied_guided_attn,
)
self.text_criterion = SpeechtoTextLoss(
SpeechtoTextLossConfig,
task,
sentence_avg,
label_smoothing,
ignore_prefix_size,
report_accuracy,
ce_weight,
ctc_weight
)
self.text_pretrain_criterion = TextPretrainCriterion(
task,
sentence_avg,
bart_weight,
loss_weights,
)
self.speech_pretrain_criterion = SpeechPretrainCriterion(
task,
sentence_avg,
pred_masked_weight,
pred_nomask_weight,
loss_weights,
log_keys,
use_masking,
use_weighted_masking,
loss_type,
bce_pos_weight,
hubert_weight,
dec_weight
)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
task_name = sample['task_name']
if task_name == 's2t' or task_name == 's2c':
return self.text_criterion(model, sample, reduce)
elif task_name == 't2s' or task_name == 's2s':
return self.speech_criterion(model, sample)
elif task_name == 'text_pretrain':
return self.text_pretrain_criterion(model, sample, reduce)
elif task_name == 'speech_pretrain':
return self.speech_pretrain_criterion(model, sample, reduce)
@classmethod
def reduce_metrics(cls, logging_outputs):
"""Aggregate logging outputs from data parallel training."""
logging_outputs_dict = {}
for logging_output in logging_outputs:
for task_name in logging_output:
if task_name not in ['s2t', 't2s', 's2c', 's2s', 'text_pretrain', 'speech_pretrain']:
continue
if task_name not in logging_outputs_dict:
logging_outputs_dict[task_name] = []
logging_outputs_dict[task_name].append(logging_output[task_name])
for task_name in logging_outputs_dict:
if task_name == 's2t':
# LabelSmoothedCrossEntropyCriterion.reduce_metrics([logging_output['s2t'] for logging_output in logging_outputs])
s2t_logging_output = logging_outputs_dict[task_name]
# s2t_sum = sum(log.get("ce_loss", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in s2t_logging_output)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in s2t_logging_output)
ntokens = sum(log.get("ntokens", 0) for log in s2t_logging_output)
ce_loss_sum = sum(log.get("ce_loss", 0) for log in s2t_logging_output)
ctc_loss_sum = sum(log.get("ctc_loss", 0) for log in s2t_logging_output)
sample_size = max(1, sum(log.get("sample_size", 0) for log in s2t_logging_output))
metrics.log_scalar(
"s2t_loss", loss_sum / sample_size / math.log(2), sample_size, 1, round=3
)
metrics.log_scalar(
"s2t_nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, 2, round=3
)
metrics.log_derived(
"s2t_ppl", lambda meters: utils.get_perplexity(meters["s2t_nll_loss"].avg, 2)
)
metrics.log_scalar(
"ctc_loss", ctc_loss_sum / sample_size / math.log(2), ntokens, 2, round=3
)
metrics.log_scalar(
"ce_loss", ce_loss_sum / ntokens, ntokens, 2, round=3
)
total = utils.item(sum(log.get("total", 0) for log in s2t_logging_output))
if total > 0:
metrics.log_scalar("s2t_total", total)
n_correct = utils.item(
sum(log.get("n_correct", 0) for log in s2t_logging_output)
)
metrics.log_scalar("s2t_n_correct", n_correct)
metrics.log_derived(
"s2t_accuracy",
lambda meters: round(
meters["s2t_n_correct"].sum * 100.0 / meters["s2t_total"].sum, 3
)
if meters["s2t_total"].sum > 0
else float("nan"),
2
)
c_errors = sum(log.get("c_errors", 0) for log in s2t_logging_output)
metrics.log_scalar("_c_errors", c_errors)
c_total = sum(log.get("c_total", 0) for log in s2t_logging_output)
metrics.log_scalar("_c_total", c_total)
w_errors = sum(log.get("w_errors", 0) for log in s2t_logging_output)
metrics.log_scalar("_w_errors", w_errors)
wv_errors = sum(log.get("wv_errors", 0) for log in s2t_logging_output)
metrics.log_scalar("_wv_errors", wv_errors)
w_total = sum(log.get("w_total", 0) for log in s2t_logging_output)
metrics.log_scalar("_w_total", w_total)
if c_total > 0:
metrics.log_derived(
"uer",
lambda meters: safe_round(
meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3
)
if meters["_c_total"].sum > 0
else float("nan"),
)
if w_total > 0:
metrics.log_derived(
"wer",
lambda meters: safe_round(
meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
metrics.log_derived(
"raw_wer",
lambda meters: safe_round(
meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3
)
if meters["_w_total"].sum > 0
else float("nan"),
)
if task_name == 't2s':
# TTSLossCriterion.reduce_metrics([logging_output['t2s'] for logging_output in logging_outputs])
# t2s_sum = sum(log.get("speech_loss", 0) for log in logging_outputs)
t2s_logging_output = logging_outputs_dict[task_name]
loss_sum = sum(log.get("loss", 0) for log in t2s_logging_output)
l1_loss_sum = sum(log.get("l1_loss", 0) for log in t2s_logging_output)
l2_loss_sum = sum(log.get("l2_loss", 0) for log in t2s_logging_output)
bce_loss_sum = sum(log.get("bce_loss", 0) for log in t2s_logging_output)
sample_size = max(1, sum(log.get("sample_size", 0) for log in t2s_logging_output))
metrics.log_scalar(
"t2s_loss", loss_sum / sample_size, sample_size, 1, round=5
)
encoder_alpha_sum = sum(log.get("encoder_alpha", 0) for log in t2s_logging_output)
decoder_alpha_sum = sum(log.get("decoder_alpha", 0) for log in t2s_logging_output)
ngpu = sum(log.get("ngpu", 0) for log in t2s_logging_output)
metrics.log_scalar(
"t2s_l1_loss", l1_loss_sum / sample_size, sample_size, 2, round=5
)
metrics.log_scalar(
"t2s_l2_loss", l2_loss_sum / sample_size, sample_size, 2, round=5
)
metrics.log_scalar(
"t2s_bce_loss", bce_loss_sum / sample_size, sample_size, 2, round=5
)
metrics.log_scalar(
"t2s_encoder_alpha", encoder_alpha_sum / sample_size, sample_size, round=5
)
metrics.log_scalar(
"t2s_decoder_alpha", decoder_alpha_sum / sample_size, sample_size, round=5
)
if "enc_dec_attn_loss" in t2s_logging_output[0]:
enc_dec_attn_loss_sum = sum(log.get("enc_dec_attn_loss", 0) for log in t2s_logging_output)
metrics.log_scalar(
"t2s_enc_dec_attn_loss", enc_dec_attn_loss_sum / sample_size, sample_size, round=8
)
if task_name == 's2c':
s2c_logging_output = logging_outputs_dict[task_name]
loss_sum = sum(log.get("loss", 0) for log in s2c_logging_output)
nll_loss_sum = sum(log.get("nll_loss", 0) for log in s2c_logging_output)
ntokens = sum(log.get("ntokens", 0) for log in s2c_logging_output)
sample_size = max(1, sum(log.get("sample_size", 0) for log in s2c_logging_output))
metrics.log_scalar(
"s2c_loss", loss_sum / sample_size / math.log(2), sample_size, 1, round=3
)
metrics.log_scalar(
"s2c_nll_loss", nll_loss_sum / ntokens / math.log(2), ntokens, 2, round=3
)
total = utils.item(sum(log.get("total", 0) for log in s2c_logging_output))
if total > 0:
metrics.log_scalar("s2c_total", total)
n_correct = utils.item(sum(log.get("n_correct", 0) for log in s2c_logging_output))
metrics.log_scalar("s2c_n_correct", n_correct)
metrics.log_derived(
"s2c_accuracy",
lambda meters: round(
meters["s2c_n_correct"].sum * 100.0 / meters["s2c_total"].sum, 3
)
if meters["s2c_total"].sum > 0
else float("nan"),
2
)
if task_name == 's2s':
s2s_logging_output = logging_outputs_dict[task_name]
loss_sum = sum(log.get("loss", 0) for log in s2s_logging_output)
l1_loss_sum = sum(log.get("l1_loss", 0) for log in s2s_logging_output)
l2_loss_sum = sum(log.get("l2_loss", 0) for log in s2s_logging_output)
bce_loss_sum = sum(log.get("bce_loss", 0) for log in s2s_logging_output)
sample_size = max(1, sum(log.get("sample_size", 0) for log in s2s_logging_output))
metrics.log_scalar(
"s2s_loss", loss_sum / sample_size, sample_size, 1, round=5
)
encoder_alpha_sum = sum(log.get("encoder_alpha", 0) for log in s2s_logging_output)
decoder_alpha_sum = sum(log.get("decoder_alpha", 0) for log in s2s_logging_output)
ngpu = sum(log.get("ngpu", 0) for log in s2s_logging_output)
metrics.log_scalar(
"s2s_l1_loss", l1_loss_sum / sample_size, sample_size, 2, round=5
)
metrics.log_scalar(
"s2s_l2_loss", l2_loss_sum / sample_size, sample_size, 2, round=5
)
metrics.log_scalar(
"s2s_bce_loss", bce_loss_sum / sample_size, sample_size, 2, round=5
)
metrics.log_scalar(
"s2s_decoder_alpha", decoder_alpha_sum / sample_size, sample_size, round=5
)
if "enc_dec_attn_loss" in s2s_logging_output[0]:
enc_dec_attn_loss_sum = sum(log.get("enc_dec_attn_loss", 0) for log in s2s_logging_output)
metrics.log_scalar(
"s2s_enc_dec_attn_loss", enc_dec_attn_loss_sum / sample_size, sample_size, round=8
)
if task_name == 'text_pretrain':
bart_logging_output = logging_outputs_dict[task_name]
loss_sum = sum(log.get("loss", 0) for log in bart_logging_output)
ntokens = sum(log.get("ntokens", 0) for log in bart_logging_output)
sample_size = max(1, sum(log.get("sample_size", 0) for log in bart_logging_output))
bart_loss_sum = sum(log.get("bart_loss", 0) for log in bart_logging_output)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"text_loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"bart_loss", bart_loss_sum / sample_size / math.log(2), ntokens, 2, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"bart_nll_loss", bart_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"bart_ppl", lambda meters: utils.get_perplexity(meters["bart_nll_loss"].avg)
)
else:
metrics.log_derived(
"bart_ppl", lambda meters: utils.get_perplexity(meters["bart_loss"].avg)
)
metrics.log_scalar("bart_wpb", ntokens, priority=180, round=1)
val_prob_perplexity = 0
val_code_perplexity = 0
sample_size_pp = 0
count_log_cp = 0
for log in bart_logging_output:
if "loss_prob_perplexity" in log:
val_prob_perplexity = val_prob_perplexity + log["loss_prob_perplexity"]
sample_size_pp = sample_size_pp + log["sample_size"]
if "code_perplexity" in log:
val_code_perplexity = val_code_perplexity + log["code_perplexity"]
count_log_cp = count_log_cp + 1
if val_prob_perplexity > 0:
metrics.log_scalar("text_loss_prob_perplexity", val_prob_perplexity / sample_size_pp / math.log(2), round=3)
if val_code_perplexity > 0:
metrics.log_scalar("text_code_perplexity", val_code_perplexity / count_log_cp, round=3)
if task_name == 'speech_pretrain':
hubert_logging_output = logging_outputs_dict[task_name]
loss_sum = sum(log.get("loss", 0) for log in hubert_logging_output)
ntokens = sum(log.get("ntokens", 0) for log in hubert_logging_output)
sample_size = max(1, sum(log.get("sample_size", 0) for log in hubert_logging_output))
dec_loss_sum = sum(log.get("dec_loss", 0) for log in hubert_logging_output)
l1_loss_sum = sum(log.get("l1_loss", 0) for log in hubert_logging_output)
l2_loss_sum = sum(log.get("l2_loss", 0) for log in hubert_logging_output)
bce_loss_sum = sum(log.get("bce_loss", 0) for log in hubert_logging_output)
ngpu = sum(log.get("ngpu", 0) for log in hubert_logging_output)
metrics.log_scalar("hubert_loss", loss_sum / sample_size / math.log(2), sample_size, round=3)
if sample_size != ntokens:
metrics.log_scalar("hubert_nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_derived("hubert_ppl", lambda meters: utils.get_perplexity(meters["hubert_nll_loss"].avg))
else:
metrics.log_derived("hubert_ppl", lambda meters: utils.get_perplexity(meters["hubert_loss"].avg))
counts = {}
for lk in hubert_logging_output[0].keys():
if lk.startswith("count_"):
val = sum(log[lk] for log in hubert_logging_output)
metrics.log_scalar("hubert_" + lk, val)
counts[lk] = val
for lk in hubert_logging_output[0].keys():
if lk.startswith("loss_") and lk != 'loss_prob_perplexity':
val = sum(log[lk] for log in hubert_logging_output)
metrics.log_scalar("hubert_" + lk, val / sample_size / math.log(2), round=3)
elif lk.startswith("correct_"):
val = sum(log[lk] for log in hubert_logging_output)
metrics.log_scalar("hubert_" + lk, val / counts[re.sub("correct", "count", lk)])
# elif lk == 'code_perplexity':
# val = sum(log[lk] for log in hubert_logging_output)
# metrics.log_scalar("hubert_" + lk, val / len(hubert_logging_output), round=3)
val_prob_perplexity = 0
val_code_perplexity = 0
sample_size_pp = 0
count_log_cp = 0
for log in hubert_logging_output:
if "loss_prob_perplexity" in log:
val_prob_perplexity = val_prob_perplexity + log["loss_prob_perplexity"]
sample_size_pp = sample_size_pp + log["sample_size"]
if "code_perplexity" in log:
val_code_perplexity = val_code_perplexity + log["code_perplexity"]
count_log_cp = count_log_cp + 1
if val_prob_perplexity > 0:
metrics.log_scalar("hubert_loss_prob_perplexity", val_prob_perplexity / sample_size_pp / math.log(2), round=3)
if val_code_perplexity > 0:
metrics.log_scalar("hubert_code_perplexity", val_code_perplexity / count_log_cp, round=3)
metrics.log_scalar(
"hubert_dec_loss", dec_loss_sum / ngpu, sample_size, 2, round=5
)
metrics.log_scalar(
"hubert_l1_loss", l1_loss_sum / ngpu, sample_size, 2, round=5
)
metrics.log_scalar(
"hubert_l2_loss", l2_loss_sum / ngpu, sample_size, 2, round=5
)
metrics.log_scalar(
"hubert_bce_loss", bce_loss_sum / ngpu, sample_size, 2, round=5
)
if "enc_dec_attn_loss" in hubert_logging_output[0]:
enc_dec_attn_loss_sum = sum(log.get("enc_dec_attn_loss", 0) for log in hubert_logging_output)
metrics.log_scalar(
"hubert_enc_dec_attn_loss", enc_dec_attn_loss_sum / ngpu, sample_size, round=8
)
metrics.log_scalar("hubert_wpb", ntokens, priority=180, round=1)
loss = sum(log.get("loss", 0) for log in logging_outputs)
sample_size = max(1, sum(log.get("sample_size", 0) for log in logging_outputs))
metrics.log_scalar(
"loss", loss / sample_size, sample_size, 1, round=5
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return False
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/speecht5_criterion.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import math
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
@dataclass
class TextPretrainCriterionConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
loss_weights: Optional[List[float]] = field(
default_factory=lambda: [0.1,],
metadata={"help": "weights for additional loss terms (not first one)"},
)
bart_weight: float = field(
default=1.0,
metadata={"help": "loss weight for cross entropy"},
)
class TextPretrainCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg, bart_weight, loss_weights=None):
super().__init__(task)
self.sentence_avg = sentence_avg
self.loss_weights = loss_weights
self.bart_weight = bart_weight
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output, codebook_out, encoder_output = model(**sample["net_input"])
bart_loss, _ = self.compute_loss(model, net_output, sample, reduce=reduce)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
loss = self.bart_weight * bart_loss
logging_output = {
"loss": loss.item(),
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"bart_loss": bart_loss.item(),
"sample_size": sample_size,
}
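        # extra quantizer losses (e.g. the prob_perplexity/diversity term) are added on
        # top of the BART loss, each scaled by its loss_weights entry and by sample_size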
if "prob_perplexity" in codebook_out:
assert hasattr(model, "get_extra_losses")
extra_losses, names = model.get_extra_losses(codebook_out)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
names = [names]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
if len(self.loss_weights) > len(extra_losses):
modified_loss_weight = self.loss_weights[len(extra_losses):]
else:
modified_loss_weight = self.loss_weights
# assert len(extra_losses) == len(self.loss_weights), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, n, coef in zip(extra_losses, names, modified_loss_weight):
# print(n + str(coef))
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
logging_output[f"loss_{n}"] = p.item()
if 'loss_prob_perplexity' in logging_output:
logging_output['code_perplexity'] = codebook_out['code_perplexity'].item()
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1)
loss = F.nll_loss(
lprobs,
target,
ignore_index=self.padding_idx,
reduction="sum" if reduce else "none",
)
return loss, loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
bart_loss_sum = sum(log.get("bart_loss", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
metrics.log_scalar(
"bart_loss", bart_loss_sum / sample_size / math.log(2), ntokens, 2, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", bart_loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["bart_loss"].avg)
)
if "loss_prob_perplexity" in logging_outputs[0].keys():
val = sum(log["loss_prob_perplexity"] for log in logging_outputs)
metrics.log_scalar("loss_prob_perplexity", val / sample_size / math.log(2), round=3)
if "code_perplexity" in logging_outputs[0].keys():
val = sum(log["code_perplexity"] for log in logging_outputs)
metrics.log_scalar("code_perplexity", val / len(logging_outputs), round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/text_pretrain_criterion.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
from dataclasses import dataclass, field
import torch
from fairseq import metrics, utils
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from speecht5.models.modules.speech_encoder_prenet import SpeechEncoderPrenet
from espnet.nets.pytorch_backend.e2e_tts_tacotron2 import GuidedAttentionLoss
from omegaconf import II
from typing import Any
@dataclass
class TexttoSpeechLossConfig(FairseqDataclass):
use_masking: bool = field(
default=True,
metadata={"help": "Whether to use masking in calculation of loss"},
)
use_weighted_masking: bool = field(
default=False,
metadata={"help": "Whether to use weighted masking in calculation of loss"},
)
loss_type: str = field(
default="L1",
metadata={"help": "How to calc loss"},
)
bce_pos_weight: float = field(
default=5.0,
metadata={"help": "Positive sample weight in BCE calculation (only for use-masking=True)"},
)
bce_loss_lambda: float = field(
default=1.0,
metadata={"help": "Lambda in bce loss"},
)
use_guided_attn_loss: bool = field(
default=False,
metadata={"help": "Whether to use guided attention loss"},
)
guided_attn_loss_sigma: float = field(
default=0.4,
metadata={"help": "Sigma in guided attention loss"},
)
guided_attn_loss_lambda: float = field(
default=10.0,
metadata={"help": "Lambda in guided attention loss"},
)
num_layers_applied_guided_attn: int = field(
default=2,
metadata={"help": "Number of layers to be applied guided attention loss, if set -1, all of the layers will be applied."},
)
num_heads_applied_guided_attn: int = field(
default=2,
metadata={"help": "Number of heads in each layer to be applied guided attention loss, if set -1, all of the heads will be applied."},
)
modules_applied_guided_attn: Any = field(
default=("encoder-decoder",),
metadata={"help": "Module name list to be applied guided attention loss"},
)
sentence_avg: bool = II("optimization.sentence_avg")
class TexttoSpeechLoss(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
use_masking=True,
use_weighted_masking=False,
loss_type="L1",
bce_pos_weight=5.0,
bce_loss_lambda=1.0,
use_guided_attn_loss=False,
guided_attn_loss_sigma=0.4,
guided_attn_loss_lambda=1.0,
num_layers_applied_guided_attn=2,
num_heads_applied_guided_attn=2,
modules_applied_guided_attn=["encoder-decoder"],
):
super().__init__(task)
self.sentence_avg = sentence_avg
self.use_masking = use_masking
self.use_weighted_masking = use_weighted_masking
self.loss_type = loss_type
self.bce_pos_weight = bce_pos_weight
self.bce_loss_lambda = bce_loss_lambda
self.use_guided_attn_loss = use_guided_attn_loss
self.guided_attn_loss_sigma = guided_attn_loss_sigma
self.guided_attn_loss_lambda = guided_attn_loss_lambda
# define loss function
self.criterion = Tacotron2Loss(
use_masking=use_masking,
use_weighted_masking=use_weighted_masking,
bce_pos_weight=bce_pos_weight,
)
if self.use_guided_attn_loss:
self.num_layers_applied_guided_attn = num_layers_applied_guided_attn
self.num_heads_applied_guided_attn = num_heads_applied_guided_attn
self.modules_applied_guided_attn = modules_applied_guided_attn
if self.use_guided_attn_loss:
self.attn_criterion = GuidedMultiHeadAttentionLoss(
sigma=guided_attn_loss_sigma,
alpha=guided_attn_loss_lambda,
)
def forward(self, model, sample):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss, l1_loss, l2_loss, bce_loss, enc_dec_attn_loss = self.compute_loss(model, net_output, sample)
# sample_size = (
# sample["target"].size(0) if self.sentence_avg else sample["nframes"]
# )
sample_size = 1
logging_output = {
"loss": loss.item(),
"l1_loss": l1_loss.item(),
"l2_loss": l2_loss.item(),
"bce_loss": bce_loss.item(),
"sample_size": 1,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
}
if enc_dec_attn_loss is not None:
logging_output['enc_dec_attn_loss'] = enc_dec_attn_loss.item()
if hasattr(model, 'text_encoder_prenet'):
logging_output["encoder_alpha"] = model.text_encoder_prenet.encoder_prenet[-1].alpha.item()
logging_output["decoder_alpha"] = model.speech_decoder_prenet.decoder_prenet[-1].alpha.item()
elif hasattr(model, "speech_encoder_prenet"):
logging_output["decoder_alpha"] = model.speech_decoder_prenet.decoder_prenet[-1].alpha.item()
else:
if 'task' not in sample:
logging_output["encoder_alpha"] = model.encoder_prenet.encoder_prenet[-1].alpha.item()
logging_output["decoder_alpha"] = model.decoder_prenet.decoder_prenet[-1].alpha.item()
return loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample):
before_outs, after_outs, logits, attn = net_output
labels = sample["labels"]
ys = sample["dec_target"]
olens = sample["dec_target_lengths"]
ilens = sample["src_lengths"]
        # trim the ground truth so its length is a multiple of the reduction factor
if model.reduction_factor > 1:
olens_in = olens.new([torch.div(olen, model.reduction_factor, rounding_mode='floor') for olen in olens])
olens = olens.new([olen - olen % model.reduction_factor for olen in olens])
max_olen = max(olens)
ys = ys[:, :max_olen]
labels = labels[:, :max_olen]
labels = torch.scatter(labels, 1, (olens - 1).unsqueeze(1), 1.0) # make sure at least one frame has 1
# labels[:, -1] = 1.0
else:
olens_in = olens
        # calculate loss values
l1_loss, l2_loss, bce_loss = self.criterion(
after_outs, before_outs, logits, ys, labels, olens
)
# l1_loss = l1_loss / ys.size(2)
# l2_loss = l2_loss / ys.size(2)
if self.loss_type == "L1":
loss = l1_loss + self.bce_loss_lambda * bce_loss if self.bce_loss_lambda > 0.0 else l1_loss
elif self.loss_type == "L2":
loss = l2_loss + self.bce_loss_lambda * bce_loss if self.bce_loss_lambda > 0.0 else l2_loss
elif self.loss_type == "L1+L2":
loss = l1_loss + l2_loss + self.bce_loss_lambda * bce_loss if self.bce_loss_lambda > 0.0 else l1_loss + l2_loss
else:
raise ValueError("unknown --loss-type " + self.loss_type)
# calculate guided attention loss
enc_dec_attn_loss = None
if self.use_guided_attn_loss:
            # calculate the encoder input lengths, which are determined by the encoder prenet
if hasattr(model, 'encoder_reduction_factor') and model.encoder_reduction_factor > 1:
ilens_in = ilens.new([ilen // model.encoder_reduction_factor for ilen in ilens])
else:
ilens_in = ilens
            # for the speech-to-speech task, input lengths are mapped through the speech encoder prenet
if "task_name" in sample and sample["task_name"] == "s2s":
m = None
if hasattr(model, 'encoder_prenet'):
m = model.encoder_prenet
elif hasattr(model, 'speech_encoder_prenet'):
m = model.speech_encoder_prenet
if m is not None and isinstance(m, SpeechEncoderPrenet):
ilens_in = m.get_src_lengths(ilens_in)
# calculate for encoder-decoder
if "encoder-decoder" in self.modules_applied_guided_attn:
attn = [att_l[:, : self.num_heads_applied_guided_attn] for att_l in attn]
att_ws = torch.cat(attn, dim=1) # (B, H*L, T_out, T_in)
enc_dec_attn_loss = self.attn_criterion(att_ws, ilens_in, olens_in)
loss = loss + enc_dec_attn_loss
return loss, l1_loss, l2_loss, bce_loss, enc_dec_attn_loss
@classmethod
def reduce_metrics(cls, logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
l1_loss_sum = sum(log.get("l1_loss", 0) for log in logging_outputs)
l2_loss_sum = sum(log.get("l2_loss", 0) for log in logging_outputs)
bce_loss_sum = sum(log.get("bce_loss", 0) for log in logging_outputs)
sample_size = max(1, sum(log.get("sample_size", 0) for log in logging_outputs))
metrics.log_scalar(
"loss", loss_sum / sample_size, sample_size, 1, round=5
)
encoder_alpha_sum = sum(log.get("encoder_alpha", 0) for log in logging_outputs)
decoder_alpha_sum = sum(log.get("decoder_alpha", 0) for log in logging_outputs)
ngpu = sum(log.get("ngpu", 0) for log in logging_outputs)
metrics.log_scalar(
"l1_loss", l1_loss_sum / sample_size, sample_size, 2, round=5
)
metrics.log_scalar(
"l2_loss", l2_loss_sum / sample_size, sample_size, 2, round=5
)
metrics.log_scalar(
"bce_loss", bce_loss_sum / sample_size, sample_size, 2, round=5
)
metrics.log_scalar(
"encoder_alpha", encoder_alpha_sum / sample_size, sample_size, round=5
)
metrics.log_scalar(
"decoder_alpha", decoder_alpha_sum / sample_size, sample_size, round=5
)
if "enc_dec_attn_loss" in logging_outputs[0]:
enc_dec_attn_loss_sum = sum(log.get("enc_dec_attn_loss", 0) for log in logging_outputs)
metrics.log_scalar(
"enc_dec_attn_loss", enc_dec_attn_loss_sum / sample_size, sample_size, round=8
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
class Tacotron2Loss(torch.nn.Module):
"""Loss function module for Tacotron2."""
def __init__(
self, use_masking=True, use_weighted_masking=False, bce_pos_weight=20.0
):
"""Initialize Tactoron2 loss module.
Args:
use_masking (bool): Whether to apply masking
for padded part in loss calculation.
use_weighted_masking (bool):
Whether to apply weighted masking in loss calculation.
bce_pos_weight (float): Weight of positive sample of stop token.
"""
super(Tacotron2Loss, self).__init__()
assert (use_masking != use_weighted_masking) or not use_masking
self.use_masking = use_masking
self.use_weighted_masking = use_weighted_masking
# define criterions
# reduction = "none" if self.use_weighted_masking else "sum"
reduction = "none" if self.use_weighted_masking else "mean"
self.l1_criterion = torch.nn.L1Loss(reduction=reduction)
self.mse_criterion = torch.nn.MSELoss(reduction=reduction)
self.bce_criterion = torch.nn.BCEWithLogitsLoss(
reduction=reduction, pos_weight=torch.tensor(bce_pos_weight)
)
# NOTE(kan-bayashi): register pre hook function for the compatibility
self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook)
def forward(self, after_outs, before_outs, logits, ys, labels, olens):
"""Calculate forward propagation.
Args:
after_outs (Tensor): Batch of outputs after postnets (B, Lmax, odim).
before_outs (Tensor): Batch of outputs before postnets (B, Lmax, odim).
logits (Tensor): Batch of stop logits (B, Lmax).
ys (Tensor): Batch of padded target features (B, Lmax, odim).
labels (LongTensor): Batch of the sequences of stop token labels (B, Lmax).
olens (LongTensor): Batch of the lengths of each target (B,).
Returns:
Tensor: L1 loss value.
Tensor: Mean square error loss value.
Tensor: Binary cross entropy loss value.
"""
# make mask and apply it
if self.use_masking:
masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
ys = ys.masked_select(masks)
after_outs = after_outs.masked_select(masks)
before_outs = before_outs.masked_select(masks)
labels = labels.masked_select(masks[:, :, 0])
logits = logits.masked_select(masks[:, :, 0])
# calculate loss
l1_loss = self.l1_criterion(after_outs, ys) + self.l1_criterion(before_outs, ys)
mse_loss = self.mse_criterion(after_outs, ys) + self.mse_criterion(
before_outs, ys
)
bce_loss = self.bce_criterion(logits, labels)
# make weighted mask and apply it
if self.use_weighted_masking:
masks = make_non_pad_mask(olens).unsqueeze(-1).to(ys.device)
weights = masks.float() / masks.sum(dim=1, keepdim=True).float()
out_weights = weights.div(ys.size(0) * ys.size(2))
logit_weights = weights.div(ys.size(0))
# apply weight
l1_loss = l1_loss.mul(out_weights).masked_select(masks).sum()
mse_loss = mse_loss.mul(out_weights).masked_select(masks).sum()
bce_loss = (
bce_loss.mul(logit_weights.squeeze(-1))
.masked_select(masks.squeeze(-1))
.sum()
)
return l1_loss, mse_loss, bce_loss
def _load_state_dict_pre_hook(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
"""Apply pre hook fucntion before loading state dict.
From v.0.6.1 `bce_criterion.pos_weight` param is registered as a parameter but
old models do not include it and as a result, it causes missing key error when
loading old model parameter. This function solve the issue by adding param in
state dict before loading as a pre hook function
of the `load_state_dict` method.
"""
key = prefix + "bce_criterion.pos_weight"
if key not in state_dict:
state_dict[key] = self.bce_criterion.pos_weight
class GuidedMultiHeadAttentionLoss(GuidedAttentionLoss):
"""Guided attention loss function module for multi head attention.
Args:
sigma (float, optional): Standard deviation to control
how close attention to a diagonal.
alpha (float, optional): Scaling coefficient (lambda).
reset_always (bool, optional): Whether to always reset masks.
"""
def forward(self, att_ws, ilens, olens):
"""Calculate forward propagation.
Args:
att_ws (Tensor):
Batch of multi head attention weights (B, H, T_max_out, T_max_in).
            ilens (LongTensor): Batch of input lengths (B,).
            olens (LongTensor): Batch of output lengths (B,).
Returns:
Tensor: Guided attention loss value.
"""
if self.guided_attn_masks is None:
self.guided_attn_masks = (
self._make_guided_attention_masks(ilens, olens)
.to(att_ws.device)
.unsqueeze(1)
)
if self.masks is None:
self.masks = self._make_masks(ilens, olens).to(att_ws.device).unsqueeze(1)
losses = self.guided_attn_masks * att_ws
loss = torch.mean(losses.masked_select(self.masks))
if self.reset_always:
self._reset_masks()
return self.alpha * loss
def _make_guided_attention_masks(self, ilens, olens):
n_batches = len(ilens)
max_ilen = max(ilens)
max_olen = max(olens)
guided_attn_masks = torch.zeros((n_batches, max_olen, max_ilen), device=olens.device)
for idx, (ilen, olen) in enumerate(zip(ilens, olens)):
guided_attn_masks[idx, :olen, :ilen] = self._make_guided_attention_mask(
ilen, olen, self.sigma
)
return guided_attn_masks
@staticmethod
def _make_guided_attention_mask(ilen, olen, sigma):
grid_x, grid_y = torch.meshgrid(torch.arange(olen, device=olen.device), torch.arange(ilen, device=olen.device))
grid_x, grid_y = grid_x.float(), grid_y.float()
return 1.0 - torch.exp(
-((grid_y / ilen - grid_x / olen) ** 2) / (2 * (sigma**2))
)
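    # the mask value is close to 0 where t_in/ilen ~ t_out/olen (the diagonal) and
    # approaches 1 far from it, so multiplying it with the attention weights in
    # forward() penalizes alignments that stray from a roughly monotonic diagonal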
@staticmethod
def _make_masks(ilens, olens):
in_masks = make_non_pad_mask(ilens).to(ilens.device) # (B, T_in)
out_masks = make_non_pad_mask(olens).to(olens.device) # (B, T_out)
return out_masks.unsqueeze(-1) & in_masks.unsqueeze(-2) # (B, T_out, T_in)
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/text_to_speech_loss.py |
# --------------------------------------------------------
# SpeechT5: Unified-Modal Encoder-Decoder Pre-Training for Spoken Language Processing (https://arxiv.org/abs/2110.07205)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechT5
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq and espnet code bases
# https://github.com/pytorch/fairseq; https://github.com/espnet/espnet
# --------------------------------------------------------
import math
import re
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion
from speecht5.criterions.text_to_speech_loss import TexttoSpeechLoss, TexttoSpeechLossConfig
@dataclass
class SpeechPretrainCriterionConfig(TexttoSpeechLossConfig):
pred_masked_weight: float = field(
default=1.0,
metadata={"help": "weight for predictive loss for masked frames"},
)
pred_nomask_weight: float = field(
default=0.0,
metadata={"help": "weight for predictive loss for unmasked frames"},
)
loss_weights: Optional[List[float]] = field(
default_factory=lambda: [10,],
metadata={"help": "weights for additional loss terms (not first one)"},
)
log_keys: List[str] = field(
default_factory=lambda: [],
metadata={"help": "output keys to log"},
)
hubert_weight: float = field(
default=1.0,
metadata={"help": "weight of hubert loss"},
)
dec_weight: float = field(
default=1.0,
metadata={"help": "weight of decoder loss"},
)
class SpeechPretrainCriterion(FairseqCriterion):
def __init__(
self,
task,
sentence_avg,
pred_masked_weight,
pred_nomask_weight,
loss_weights=None,
log_keys=None,
use_masking=True,
use_weighted_masking=False,
loss_type="L1",
bce_pos_weight=5.0,
hubert_weight=1.0,
dec_weight=1.0,
):
super().__init__(task)
self.pred_masked_weight = pred_masked_weight
self.pred_nomask_weight = pred_nomask_weight
self.loss_weights = loss_weights
self.log_keys = [] if log_keys is None else log_keys
self.hubert_weight = hubert_weight
self.dec_weight = dec_weight
self.speech_criterion = TexttoSpeechLoss(
task,
sentence_avg,
use_masking,
use_weighted_masking,
loss_type,
bce_pos_weight,
)
def forward(self, model, sample, reduce=True, log_pred=False):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
if self.dec_weight == 0:
sample["net_input"]["only_hubert"] = True
net_output, net_output_dec = model(target_list=sample["target_list"], **sample["net_input"])
loss = 0.
sample_size = 0
logging_output = {}
reduction = "sum" if reduce else "none"
loss_m_list = []
logp_m_list = model.get_logits(net_output, True)
targ_m_list = model.get_targets(None, net_output, True)
assert self.pred_masked_weight == 0 or len(logp_m_list) > 0
for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)):
loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction)
loss_m_list.append(loss_m)
logging_output[f"loss_m_{i}"] = loss_m.detach().item()
if self.pred_masked_weight > 0:
loss += self.pred_masked_weight * sum(loss_m_list)
sample_size += targ_m_list[0].numel()
loss_u_list = []
logp_u_list = model.get_logits(net_output, False)
targ_u_list = model.get_targets(None, net_output, False)
assert self.pred_nomask_weight == 0 or len(logp_u_list) > 0
for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)):
loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction)
loss_u_list.append(loss_u)
logging_output[f"loss_u_{i}"] = loss_u.detach().item()
if self.pred_nomask_weight > 0:
loss += self.pred_nomask_weight * sum(loss_u_list)
sample_size += targ_u_list[0].numel()
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses, names = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
names = [names]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
if len(self.loss_weights) > len(extra_losses):
modified_loss_weight = self.loss_weights[:len(extra_losses)]
else:
modified_loss_weight = self.loss_weights
# assert len(extra_losses) == len(self.loss_weights), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, n, coef in zip(extra_losses, names, modified_loss_weight):
# print(n + str(coef))
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
logging_output[f"loss_{n}"] = p.detach().item()
logging_output = {
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
"ngpu": 1,
**logging_output,
}
if 'loss_prob_perplexity' in logging_output:
logging_output['code_perplexity'] = net_output['code_perplexity'].detach().item()
for lk in self.log_keys:
if lk in net_output:
logging_output[lk] = float((net_output[lk].item()))
def compute_correct(logits):
if logits.numel() == 0:
return 0, 0
else:
assert logits.dim() > 1, logits.shape
max = logits.argmax(-1) == 0
min = logits.argmin(-1) == 0
both = max & min
corr = max.long().sum().item() - both.long().sum().item()
count = max.numel()
return corr, count
with torch.no_grad():
for i, logp_m in enumerate(logp_m_list):
corr_m, count_m = compute_correct(logp_m)
logging_output[f"correct_m_{i}"] = corr_m
logging_output[f"count_m_{i}"] = count_m
for i, logp_u in enumerate(logp_u_list):
corr_u, count_u = compute_correct(logp_u)
logging_output[f"correct_u_{i}"] = corr_u
logging_output[f"count_u_{i}"] = count_u
if self.dec_weight == 0.0:
logging_output["loss"] = loss.item() if reduce else loss
return loss, sample_size, logging_output
# ## dec loss
dec_loss, l1_loss, l2_loss, bce_loss, enc_dec_attn_loss = self.speech_criterion.compute_loss(model, net_output_dec, sample)
# Log tts loss
logging_output['dec_loss'] = dec_loss.item()
logging_output['l1_loss'] = l1_loss.item()
logging_output['l2_loss'] = l2_loss.item()
logging_output['bce_loss'] = bce_loss.item()
if enc_dec_attn_loss is not None:
logging_output['enc_dec_attn_loss'] = enc_dec_attn_loss.item()
loss = self.hubert_weight * loss + self.dec_weight * sample_size * dec_loss
logging_output["loss"] = loss.item() if reduce else loss
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training (copied from normal cross entropy)."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
dec_loss_sum = sum(log.get("dec_loss", 0) for log in logging_outputs)
l1_loss_sum = sum(log.get("l1_loss", 0) for log in logging_outputs)
l2_loss_sum = sum(log.get("l2_loss", 0) for log in logging_outputs)
bce_loss_sum = sum(log.get("bce_loss", 0) for log in logging_outputs)
ngpu = sum(log.get("ngpu", 0) for log in logging_outputs)
metrics.log_scalar("loss", loss_sum / sample_size / math.log(2), sample_size, round=3)
if sample_size != ntokens:
metrics.log_scalar("nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg))
else:
metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["loss"].avg))
counts = {}
for lk in logging_outputs[0].keys():
if lk.startswith("count_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val)
counts[lk] = val
for lk in logging_outputs[0].keys():
if lk.startswith("loss_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val / sample_size / math.log(2), round=3)
elif lk.startswith("correct_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val / counts[re.sub("correct", "count", lk)])
elif lk == 'code_perplexity':
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val / len(logging_outputs), round=3)
metrics.log_scalar(
"dec_loss", dec_loss_sum / ngpu, sample_size, 2, round=5
)
metrics.log_scalar(
"l1_loss", l1_loss_sum / ngpu, sample_size, 2, round=5
)
metrics.log_scalar(
"l2_loss", l2_loss_sum / ngpu, sample_size, 2, round=5
)
metrics.log_scalar(
"bce_loss", bce_loss_sum / ngpu, sample_size, 2, round=5
)
if "enc_dec_attn_loss" in logging_outputs[0]:
enc_dec_attn_loss_sum = sum(log.get("enc_dec_attn_loss", 0) for log in logging_outputs)
metrics.log_scalar(
"enc_dec_attn_loss", enc_dec_attn_loss_sum / ngpu, sample_size, round=8
)
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
raise NotImplementedError()
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return False
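# --- Hedged illustrative sketch (not part of the original criterion) -------
# The final training loss in forward() combines the HuBERT-style masked/unmasked
# prediction loss with the decoder (TTS) loss, scaled by sample_size:
#   loss = hubert_weight * hubert_loss + dec_weight * sample_size * dec_loss
# The helper below only reproduces that arithmetic with dummy scalar tensors.
def _example_loss_weighting(sample_size=100, hubert_weight=1.0, dec_weight=1.0):
    hubert_loss = torch.tensor(2.5)  # stand-in for the summed masked/unmasked CE loss
    dec_loss = torch.tensor(0.8)     # stand-in for the decoder (TTS) loss
    return hubert_weight * hubert_loss + dec_weight * sample_size * dec_loss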
| EXA-1-master | exa/models/unilm-master/speecht5/speecht5/criterions/speech_pretrain_criterion.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import copy
import logging
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from modules import (
compute_mask_indices,
LayerNorm,
ConvFeatureExtractionModel,
GradMultiply,
TransformerEncoder,
TransformerEncoderBase,
)
# from fairseq.models.transformer import TransformerConfig
logger = logging.getLogger(__name__)
class DictConfig:
def __init__(self, cfg=None):
if cfg is not None:
self.update(cfg)
def update(self, cfg: dict):
self.__dict__.update(cfg)
class TransformerConfig:
def __init__(self, cfg=None):
if cfg is not None:
self.update(cfg)
def update(self, cfg: dict):
if 'encoder' in cfg:
self.encoder = DictConfig(cfg['encoder'])
del cfg['encoder']
if 'quant_noise' in cfg:
self.quant_noise = DictConfig(cfg['quant_noise'])
del cfg['quant_noise']
if 'decoder' in cfg:
del cfg['decoder']
self.__dict__.update(cfg)
class SpeechLMConfig:
def __init__(self, cfg=None):
self.label_rate: int = 50
self.extractor_mode: str = "default" # mode for feature extractor. default has a single group norm with d groups in the first conv block, whereas layer_norm has layer norms in every block (meant to use with normalize=True)
self.encoder_layers: int = 12 # num encoder layers in the transformer
        self.encoder_embed_dim: int = 768 # encoder embedding dimension
self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN
self.encoder_attention_heads: int = 12 # num encoder attention heads
self.activation_fn: str = "gelu" # activation function to use
self.layer_type: str = "transformer" # layer type in encoder
# dropouts
self.dropout: float = 0.1 # dropout probability for the transformer
self.attention_dropout: float = 0.1 # dropout probability for attention weights
self.activation_dropout: float = 0.0 # dropout probability after activation in FFN
        self.encoder_layerdrop: float = 0.0 # probability of dropping a transformer layer
self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr)
self.dropout_features: float = 0.0 # dropout to apply to the features (after feat extr)
self.final_dim: int = 256 # project final representations and targets to this many dimensions
self.layer_norm_first: bool = False # apply layernorm first in the transformer
self.conv_feature_layers: str = "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2" # string describing convolutional feature extraction layers in form of a python list that contains [(dim, kernel_size, stride), ...]
self.conv_bias: bool = False # include bias in conv encoder
self.feature_grad_mult: float = 1.0 # multiply feature extractor var grads by this
# masking
self.mask_length: int = 10 # mask length
self.mask_prob: float = 0.65 # probability of replacing a token with mask
self.mask_selection: str = "static" # how to choose mask length
        self.mask_other: float = 0 # secondary mask argument (used for more complex distributions), see help in compute_mask_indices
self.no_mask_overlap: bool = False # whether to allow masks to overlap
self.mask_min_space: int = 1 # min space between spans (if no overlap is enabled)
# channel masking
self.mask_channel_length: int = 10 # length of the mask for features (channels)
self.mask_channel_prob: float = 0.0 # probability of replacing a feature with 0
self.mask_channel_selection: str = "static" # how to choose mask length for channel masking
self.mask_channel_other: float = 0 # secondary mask argument (used for more complex distributions), see help in compute_mask_indices
self.no_mask_channel_overlap: bool = False # whether to allow channel masks to overlap
self.mask_channel_min_space: int = 1 # min space between spans (if no overlap is enabled)
# positional embeddings
self.conv_pos: int = 128 # number of filters for convolutional positional embeddings
self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding
# loss computation
self.skip_masked: bool = False # skip computing losses over masked frames
self.skip_nomask: bool = False # skip computing losses over unmasked frames
self.checkpoint_activations: bool = False # recompute activations and save memory for extra compute
# FP16 optimization
self.required_seq_len_multiple: int = 2 # pad the input to encoder such that the sequence length is divisible by multiple
# Custom
self.use_rel_pos_enc: bool = False # whether to use relative positional encoding
        self.scaling_for_att: float = 1.0 # scaling for attention weights to prevent overflow issue (for large model)
        self.logit_temp: float = 0.1 # temperature to divide the prediction logits by (assumed default; required by SpeechLM.__init__)
        # unit encoder-decoder
        self.add_unit_encoder: bool = False # add unit encoder
        self.mask_u2t: bool = True # mask the unit input of the unit-to-text branch (assumed default; required by SpeechLM.__init__)
        self.compute_mum: bool = False # compute a masked-unit-modeling loss on the unit encoder output (assumed default)
        self.add_text_ctc: bool = True # add a CTC head over the unit encoder for text data (assumed default)
        self.text_ctc_conv_kernel: int = 2 # kernel size of the conv layer before the text CTC head (assumed default)
        self.add_decoder: bool = False # add a unit2text decoder, not available for now
        # embedding mixing
        self.mix_with_unit: bool = True # mix with the unit embeddings
        self.use_pred_unit: bool = False # use the embeddings of predicted units
        self.l2_embedding: bool = False # compute l2 loss between unit embedding and unit hidden state
if cfg is not None:
self.update(cfg)
def update(self, cfg: dict):
model_cfg = copy.deepcopy(cfg)
self.text_transformer = TransformerConfig(model_cfg['text_transformer'])
del model_cfg['text_transformer']
self.__dict__.update(model_cfg)
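# --- Hedged illustrative sketch (not part of the original file) ------------
# SpeechLMConfig.update() expects a plain dict that carries a nested
# 'text_transformer' sub-dict (itself holding optional 'encoder'/'quant_noise'
# sub-dicts); all other keys override the defaults above. The keys used here
# are illustrative only, not the exact ones stored in released checkpoints.
def _example_build_config():
    return SpeechLMConfig({
        "encoder_layers": 6,            # override a default
        "use_rel_pos_enc": True,
        "text_transformer": {
            "encoder": {"embed_dim": 768, "layers": 6},
            "quant_noise": {"pq": 0.0},
        },
    })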
class SpeechLM(nn.Module):
def __init__(
self,
cfg: SpeechLMConfig,
) -> None:
super().__init__()
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers) # noqa
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
sample_rate = 16000
feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers])
self.feat2tar_ratio = cfg.label_rate * feature_ds_rate / sample_rate
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.logit_temp = cfg.logit_temp
self.skip_masked = cfg.skip_masked
self.skip_nomask = cfg.skip_nomask
self.final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
self.final_proj_list = nn.ModuleList([
nn.Linear(cfg.encoder_embed_dim, self.final_dim) for _ in range(2)
])
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
        self.layer_norm = LayerNorm(self.embed)
        self.target_glu = None  # optional GLU over label embeddings; kept as None in this standalone file (see remove_pretraining_modules)
### build unit encoder:
self.mask_u2t = cfg.mask_u2t
self.compute_mum = cfg.compute_mum
self.add_text_ctc = cfg.add_text_ctc
self.text_ctc_conv_kernel = cfg.text_ctc_conv_kernel
self.padding_idx = 1
self.add_unit_encoder = cfg.add_unit_encoder
self.mix_with_unit = cfg.mix_with_unit
self.use_pred_unit = cfg.use_pred_unit
self.l2_embedding = cfg.l2_embedding
if self.add_unit_encoder:
self.unit_embed_tokens = None
### build unit encoder
self.unit_encoder = TransformerEncoderBase(
cfg.text_transformer,
dictionary=None,
embed_tokens=self.unit_embed_tokens,
use_rel_pos_enc=cfg.use_rel_pos_enc,
scaling_for_att=cfg.scaling_for_att,
)
### build unit2text decoder, not available for now
self.add_decoder = cfg.add_decoder
    def upgrade_state_dict_named(self, state_dict, name):
        """Upgrade a (possibly old) state dict for new versions."""
        # nn.Module defines no upgrade_state_dict_named, so there is nothing to delegate to here.
        return state_dict
def apply_mask(self, x, padding_mask, target_list):
B, T, C = x.shape
if self.mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def forward_features(self, source: torch.Tensor) -> torch.Tensor:
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
return features
def forward_targets(
self,
features: torch.Tensor,
target_list: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
# Trim features to ensure labels exist and then get aligned labels
feat_tsz = features.size(2)
targ_tsz = min([t.size(1) for t in target_list])
if self.feat2tar_ratio * feat_tsz > targ_tsz:
feat_tsz = int(targ_tsz / self.feat2tar_ratio)
features = features[..., :feat_tsz]
target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio
target_inds += np.random.choice(int(self.feat2tar_ratio))
target_list = [t[:, target_inds.long()] for t in target_list]
return features, target_list
def forward_padding_mask(
self,
features: torch.Tensor,
padding_mask: torch.Tensor,
) -> torch.Tensor:
extra = padding_mask.size(1) % features.size(1)
if extra > 0:
padding_mask = padding_mask[:, :-extra]
padding_mask = padding_mask.view(padding_mask.size(0), features.size(1), -1)
padding_mask = padding_mask.all(-1)
return padding_mask
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def downsample_ctc_padding_mask(self, padding_mask):
"""
padding_mask: (B, T)
"""
stride = self.text_ctc_conv_kernel // 2
return padding_mask[:, ::stride]
def compute_pred(self, proj_x, label_embs):
if self.target_glu:
label_embs = self.target_glu(label_embs)
x = F.normalize(proj_x.float(), dim=-1) # (S, D)
label_embs = F.normalize(label_embs.float(), dim=-1) # (C, D)
logits = torch.matmul(x, label_embs.T).type_as(proj_x) # (S, C)
logits /= self.logit_temp
return logits
def compute_hubert_logits(self, x, target, proj, label_embs, padding_mask, mask_indices):
if not self.skip_masked:
masked_indices = torch.logical_and(~padding_mask, mask_indices)
proj_x_m = proj(x[masked_indices])
logit_m_list = [(self.compute_pred(proj_x_m, label_embs), target[masked_indices])]
else:
logit_m_list = [None]
if not self.skip_nomask:
nomask_indices = torch.logical_and(~padding_mask, ~mask_indices)
proj_x_u = proj(x[nomask_indices])
logit_u_list = [(self.compute_pred(proj_x_u, label_embs), target[nomask_indices])]
else:
logit_u_list = [None]
return logit_m_list, logit_u_list
def convert_embeddings(self,
x,
padding_mask,
target=None,
mask_indices=None,
mix_with_unit=False,
use_pred_unit=False,
l2_embedding=False,
remask=False
):
"""
1. Mix with units if needed (default: True)
2. Prepare for unit_encoder inputs
Inputs:
x, (B, T, D)
Return:
src_tokens, (B, T)
soft_embeddings, (B, T, D)
l2_loss, a loss
"""
soft_embeddings = self.final_proj_list[0](x) if x.size(-1) == self.final_dim else x
if padding_mask is None:
            padding_mask = soft_embeddings.new_zeros(soft_embeddings.size(0), soft_embeddings.size(1), dtype=torch.bool)
if use_pred_unit:
src_tokens = self.compute_pred(self.final_proj_list[0](x), self.label_embs_list[0]).argmax(dim=-1)
src_tokens[padding_mask] = self.padding_idx
elif target is not None:
src_tokens = target
else:
src_tokens = padding_mask.long()
if l2_embedding | mix_with_unit:
unit_embeddings = self.unit_embed_tokens(src_tokens) # (B, T, D)
l2_loss = 0
if l2_embedding:
if mask_indices is not None:
l2_loss = (soft_embeddings - unit_embeddings)[mask_indices].float().pow(2).mean(dim=-1)
scale = unit_embeddings[mask_indices].float().pow(2).sum(dim=-1)
else:
l2_loss = (soft_embeddings - unit_embeddings).float().pow(2).mean(dim=-1)
scale = unit_embeddings.float().pow(2).sum(dim=-1)
l2_loss = (l2_loss / scale).mean()
if mix_with_unit:
B, T, D = x.shape
selected_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob / 2,
self.mask_length // 2,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
selected_indices = torch.from_numpy(selected_indices).to(x.device)
if mask_indices is not None:
if remask:
remask_indices = torch.logical_and(selected_indices, mask_indices)
soft_embeddings[remask_indices] = self.mask_emb
swap_indices = torch.logical_and(selected_indices, ~mask_indices)
else:
swap_indices = selected_indices
soft_embeddings[swap_indices] = unit_embeddings[swap_indices]
soft_embeddings = soft_embeddings * (1 - padding_mask.unsqueeze(-1).type_as(x))
return src_tokens, soft_embeddings, l2_loss
def forward(
self,
source: torch.Tensor = None,
src_tokens: torch.Tensor = None,
src_lengths: torch.Tensor = None,
target_list: Optional[List[torch.Tensor]] = None,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = True,
features_only: bool = False,
output_layer: Optional[int] = None,
) -> Dict[str, torch.Tensor]:
assert source is not None or src_tokens is not None
if source is not None:
return self.forward_speech(
source=source,
target_list=target_list,
padding_mask=padding_mask,
mask=mask,
features_only=features_only,
output_layer=output_layer,
)
else:
return self.forward_text(
src_tokens=src_tokens,
src_lengths=src_lengths,
mask=self.mask_u2t,
output_layer=output_layer,
)
def forward_speech(
self,
source: torch.Tensor = None,
target_list: Optional[List[torch.Tensor]] = None,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = True,
features_only: bool = False,
output_layer: Optional[int] = None,
) -> Dict[str, torch.Tensor]:
"""output layer is 1-based"""
features = self.forward_features(source)
if target_list is not None:
features, target_list = self.forward_targets(features, target_list)
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
if mask:
x, mask_indices = self.apply_mask(features, padding_mask, target_list)
else:
x = features
mask_indices = None
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, layer_results = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1,
)
if features_only:
return {"x": x, "padding_mask": padding_mask, "features": features, "layer_results": layer_results}
logit_m_list, logit_u_list = self.compute_hubert_logits(
x,
target_list[0],
self.final_proj_list[0],
self.label_embs_list[0],
padding_mask,
mask_indices,
)
result = {
"logit_m_list": logit_m_list,
"logit_u_list": logit_u_list,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
if self.add_unit_encoder:
src_tokens, x_emb, l2_loss = self.convert_embeddings(
x,
padding_mask, target_list[0],
mask_indices=mask_indices,
mix_with_unit=self.mix_with_unit,
use_pred_unit=self.use_pred_unit,
l2_embedding=self.l2_embedding,
)
encoder_out = self.unit_encoder(src_tokens, token_embeddings=x_emb)
result['encoder_out'] = encoder_out['encoder_out'] # [(T, B, D)]
result['encoder_padding_mask'] = encoder_out['encoder_padding_mask'] # [(B, T)]
if self.l2_embedding:
result['embedding_l2_loss'] = l2_loss
code_logit_m_list, code_logit_u_list = self.compute_hubert_logits(
encoder_out['encoder_out'][0].transpose(0, 1),
target_list[-1],
self.final_proj_list[-1],
self.label_embs_list[-1],
padding_mask,
mask_indices,
)
result['logit_m_list'] += code_logit_m_list
result['logit_u_list'] += code_logit_u_list
return result
def forward_text(
self,
src_tokens: torch.Tensor = None,
src_lengths: torch.Tensor = None,
target_list: Optional[List[torch.Tensor]] = None,
mask: bool = True,
output_layer: Optional[int] = None,
) -> Dict[str, torch.Tensor]:
        assert self.add_unit_encoder, "Cannot forward the unit-text branch without unit_encoder!"
padding_mask = src_tokens == self.padding_idx
unit_embeddings = self.unit_embed_tokens(src_tokens)
if mask:
unit_embeddings, mask_indices = self.apply_mask(unit_embeddings, padding_mask, [src_tokens])
else:
            ### If the mask has already been applied to src_tokens, the target_list should contain many padding_idx
mask_indices = target_list[-1] != self.padding_idx
unit_embeddings[mask_indices] = self.mask_emb
encoder_out = self.unit_encoder(
src_tokens,
token_embeddings=unit_embeddings,
return_all_hiddens=output_layer is not None,
)
result = {}
result["encoder_out"] = encoder_out["encoder_out"]
result["encoder_states"] = encoder_out["encoder_states"]
result["padding_mask"] = padding_mask
if self.compute_mum:
code_logit_m_list, code_logit_u_list = self.compute_hubert_logits(
encoder_out["encoder_out"].transpose(0, 1),
target_list[-1],
self.final_proj_list[-1],
self.label_embs_list[-1],
padding_mask,
mask_indices,
)
result["logit_m_list"] = code_logit_m_list
result["logit_u_list"] = code_logit_u_list
if self.add_text_ctc:
result["encoder_out_ctc"] = [self.unit_encoder_ctc_head(x) for x in encoder_out['encoder_out']]
result["encoder_padding_mask"] = [
self.downsample_ctc_padding_mask(padding_mask) for padding_mask in encoder_out['encoder_padding_mask']
]
return result
def extract_features(
self,
source: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = False,
ret_conv: bool = False,
output_layer: Optional[int] = None,
ret_layer_results: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Extract features for only speech input"""
with torch.no_grad():
res = self.forward(
source,
padding_mask=padding_mask,
mask=mask,
features_only=True,
output_layer=output_layer,
)
# {"x": x, "padding_mask": padding_mask, "features": features, "layer_results": layer_results}
x = res["x"] # B x T x D
padding_mask = res["padding_mask"]
if self.add_unit_encoder and (output_layer is None or output_layer > self.cfg.encoder_layers):
src_tokens, x, _ = self.convert_embeddings(
x,
padding_mask,
mix_with_unit=False,
use_pred_unit=False,
)
            return_all_hiddens = output_layer is not None and output_layer > self.cfg.encoder_layers
encoder_out = self.unit_encoder(
src_tokens,
token_embeddings=x,
return_all_hiddens=return_all_hiddens,
)
res["x"] = encoder_out['encoder_out'][0].transpose(0, 1) # (B, T, D)
if return_all_hiddens:
res["layer_results"] += encoder_out['encoder_states'][1:1+output_layer-len(res["layer_results"])]
feature = res["features"] if ret_conv else res["x"]
if ret_layer_results:
feature = (feature, res["layer_results"])
return feature, padding_mask
def get_logits(self, net_output, is_masked=True):
if is_masked:
logits_list = net_output["logit_m_list"]
else:
logits_list = net_output["logit_u_list"]
logits_list = [x[0].float() for x in logits_list if x is not None]
return logits_list
def get_targets(self, net_output, is_masked=True):
if is_masked:
logits_list = net_output["logit_m_list"]
else:
logits_list = net_output["logit_u_list"]
targets_list = [x[1].long() for x in logits_list if x is not None]
return targets_list
def get_extra_losses(self, net_output):
extra_losses = []
names = []
if "features_pen" in net_output:
extra_losses.append(net_output["features_pen"])
names.append("features_pen")
if "embedding_l2_loss" in net_output:
extra_losses.append(net_output["embedding_l2_loss"])
names.append("embedding_l2_loss")
return extra_losses, names
def remove_pretraining_modules(self, step2=False):
self.target_glu = None
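# --- Hedged usage sketch (not part of the original file) -------------------
# Typical pattern for using this standalone model as a speech feature
# extractor. The checkpoint path and the "cfg"/"model" keys are assumptions
# about how a SpeechLM checkpoint is laid out, not guaranteed by this file.
def _example_extract_features(checkpoint_path="/path/to/speechlm_checkpoint.pt"):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    cfg = SpeechLMConfig(checkpoint["cfg"]["model"])
    model = SpeechLM(cfg)
    model.load_state_dict(checkpoint["model"], strict=False)
    model.eval()
    wav = torch.randn(1, 16000)  # one second of fake 16 kHz audio
    with torch.no_grad():
        feats, padding_mask = model.extract_features(wav)
    return feats, padding_mask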
| EXA-1-master | exa/models/unilm-master/speechlm/SpeechLM.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
"""
We just merge all the required modules and functions into one python file.
It is for easily use the pre-trained model to extract features.
"""
import math
import numpy as np
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
from torch import Tensor
from typing import Any, Dict, List, Tuple, Callable, Optional
logger = logging.getLogger(__name__)
# rewrite name for backward compatibility in `make_generation_fast_`
def module_name_fordropout(module_name: str) -> str:
if module_name == "TransformerEncoderBase":
return "TransformerEncoder"
else:
return module_name
def utils_make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
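# --- Hedged illustrative sketch (not part of the original file) ------------
# utils_make_positions numbers non-padding tokens starting at padding_idx + 1
# and leaves padding positions at padding_idx.
def _example_make_positions():
    tokens = torch.tensor([[5, 6, 7, 1, 1]])            # 1 is the padding index
    positions = utils_make_positions(tokens, padding_idx=1)
    assert positions.tolist() == [[2, 3, 4, 1, 1]]
    return positions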
def utils_item(tensor):
# tpu-comment: making this a no-op for xla devices.
if torch.is_tensor(tensor) and tensor.device.type == "xla":
return tensor.detach()
if hasattr(tensor, "item"):
return tensor.item()
if hasattr(tensor, "__getitem__"):
return tensor[0]
return tensor
def fsdp_wrap(module, min_num_params: Optional[int] = None, **kwargs):
"""
Helper to wrap layers/modules in FSDP. This falls back to a no-op if
fairscale is not available.
Args:
module (nn.Module): module to (maybe) wrap
min_num_params (int, Optional): minimum number of layer params to wrap
"""
try:
from fairscale.nn import wrap
if min_num_params is not None:
num_params = sum(p.numel() for p in module.parameters())
if num_params >= min_num_params:
return wrap(module, **kwargs)
else:
return module
else:
return wrap(module, **kwargs)
except ImportError:
return module
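# --- Hedged illustrative sketch (not part of the original file) ------------
# Without fairscale installed, fsdp_wrap is a no-op; with fairscale, modules
# smaller than min_num_params are also returned unchanged.
def _example_fsdp_wrap():
    layer = nn.Linear(4, 4)  # 20 parameters, well below the threshold
    return fsdp_wrap(layer, min_num_params=100_000)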
def quant_noise(module, p, block_size):
"""
Wraps modules and applies quantization noise to the weights for
subsequent quantization with Iterative Product Quantization as
described in "Training with Quantization Noise for Extreme Model Compression"
Args:
- module: nn.Module
- p: amount of Quantization Noise
- block_size: size of the blocks for subsequent quantization with iPQ
Remarks:
- Module weights must have the right sizes wrt the block size
- Only Linear, Embedding and Conv2d modules are supported for the moment
- For more detail on how to quantize by blocks with convolutional weights,
see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
- We implement the simplest form of noise here as stated in the paper
which consists in randomly dropping blocks
"""
# if no quantization noise, don't register hook
if p <= 0:
return module
# supported modules
assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))
# test whether module.weight has the right sizes wrt block_size
is_conv = module.weight.ndim == 4
# 2D matrix
if not is_conv:
assert (
module.weight.size(1) % block_size == 0
), "Input features must be a multiple of block sizes"
# 4D matrix
else:
# 1x1 convolutions
if module.kernel_size == (1, 1):
assert (
module.in_channels % block_size == 0
), "Input channels must be a multiple of block sizes"
# regular convolutions
else:
k = module.kernel_size[0] * module.kernel_size[1]
assert k % block_size == 0, "Kernel size must be a multiple of block size"
def _forward_pre_hook(mod, input):
# no noise for evaluation
if mod.training:
if not is_conv:
# gather weight and sizes
weight = mod.weight
in_features = weight.size(1)
out_features = weight.size(0)
# split weight matrix into blocks and randomly drop selected blocks
mask = torch.zeros(
in_features // block_size * out_features, device=weight.device
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
else:
# gather weight and sizes
weight = mod.weight
in_channels = mod.in_channels
out_channels = mod.out_channels
# split weight matrix into blocks and randomly drop selected blocks
if mod.kernel_size == (1, 1):
mask = torch.zeros(
int(in_channels // block_size * out_channels),
device=weight.device,
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
else:
mask = torch.zeros(
weight.size(0), weight.size(1), device=weight.device
)
mask.bernoulli_(p)
mask = (
mask.unsqueeze(2)
.unsqueeze(3)
.repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
)
# scale weights and apply mask
mask = mask.to(
torch.bool
) # x.bool() is not currently supported in TorchScript
s = 1 / (1 - p)
mod.weight.data = s * weight.masked_fill(mask, 0)
module.register_forward_pre_hook(_forward_pre_hook)
return module
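# --- Hedged illustrative sketch (not part of the original file) ------------
# quant_noise registers a forward pre-hook that randomly zeroes (and rescales)
# weight blocks during training; in_features must be divisible by block_size.
def _example_quant_noise():
    layer = quant_noise(nn.Linear(16, 4), p=0.1, block_size=8)
    layer.train()
    return layer(torch.randn(2, 16))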
def relu_squared(x: torch.Tensor):
return F.relu(x).pow(2)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def get_activation_fn(activation: str) -> Callable:
"""Returns the activation function corresponding to `activation`"""
if activation == "relu":
return F.relu
elif activation == "relu_squared":
return relu_squared
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
        logger.warning("--activation-fn=gelu_fast has been renamed to gelu_accurate")
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
elif activation == "swish":
        return torch.nn.functional.silu
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
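# --- Hedged illustrative sketch (not part of the original file) ------------
# get_activation_fn resolves an activation by name into a callable.
def _example_activation_fn():
    act = get_activation_fn("gelu")
    return act(torch.linspace(-2.0, 2.0, steps=5))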
def softmax(x, dim: int, onnx_trace: bool = False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
require_same_masks: bool = True,
mask_dropout: float = 0.0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from Poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
        no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
require_same_masks: if true, will randomly drop out masks until same amount of masks remains in each sample
mask_dropout: randomly dropout this percentage of masks in each example
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    int,
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len and require_same_masks:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
if mask_dropout > 0:
num_holes = np.rint(len(mask_idc) * mask_dropout).astype(int)
mask_idc = np.random.choice(
mask_idc, len(mask_idc) - num_holes, replace=False
)
mask[i, mask_idc] = True
return mask
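# --- Hedged illustrative sketch (not part of the original file) ------------
# compute_mask_indices returns a boolean (batch, timesteps) numpy array with
# roughly mask_prob of the frames covered by spans of length mask_length.
def _example_compute_mask_indices():
    mask = compute_mask_indices(
        shape=(2, 100),
        padding_mask=None,
        mask_prob=0.65,
        mask_length=10,
        mask_type="static",
        min_masks=2,
    )
    assert mask.shape == (2, 100) and mask.dtype == bool
    return mask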
def init_bert_params(module):
"""
Initialize the weights specific to the BERT Model.
This overrides the default initializations depending on the specified arguments.
1. If normal_init_linear_weights is set then weights of linear
layer will be initialized using the normal distribution and
    bias will be set to the specified value.
2. If normal_init_embed_weights is set then weights of embedding
layer will be initialized using the normal distribution.
3. If normal_init_proj_weights is set then weights of
in_project_weight for MultiHeadAttention initialized using
the normal distribution (to be validated).
"""
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
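# --- Hedged illustrative sketch (not part of the original file) ------------
# init_bert_params is meant to be used via Module.apply, re-initializing
# Linear/Embedding (and MultiheadAttention projection) weights with std=0.02.
def _example_init_bert_params():
    m = nn.Sequential(nn.Linear(8, 8), nn.Embedding(10, 8, padding_idx=0))
    m.apply(init_bert_params)
    return m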
def pad_to_multiple(x, multiple, dim=-1, value=0):
# Inspired from https://github.com/lucidrains/local-attention/blob/master/local_attention/local_attention.py#L41
if x is None:
return None, 0
tsz = x.size(dim)
m = tsz / multiple
remainder = math.ceil(m) * multiple - tsz
if m.is_integer():
return x, 0
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(x, (*pad_offset, 0, remainder), value=value), remainder
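# --- Hedged illustrative sketch (not part of the original file) ------------
# pad_to_multiple pads the chosen dimension up to the next multiple and
# returns the amount of padding that was added.
def _example_pad_to_multiple():
    x = torch.zeros(3, 5, 8)
    padded, pad_len = pad_to_multiple(x, multiple=2, dim=-2, value=0)
    assert padded.shape == (3, 6, 8) and pad_len == 1
    return padded, pad_len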
def is_xla_tensor(tensor):
return torch.is_tensor(tensor) and tensor.device.type == "xla"
def index_put(tensor, indices, value):
if is_xla_tensor(tensor):
for _ in range(indices.dim(), tensor.dim()):
indices = indices.unsqueeze(-1)
if indices.size(-1) < tensor.size(-1):
indices = indices.expand_as(tensor)
tensor = torch.mul(tensor, ~indices) + torch.mul(value, indices)
else:
tensor[indices] = value
return tensor
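# --- Hedged illustrative sketch (not part of the original file) ------------
# index_put is plain boolean-mask assignment on CPU/GPU and falls back to a
# multiply-and-mix formulation on XLA tensors.
def _example_index_put():
    x = torch.zeros(2, 3)
    mask = torch.tensor([[True, False, False], [False, True, False]])
    return index_put(x, mask, 1.0)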
def PositionalEmbedding(
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
learned: bool = False,
):
if learned:
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
# TODO: The right place for this offset would be inside
# LearnedPositionalEmbedding. Move this there for a cleaner implementation.
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim,
padding_idx,
init_size=num_embeddings + padding_idx + 1,
)
return m
try:
    from apex.normalization import FusedLayerNorm  # optional dependency
    has_fused_layernorm = True
except ImportError:
    has_fused_layernorm = False

def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
    if torch.jit.is_scripting() or torch.jit.is_tracing():
        export = True
    if not export and torch.cuda.is_available() and has_fused_layernorm:
        return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
    return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
class TransformerEncoderBase(nn.Module):
"""
Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
args (argparse.Namespace): parsed command-line arguments
dictionary: deprecated(None)
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, cfg, dictionary, embed_tokens, use_rel_pos_enc=False, scaling_for_att=1.0):
self.cfg = cfg
super().__init__()
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
)
self.encoder_layerdrop = cfg.encoder.layerdrop
embed_dim = embed_tokens.embedding_dim if embed_tokens is not None else cfg.encoder.embed_dim
self.padding_idx = embed_tokens.padding_idx if embed_tokens is not None else 1
self.max_source_positions = cfg.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
cfg.max_source_positions,
embed_dim,
self.padding_idx,
learned=cfg.encoder.learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
else:
self.layernorm_embedding = None
if not cfg.adaptive_input and cfg.quant_noise.pq > 0:
self.quant_noise = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=False),
cfg.quant_noise.pq,
cfg.quant_noise.pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.use_rel_pos_enc = use_rel_pos_enc
self.scaling_for_att = scaling_for_att
self.layers.extend(
[self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)]
)
self.num_layers = len(self.layers)
if cfg.encoder.normalize_before:
self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
else:
self.layer_norm = None
if self.use_rel_pos_enc:
self.pos_emb = RelativePositionalEncoding(embed_dim // cfg.encoder.attention_heads, 160)
def build_encoder_layer(self, cfg):
layer = TransformerEncoderLayerBase(cfg, has_relative_attention_bias=self.use_rel_pos_enc, scaling_for_att=self.scaling_for_att)
checkpoint = cfg.checkpoint_activations
if checkpoint:
raise ValueError("We don't support checkpoint_activations for now! Please set cfg.checkpoint_activations=False.")
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
uniformity_layers: Optional[List[int]] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
return self.forward_scriptable(
src_tokens, src_lengths, return_all_hiddens, token_embeddings, uniformity_layers
)
# TorchScript doesn't support super() method so that the scriptable Subclass
# can't access the base class model in Torchscript.
# Current workaround is to add a helper function with different name and
# call the helper function from scriptable Subclass.
def forward_scriptable(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
uniformity_layers: Optional[List[int]] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any()
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
if has_pads:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if self.use_rel_pos_enc:
x_len = x.shape[0]
pos_seq = torch.arange(0, x_len).long().to(x.device)
pos_seq = pos_seq[:, None] - pos_seq[None, :]
pos_k, pos_v = self.pos_emb(pos_seq)
else:
pos_k = None
encoder_states = []
uniformity_hiddens = []
if return_all_hiddens:
encoder_states.append(x)
if uniformity_layers is not None and 0 in uniformity_layers:
x = F.normalize(x.float(), dim=-1).type_as(x)
uniformity_hiddens.append(x)
# encoder layers
for i, layer in enumerate(self.layers):
x = layer(
x, encoder_padding_mask=encoder_padding_mask if has_pads else None,
pos_bias=pos_k,
)
if uniformity_layers is not None and i+1 in uniformity_layers:
x = F.normalize(x.float(), dim=-1).type_as(x)
uniformity_hiddens.append(x)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
        # The PyTorch Mobile lite interpreter does not support returning NamedTuple in
# `forward` so we use a dictionary instead.
# TorchScript does not support mixed values so the values are all lists.
# The empty list is equivalent to None.
src_lengths = (
src_tokens.ne(self.padding_idx)
.sum(dim=1, dtype=torch.int32)
.reshape(-1, 1)
.contiguous()
)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"uniformity_hiddens": uniformity_hiddens, # List[T x B x C]
"src_tokens": [],
"src_lengths": [src_lengths],
}
def forward_torchscript(self, net_input: Dict[str, Tensor]):
"""A TorchScript-compatible version of forward.
Encoders which use additional arguments may want to override
this method for TorchScript compatibility.
"""
if torch.jit.is_scripting():
return self.forward(
src_tokens=net_input["src_tokens"],
src_lengths=net_input["src_lengths"],
)
else:
return self.forward_non_torchscript(net_input)
@torch.jit.unused
def forward_non_torchscript(self, net_input: Dict[str, Tensor]):
encoder_input = {
k: v for k, v in net_input.items() if k != "prev_output_tokens"
}
return self.forward(**encoder_input)
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils_item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def set_num_updates(self, num_updates):
"""State from trainer to pass along to model at every update."""
def _apply(m):
if hasattr(m, "set_num_updates") and m != self:
m.set_num_updates(num_updates)
self.apply(_apply)
class TransformerEncoderLayerBase(nn.Module):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.encoder.normalize_before* to ``True``.
Args:
args (argparse.Namespace): parsed command-line arguments
"""
def __init__(self, cfg, has_relative_attention_bias=False, scaling_for_att=1.0):
super().__init__()
self.cfg = cfg
self.embed_dim = cfg.encoder.embed_dim
self.quant_noise = cfg.quant_noise.pq
self.quant_noise_block_size = cfg.quant_noise.pq_block_size
self.self_attn = self.build_self_attention(self.embed_dim, cfg, has_relative_attention_bias=has_relative_attention_bias, scaling_for_att=scaling_for_att)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=self.__class__.__name__
)
self.activation_fn = get_activation_fn(activation=cfg.activation_fn)
activation_dropout_p = cfg.activation_dropout
if activation_dropout_p == 0:
# for backwards compatibility with models that use cfg.relu_dropout
activation_dropout_p = cfg.relu_dropout or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = cfg.encoder.normalize_before
self.fc1 = self.build_fc1(
self.embed_dim,
cfg.encoder.ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
cfg.encoder.ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.final_layer_norm = LayerNorm(self.embed_dim, export=cfg.export)
if has_relative_attention_bias:
self.norm_k = LayerNorm(self.embed_dim // cfg.encoder.attention_heads)
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size
)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
return quant_noise(
nn.Linear(input_dim, output_dim), p=q_noise, block_size=qn_block_size
)
def build_self_attention(self, embed_dim, cfg, has_relative_attention_bias=False, scaling_for_att=1.0):
return MultiheadAttention(
embed_dim,
cfg.encoder.attention_heads,
dropout=cfg.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
has_relative_attention_bias=has_relative_attention_bias,
scaling_for_att=scaling_for_att,
)
def residual_connection(self, x, residual):
return residual + x
def upgrade_state_dict_named(self, state_dict, name):
"""
Rename layer norm states from `...layer_norms.0.weight` to
`...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to
`...final_layer_norm.weight`
"""
layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layer_norms.{}.{}".format(name, old, m)
if k in state_dict:
state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k]
del state_dict[k]
def forward(
self,
x,
encoder_padding_mask: Optional[Tensor],
attn_mask: Optional[Tensor] = None,
pos_bias=None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(
attn_mask.to(torch.bool), -1e8 if x.dtype == torch.float32 else -1e4
)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if pos_bias is not None:
pos_bias = self.norm_k(pos_bias)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
need_weights=False,
attn_mask=attn_mask,
position_bias=pos_bias,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
class TransformerEncoder(nn.Module):
"""
wav2vec-style transformer encoder.
"""
def __init__(self, args):
super().__init__()
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.required_seq_len_multiple = args.required_seq_len_multiple
self.pos_conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
layers = []
self.use_rel_pos_enc = getattr(args, "use_rel_pos_enc", False)
for _ in range(args.encoder_layers):
layer = TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
has_relative_attention_bias=self.use_rel_pos_enc,
scaling_for_att=getattr(args, "scaling_for_att", 1.0)
)
if args.checkpoint_activations:
raise ValueError("We don't support checkpoint_activations for now! Please set checkpoint_activations=False.")
layers.append(layer)
self.layers = nn.ModuleList(layers)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
if self.use_rel_pos_enc:
self.pos_emb = RelativePositionalEncoding(args.encoder_embed_dim // args.encoder_attention_heads, 160)
self.apply(init_bert_params)
def forward(self, x, padding_mask=None, layer=None, conv_pos=True):
x, layer_results = self.extract_features(x, padding_mask, layer, conv_pos)
if self.layer_norm_first and (layer is None or layer >= len(self.layers) - 1):
x = self.layer_norm(x)
return x, layer_results
def extract_features(self, x, padding_mask=None, tgt_layer=None, conv_pos=True):
if padding_mask is not None:
x = index_put(x, padding_mask, 0)
if conv_pos:
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
# pad to the sequence length dimension
x, pad_length = pad_to_multiple(
x, self.required_seq_len_multiple, dim=-2, value=0
)
if pad_length > 0 and padding_mask is None:
padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool)
padding_mask[:, -pad_length:] = True
else:
padding_mask, _ = pad_to_multiple(
padding_mask, self.required_seq_len_multiple, dim=-1, value=True
)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if self.use_rel_pos_enc:
x_len = x.shape[0]
pos_seq = torch.arange(0, x_len).long().to(x.device)
pos_seq = pos_seq[:, None] - pos_seq[None, :]
pos_k, pos_v = self.pos_emb(pos_seq)
else:
pos_k = None
layer_results = []
r = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False, pos_bias=pos_k)
if tgt_layer is not None:
# unpad if needed
if pad_length > 0:
layer_results.append(
x[:-pad_length]
# (
# x[:-pad_length],
# z[:, :-pad_length, :-pad_length]
# if z is not None
# else z,
# )
)
else:
# layer_results.append((x, z))
layer_results.append(x)
if i == tgt_layer:
r = x
break
if r is not None:
x = r
# T x B x C -> B x T x C
x = x.transpose(0, 1)
        # undo padding
if pad_length > 0:
x = x[:, :-pad_length]
return x, layer_results
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
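# --- Illustrative sketch (editor's addition, not part of the original module) ---
# extract_features() builds its relative-position index with
# pos_seq[:, None] - pos_seq[None, :] before calling RelativePositionalEncoding.
# The hypothetical helper below shows only that index construction.
def _demo_relative_position_index(seq_len: int) -> torch.Tensor:
    pos = torch.arange(seq_len).long()
    # entry [i, j] = i - j, i.e. the signed distance from key j to query i
    return pos[:, None] - pos[None, :]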
class TransformerSentenceEncoderLayer(nn.Module):
"""
wav2vec-style transformer layer
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
has_relative_attention_bias: bool = False,
scaling_for_att: float = 1.0,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
has_relative_attention_bias=has_relative_attention_bias,
scaling_for_att=scaling_for_att
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
if has_relative_attention_bias:
self.norm_k = LayerNorm(self.embedding_dim//num_attention_heads)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
att_args=None,
pos_bias=None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
        modules, similar to the original Transformer implementation.
"""
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
if pos_bias is not None:
pos_bias = self.norm_k(pos_bias)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
attn_mask=self_attn_mask,
position_bias=pos_bias,
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
position_bias=pos_bias,
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
class FairseqDropout(nn.Module):
def __init__(self, p, module_name=None):
super().__init__()
self.p = p
self.module_name = module_name
self.apply_during_inference = False
def forward(self, x, inplace: bool = False):
if self.p > 0 and (self.training or self.apply_during_inference):
return F.dropout(x, p=self.p, training=True, inplace=inplace)
else:
return x
def make_generation_fast_(
self,
name: str,
retain_dropout: bool = False,
retain_dropout_modules: Optional[List[str]] = None,
**kwargs
):
if retain_dropout:
if retain_dropout_modules is not None and self.module_name is None:
logger.warning(
"Cannot enable dropout during inference for module {} "
"because module_name was not set".format(name)
)
elif (
retain_dropout_modules is None # if None, apply to all modules
or self.module_name in retain_dropout_modules
):
logger.info(
"Enabling dropout during inference for module: {}".format(name)
)
self.apply_during_inference = True
else:
logger.info("Disabling dropout for module: {}".format(name))
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
if positions is None:
if incremental_state is not None:
# positions is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
positions = torch.zeros(
(1, 1), device=input.device, dtype=input.dtype
).fill_(int(self.padding_idx + input.size(1)))
else:
positions = utils_make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
positions = torch.clamp(positions, max=self.padding_idx + self.max_positions)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
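# --- Illustrative sketch (editor's addition, not part of the original module) ---
# LearnedPositionalEmbedding relies on utils_make_positions to give non-pad
# tokens positions padding_idx + 1, padding_idx + 2, ... while padding keeps
# padding_idx. The hypothetical helper below reproduces that convention in
# isolation; it is an assumption about the expected behaviour, not a call into
# the real utility.
def _demo_expected_positions(padding_idx: int = 1) -> torch.Tensor:
    tokens = torch.tensor([[5, 6, 7, padding_idx]])
    mask = tokens.ne(padding_idx).int()
    # -> tensor([[2, 3, 4, 1]]) for padding_idx == 1
    return torch.cumsum(mask, dim=1) * mask + padding_idx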
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx if padding_idx is not None else 0
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size, embedding_dim, padding_idx
)
self.onnx_trace = False
self.register_buffer("_float_tensor", torch.FloatTensor(1))
self.max_positions = int(1e5)
def prepare_for_onnx_export_(self):
self.onnx_trace = True
@staticmethod
def get_embedding(
num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None
):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(
1
) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(
num_embeddings, -1
)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(
self,
input,
incremental_state: Optional[Any] = None,
timestep: Optional[Tensor] = None,
positions: Optional[Any] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
bspair = torch.onnx.operators.shape_as_tensor(input)
bsz, seq_len = bspair[0], bspair[1]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos, self.embedding_dim, self.padding_idx
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
if self.onnx_trace:
return (
self.weights.index_select(index=self.padding_idx + pos, dim=0)
.unsqueeze(1)
.repeat(bsz, 1, 1)
)
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = utils_make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
if self.onnx_trace:
flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
embedding_shape = torch.cat(
(bsz.view(1), seq_len.view(1), torch.tensor([-1], dtype=torch.long))
)
embeddings = torch.onnx.operators.reshape_from_tensor_shape(
flat_embeddings, embedding_shape
)
return embeddings
return (
self.weights.index_select(0, positions.view(-1))
.view(bsz, seq_len, -1)
.detach()
)
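# --- Illustrative sketch (editor's addition, not part of the original module) ---
# get_embedding() above fills each row with the sin half followed by the cos
# half at geometrically spaced frequencies and zeroes the padding_idx row. A
# small hypothetical shape check built on that static helper:
def _demo_sinusoidal_table(num_embeddings: int = 6, embedding_dim: int = 4) -> torch.Size:
    emb = SinusoidalPositionalEmbedding.get_embedding(num_embeddings, embedding_dim, padding_idx=0)
    # emb[0] is all zeros (padding row); emb.shape == (6, 4)
    return emb.shape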
try:
from apex.normalization import FusedLayerNorm as _FusedLayerNorm
has_fused_layernorm = True
class FusedLayerNorm(_FusedLayerNorm):
@torch.jit.unused
def forward(self, x):
if not x.is_cuda:
return super().forward(x)
else:
with torch.cuda.device(x.device):
return super().forward(x)
except ImportError:
has_fused_layernorm = False
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
class LayerDropModuleList(nn.ModuleList):
"""
A LayerDrop implementation based on :class:`torch.nn.ModuleList`.
We refresh the choice of which layers to drop every time we iterate
over the LayerDropModuleList instance. During evaluation we always
iterate over all layers.
Usage::
        layers = LayerDropModuleList(p=0.5, modules=[layer1, layer2, layer3])
for layer in layers: # this might iterate over layers 1 and 3
x = layer(x)
for layer in layers: # this might iterate over all layers
x = layer(x)
for layer in layers: # this might not iterate over any layers
x = layer(x)
Args:
p (float): probability of dropping out each layer
modules (iterable, optional): an iterable of modules to add
"""
def __init__(self, p, modules=None):
super().__init__(modules)
self.p = p
def __iter__(self):
dropout_probs = torch.empty(len(self)).uniform_()
for i, m in enumerate(super().__iter__()):
if not self.training or (dropout_probs[i] > self.p):
yield m
class RelativePositionalEncoding(torch.nn.Module):
def __init__(self, d_model, maxlen=1000, embed_v=False):
super(RelativePositionalEncoding, self).__init__()
self.d_model = d_model
self.maxlen = maxlen
self.pe_k = torch.nn.Embedding(2*maxlen, d_model)
if embed_v:
self.pe_v = torch.nn.Embedding(2*maxlen, d_model)
self.embed_v = embed_v
def forward(self, pos_seq, incremental_state=None):
pos_seq[pos_seq < -self.maxlen] = -self.maxlen
pos_seq[pos_seq >= self.maxlen] = self.maxlen - 1
pos_seq = pos_seq + self.maxlen
if incremental_state is not None:
pos_seq = pos_seq[-1:]
if self.embed_v:
return self.pe_k(pos_seq), self.pe_v(pos_seq)
else:
return self.pe_k(pos_seq), None
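# --- Illustrative sketch (editor's addition, not part of the original module) ---
# RelativePositionalEncoding clamps offsets to [-maxlen, maxlen - 1] and shifts
# them by +maxlen so they index an embedding table of size 2 * maxlen. The
# hypothetical helper below wires it to the offset matrix used by the encoder;
# all sizes are made up for illustration.
def _demo_relative_pos_embedding(seq_len: int = 5, d_model: int = 8, maxlen: int = 16) -> torch.Size:
    enc = RelativePositionalEncoding(d_model, maxlen=maxlen)
    pos = torch.arange(seq_len).long()
    pos_seq = pos[:, None] - pos[None, :]      # offsets in [-(T-1), T-1]
    pos_k, pos_v = enc(pos_seq)                # pos_k: (T, T, d_model), pos_v: None
    return pos_k.shape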
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
has_relative_attention_bias=False,
scaling_for_att=1.0
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.has_relative_attention_bias = has_relative_attention_bias
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.scaling_for_att = scaling_for_att
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(
nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.v_proj = quant_noise(
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.q_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.out_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
position_bias: Optional[Tensor] = None
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
                assert (src_len, bsz) == value.shape[:2]
if (
not self.onnx_trace
and not is_tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
and not self.has_relative_attention_bias
):
assert key is not None and value is not None
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q *= (1 / self.scaling_for_att)
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
        if position_bias is not None:
            # first-order relative position bias
            # position_bias: (tgt_len, src_len, head_dim)
            reshape_q = q.contiguous().view(bsz * self.num_heads, -1, self.head_dim).transpose(0, 1)  # (tgt_len, bsz*num_heads, head_dim)
            B = torch.matmul(reshape_q, position_bias.transpose(-2, -1))  # (tgt_len, bsz*num_heads, src_len)
            B = B.transpose(0, 1).view(bsz * self.num_heads, position_bias.size(0), position_bias.size(1))
            attn_weights += B
attn_weights *= self.scaling_for_att
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if self.scaling_for_att > 1.0:
attn_weights = attn_weights - attn_weights.detach().max(dim=-1, keepdim=True)[0]
if before_softmax:
return attn_weights, v
attn_weights_float = softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
if src_len > prev_key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask.float()
elif key_padding_mask is not None:
if src_len > key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = key_padding_mask.float()
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
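# --- Illustrative sketch (editor's addition, not part of the original module) ---
# The position_bias branch in MultiheadAttention.forward adds a
# (bsz * num_heads, tgt_len, src_len) term computed from the per-head queries
# and the relative key embeddings. The hypothetical helper below repeats that
# shape walk-through with made-up sizes, using reshape where the original can
# rely on view.
def _demo_position_bias_shapes(T: int = 4, B: int = 2, H: int = 2, Dh: int = 8) -> torch.Size:
    q = torch.randn(B * H, T, Dh)                        # per-head queries
    position_bias = torch.randn(T, T, Dh)                # relative key embeddings
    reshape_q = q.transpose(0, 1)                        # (T, B*H, Dh)
    bias = torch.matmul(reshape_q, position_bias.transpose(-2, -1))   # (T, B*H, T)
    bias = bias.transpose(0, 1).reshape(B * H, T, T)     # matches attn_weights
    return bias.shape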
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
assert (
is_layer_norm and is_group_norm
) == False, "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=True),
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(dim, dim, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
def forward(self, x):
# BxT -> BxCxT
x = x.unsqueeze(1)
for conv in self.conv_layers:
x = conv(x)
return x
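# --- Illustrative sketch (editor's addition, not part of the original module) ---
# conv_layers is a list of (dim, kernel_size, stride) tuples. The spec below is
# the common wav2vec 2.0 configuration, used here only as an assumed example
# (it is not read from this file); it yields roughly 50 frames per second of
# 16 kHz audio.
def _demo_conv_feature_extractor() -> torch.Size:
    spec = [(512, 10, 5)] + [(512, 3, 2)] * 4 + [(512, 2, 2)] * 2
    fe = ConvFeatureExtractionModel(conv_layers=spec, mode="default")
    wav = torch.randn(2, 16000)        # (batch, samples), ~1 s at 16 kHz
    return fe(wav).shape               # -> (2, 512, 49)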
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(),
self.num_groups,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return grad * ctx.scale, None
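# --- Illustrative sketch (editor's addition, not part of the original module) ---
# GradMultiply is an identity in the forward pass but scales gradients in the
# backward pass; a typical use is down-weighting feature-extractor gradients,
# e.g. features = GradMultiply.apply(features, 0.1). The hypothetical check
# below confirms the gradient scaling.
def _demo_grad_multiply(scale: float = 0.1) -> torch.Tensor:
    x = torch.randn(3, requires_grad=True)
    GradMultiply.apply(x, scale).sum().backward()
    return x.grad                      # == torch.ones(3) * scale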
class Rotate3D(nn.Module):
"""
(T, B, D) --> (B, D, T) --> (D, T, B) --> (T, B, D)
"""
def __init__(self):
super().__init__()
def forward(self, x):
return x.permute(1, 2, 0)
class SamePad(nn.Module):
def __init__(self, kernel_size, causal=False):
super().__init__()
if causal:
self.remove = kernel_size - 1
else:
self.remove = 1 if kernel_size % 2 == 0 else 0
def forward(self, x):
if self.remove > 0:
x = x[:, :, : -self.remove]
return x
| EXA-1-master | exa/models/unilm-master/speechlm/modules.py |
from . import data, tasks, criterions, models
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/__init__.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
"""
Modified form: https://github.com/facebookresearch/fairseq/blob/272c4c5197250997148fb12c0db6306035f166a4/fairseq/sequence_generator.py
"""
import torch
import numpy as np
from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig
from fairseq.speech_generator import SpeechGenerator
class NonAutoregressiveUnitGenerator(SpeechGenerator):
@torch.no_grad()
def generate(self, model, sample, has_targ=False, **kwargs):
model.eval()
bsz, max_src_len = sample["net_input"]["src_tokens"].size()
n_frames_per_step = model.encoder.n_frames_per_step
out_dim = model.encoder.out_dim
raw_dim = out_dim // n_frames_per_step
logit, logit_post, out_lens, log_dur_out, _, _ = model(
src_tokens=sample["net_input"]["src_tokens"],
src_lengths=sample["net_input"]["src_lengths"],
speaker=sample["speaker"],
durations=sample["durations"],
pitches=sample["pitches"],
energies=sample["energies"],
)
if logit_post is not None:
logit = logit_post
logit = logit.view(bsz, -1, raw_dim)
pred = logit.argmax(dim=-1)
## get duration prediction
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
padding_mask = src_tokens.eq(model.encoder.padding_idx)
d_factor = 1.0 ## set by model
dur_out = torch.clamp(
torch.round((torch.exp(log_dur_out) - 1) * d_factor).long(), min=0
)
dur_out.masked_fill_(padding_mask, 0)
x = src_tokens.unsqueeze(-1)
x, src_out_lens = model.encoder.var_adaptor.length_regulator(x, dur_out)
fa_src_tokens = x.view(bsz, -1)
finalized = [
{
"unit": pred[b, :l],
"fa_src": fa_src_tokens[b, :l],
"duration": dur_out[b, :L],
}
for b, l, L in zip(range(bsz), out_lens, src_lengths)
]
return finalized
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/unit_generator.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
"""
Modified form: https://github.com/facebookresearch/fairseq/blob/272c4c5197250997148fb12c0db6306035f166a4/examples/speech_recognition/new/infer.py
1. add "utils.import_user_module(cfg.common)" so that usr-dir can be loaded
"""
import ast
import hashlib
import logging
import os
import shutil
import sys
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import editdistance
import torch
import torch.distributed as dist
import examples
from examples.speech_recognition.new.decoders.decoder_config import (
DecoderConfig,
FlashlightDecoderConfig,
)
from examples.speech_recognition.new.decoders.decoder import Decoder
from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
FairseqDataclass,
)
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.logging.progress_bar import BaseProgressBar
from fairseq.models.fairseq_model import FairseqModel
from omegaconf import OmegaConf
import hydra
from hydra.core.config_store import ConfigStore
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
config_path = Path(examples.speech_recognition.new.__path__[0]).resolve() / "conf"
@dataclass
class DecodingConfig(DecoderConfig, FlashlightDecoderConfig):
unique_wer_file: bool = field(
default=False,
metadata={"help": "If set, use a unique file for storing WER"},
)
results_path: Optional[str] = field(
default=None,
metadata={
"help": "If set, write hypothesis and reference sentences into this directory"
},
)
@dataclass
class InferConfig(FairseqDataclass):
task: Any = None
decoding: DecodingConfig = DecodingConfig()
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def reset_logging():
root = logging.getLogger()
for handler in root.handlers:
root.removeHandler(handler)
root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper())
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(
logging.Formatter(
fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
root.addHandler(handler)
class InferenceProcessor:
cfg: InferConfig
def __init__(self, cfg: InferConfig) -> None:
self.cfg = cfg
self.task = tasks.setup_task(cfg.task)
models, saved_cfg = self.load_model_ensemble()
self.models = models
self.saved_cfg = saved_cfg
self.tgt_dict = self.task.target_dictionary
self.task.load_dataset(
self.cfg.dataset.gen_subset,
task_cfg=saved_cfg.task,
)
self.generator = Decoder(cfg.decoding, self.tgt_dict)
self.gen_timer = StopwatchMeter()
self.wps_meter = TimeMeter()
self.num_sentences = 0
self.total_errors = 0
self.total_length = 0
self.hypo_words_file = None
self.hypo_units_file = None
self.ref_words_file = None
self.ref_units_file = None
self.progress_bar = self.build_progress_bar()
def __enter__(self) -> "InferenceProcessor":
if self.cfg.decoding.results_path is not None:
self.hypo_words_file = self.get_res_file("hypo.word")
self.hypo_units_file = self.get_res_file("hypo.units")
self.ref_words_file = self.get_res_file("ref.word")
self.ref_units_file = self.get_res_file("ref.units")
return self
def __exit__(self, *exc) -> bool:
if self.cfg.decoding.results_path is not None:
self.hypo_words_file.close()
self.hypo_units_file.close()
self.ref_words_file.close()
self.ref_units_file.close()
return False
def __iter__(self) -> Any:
for sample in self.progress_bar:
if not self.cfg.common.cpu:
sample = utils.move_to_cuda(sample)
# Happens on the last batch.
if "net_input" not in sample:
continue
yield sample
def log(self, *args, **kwargs):
self.progress_bar.log(*args, **kwargs)
def print(self, *args, **kwargs):
self.progress_bar.print(*args, **kwargs)
def get_res_file(self, fname: str) -> None:
fname = os.path.join(self.cfg.decoding.results_path, fname)
if self.data_parallel_world_size > 1:
fname = f"{fname}.{self.data_parallel_rank}"
return open(fname, "w", buffering=1)
def merge_shards(self) -> None:
"""Merges all shard files into shard 0, then removes shard suffix."""
shard_id = self.data_parallel_rank
num_shards = self.data_parallel_world_size
if self.data_parallel_world_size > 1:
def merge_shards_with_root(fname: str) -> None:
fname = os.path.join(self.cfg.decoding.results_path, fname)
logger.info("Merging %s on shard %d", fname, shard_id)
base_fpath = Path(f"{fname}.0")
with open(base_fpath, "a") as out_file:
for s in range(1, num_shards):
shard_fpath = Path(f"{fname}.{s}")
with open(shard_fpath, "r") as in_file:
for line in in_file:
out_file.write(line)
shard_fpath.unlink()
shutil.move(f"{fname}.0", fname)
dist.barrier() # ensure all shards finished writing
if shard_id == (0 % num_shards):
merge_shards_with_root("hypo.word")
if shard_id == (1 % num_shards):
merge_shards_with_root("hypo.units")
if shard_id == (2 % num_shards):
merge_shards_with_root("ref.word")
if shard_id == (3 % num_shards):
merge_shards_with_root("ref.units")
dist.barrier()
def optimize_model(self, model: FairseqModel) -> None:
model.make_generation_fast_()
if self.cfg.common.fp16:
model.half()
if not self.cfg.common.cpu:
model.cuda()
def load_model_ensemble(self) -> Tuple[List[FairseqModel], FairseqDataclass]:
arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides)
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(self.cfg.common_eval.path, separator="\\"),
arg_overrides=arg_overrides,
task=self.task,
suffix=self.cfg.checkpoint.checkpoint_suffix,
strict=(self.cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=self.cfg.checkpoint.checkpoint_shard_count,
)
for model in models:
self.optimize_model(model)
return models, saved_cfg
def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None:
return self.task.get_batch_iterator(
dataset=self.task.dataset(self.cfg.dataset.gen_subset),
max_tokens=self.cfg.dataset.max_tokens,
max_sentences=self.cfg.dataset.batch_size,
max_positions=(sys.maxsize, sys.maxsize),
ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple,
seed=self.cfg.common.seed,
num_shards=self.data_parallel_world_size,
shard_id=self.data_parallel_rank,
num_workers=self.cfg.dataset.num_workers,
data_buffer_size=self.cfg.dataset.data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
).next_epoch_itr(shuffle=False)
def build_progress_bar(
self,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
default_log_format: str = "tqdm",
) -> BaseProgressBar:
return progress_bar.progress_bar(
iterator=self.get_dataset_itr(),
log_format=self.cfg.common.log_format,
log_interval=self.cfg.common.log_interval,
epoch=epoch,
prefix=prefix,
tensorboard_logdir=self.cfg.common.tensorboard_logdir,
default_log_format=default_log_format,
)
@property
def data_parallel_world_size(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 1
return distributed_utils.get_data_parallel_world_size()
@property
def data_parallel_rank(self):
if self.cfg.distributed_training.distributed_world_size == 1:
return 0
return distributed_utils.get_data_parallel_rank()
def process_sentence(
self,
sample: Dict[str, Any],
hypo: Dict[str, Any],
sid: int,
batch_id: int,
) -> Tuple[int, int]:
speaker = None # Speaker can't be parsed from dataset.
if "target_label" in sample:
toks = sample["target_label"]
else:
toks = sample["target"]
toks = toks[batch_id, :]
# Processes hypothesis.
hyp_pieces = self.tgt_dict.string(hypo["tokens"].int().cpu())
if "words" in hypo:
hyp_words = " ".join(hypo["words"])
else:
hyp_words = post_process(hyp_pieces, self.cfg.common_eval.post_process)
# Processes target.
target_tokens = utils.strip_pad(toks, self.tgt_dict.pad())
tgt_pieces = self.tgt_dict.string(target_tokens.int().cpu())
tgt_words = post_process(tgt_pieces, self.cfg.common_eval.post_process)
if self.cfg.decoding.results_path is not None:
print(f"{hyp_pieces} ({speaker}-{sid})", file=self.hypo_units_file)
print(f"{hyp_words} ({speaker}-{sid})", file=self.hypo_words_file)
print(f"{tgt_pieces} ({speaker}-{sid})", file=self.ref_units_file)
print(f"{tgt_words} ({speaker}-{sid})", file=self.ref_words_file)
if not self.cfg.common_eval.quiet:
logger.info(f"HYPO: {hyp_words}")
logger.info(f"REF: {tgt_words}")
logger.info("---------------------")
hyp_words, tgt_words = hyp_words.split(), tgt_words.split()
return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
def process_sample(self, sample: Dict[str, Any]) -> None:
self.gen_timer.start()
hypos = self.task.inference_step(
generator=self.generator,
models=self.models,
sample=sample,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
self.gen_timer.stop(num_generated_tokens)
self.wps_meter.update(num_generated_tokens)
for batch_id, sample_id in enumerate(sample["id"].tolist()):
errs, length = self.process_sentence(
sample=sample,
sid=sample_id,
batch_id=batch_id,
hypo=hypos[batch_id][0],
)
self.total_errors += errs
self.total_length += length
self.log({"wps": round(self.wps_meter.avg)})
if "nsentences" in sample:
self.num_sentences += sample["nsentences"]
else:
self.num_sentences += sample["id"].numel()
def log_generation_time(self) -> None:
logger.info(
"Processed %d sentences (%d tokens) in %.1fs %.2f "
"sentences per second, %.2f tokens per second)",
self.num_sentences,
self.gen_timer.n,
self.gen_timer.sum,
self.num_sentences / (self.gen_timer.sum + 1e-6),
1.0 / (self.gen_timer.avg + 1e-6),
)
def parse_wer(wer_file: Path) -> float:
with open(wer_file, "r") as f:
return float(f.readline().strip().split(" ")[1])
def get_wer_file(cfg: InferConfig) -> Path:
"""Hashes the decoding parameters to a unique file ID."""
base_path = "wer"
if cfg.decoding.results_path is not None:
base_path = os.path.join(cfg.decoding.results_path, base_path)
if cfg.decoding.unique_wer_file:
yaml_str = OmegaConf.to_yaml(cfg.decoding)
fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16)
return Path(f"{base_path}.{fid % 1000000}")
else:
return Path(base_path)
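# --- Illustrative sketch (editor's addition, not part of the original script) ---
# get_wer_file() derives a stable numeric suffix from the decoding config so
# that runs with different decoding parameters do not overwrite each other's
# WER files. The hypothetical helper below isolates just that hashing scheme;
# the example YAML string is made up.
def _demo_wer_file_suffix(yaml_str: str = "beam: 50\nlmweight: 2.0\n") -> str:
    fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16)
    return f"wer.{fid % 1000000}"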
def main(cfg: InferConfig) -> float:
"""Entry point for main processing logic.
Args:
        cfg: The inference configuration to use.
    Returns:
        The final WER.
"""
utils.import_user_module(cfg.common)
yaml_str, wer_file = OmegaConf.to_yaml(cfg.decoding), get_wer_file(cfg)
# Validates the provided configuration.
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 4000000
if not cfg.common.cpu and not torch.cuda.is_available():
raise ValueError("CUDA not found; set `cpu=True` to run without CUDA")
logger.info(cfg.common_eval.path)
with InferenceProcessor(cfg) as processor:
for sample in processor:
processor.process_sample(sample)
processor.log_generation_time()
if cfg.decoding.results_path is not None:
processor.merge_shards()
errs_t, leng_t = processor.total_errors, processor.total_length
if cfg.common.cpu:
logger.warning("Merging WER requires CUDA.")
elif processor.data_parallel_world_size > 1:
stats = torch.LongTensor([errs_t, leng_t]).cuda()
dist.all_reduce(stats, op=dist.ReduceOp.SUM)
errs_t, leng_t = stats[0].item(), stats[1].item()
wer = errs_t * 100.0 / leng_t
if distributed_utils.is_master(cfg.distributed_training):
with open(wer_file, "w") as f:
f.write(
(
f"WER: {wer}\n"
f"err / num_ref_words = {errs_t} / {leng_t}\n\n"
f"{yaml_str}"
)
)
return wer
@hydra.main(config_path=config_path, config_name="infer")
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
if cfg.common.reset_logging:
reset_logging()
utils.import_user_module(cfg.common)
# logger.info("Config:\n%s", OmegaConf.to_yaml(cfg))
wer = float("inf")
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
wer = parse_wer(get_wer_file(cfg))
except BaseException as e: # pylint: disable=broad-except
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! %s", str(e))
logger.info("Word error rate: %.4f", wer)
if cfg.is_ax:
return wer, None
return wer
def cli_main() -> None:
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "infer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "infer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=InferConfig)
for k in InferConfig.__dataclass_fields__:
if is_dataclass(InferConfig.__dataclass_fields__[k].type):
v = InferConfig.__dataclass_fields__[k].default
cs.store(name=k, node=v)
hydra_main() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/infer.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
"""
Modified form: https://github.com/facebookresearch/fairseq/blob/272c4c5197250997148fb12c0db6306035f166a4/fairseq_cli/generate.py
"""
import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import numpy as np
import torch
from omegaconf import DictConfig
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
def main(cfg: DictConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
assert cfg.common_eval.path is not None, "--path required for generation!"
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw"
), "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
if cfg.common_eval.results_path is not None:
os.makedirs(cfg.common_eval.results_path, exist_ok=True)
output_path = os.path.join(
cfg.common_eval.results_path,
"generate-{}.txt".format(cfg.dataset.gen_subset),
)
with open(output_path, "w", buffering=1, encoding="utf-8") as h:
return _main(cfg, h)
else:
return _main(cfg, sys.stdout)
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos}
def _main(cfg: DictConfig, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("fairseq_cli.generate")
utils.import_user_module(cfg.common)
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 12000
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Load dataset splits
task = tasks.setup_task(cfg.task)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
if cfg.generation.lm_path is not None:
overrides["data"] = cfg.task.data
try:
lms, _ = checkpoint_utils.load_model_ensemble(
[cfg.generation.lm_path], arg_overrides=overrides, task=None
)
except:
logger.warning(
f"Failed to load language model! Please make sure that the language model dict is the same "
f"as target dict and is located in the data dir ({cfg.task.data})"
)
raise
assert len(lms) == 1
else:
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
def _fp_convert_sample(sample):
def apply_half(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.half)
return t
def apply_bfloat16(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.bfloat16)
return t
if cfg.common.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if cfg.common.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
return sample
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(), *[m.max_positions() for m in models]
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
# Initialize generator
gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight}
generator = task.build_generator(
models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
scorer = scoring.build_scorer(cfg.scoring, None)
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
for sample in progress:
sample = utils.move_to_cuda(sample) if use_cuda else sample
sample = _fp_convert_sample(sample)
if "net_input" not in sample:
continue
prefix_tokens = None
if cfg.generation.prefix_size > 0:
prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
hypos = task.inference_step(
generator,
models[0],
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
num_generated_tokens = sum(len(h["unit"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
has_target = sample["target"] is not None
# Remove padding
if "src_tokens" in sample["net_input"]:
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
).cpu()
else:
src_tokens = None
target_tokens = None
if has_target:
target_tokens = (
utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).cpu()
)
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text(
sample_id
)
target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text(
sample_id
)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
else:
src_str = ""
if has_target:
target_str = " ".join(map(str, target_tokens.numpy().tolist()))
src_str = decode_fn(src_str)
if not cfg.common_eval.quiet:
if src_dict is not None:
print("S-{}\t{}".format(sample_id, src_str), file=output_file)
if has_target:
print("T-{}\t{}".format(sample_id, target_str), file=output_file)
# Process top predictions
j = 0
hypo = hypos[i]
hypo_tokens = hypo["unit"].int().cpu()
hypo_str = " ".join(map(str, hypo_tokens.numpy().tolist()))
alignment = None
detok_hypo_str = hypo_str
# add duration prediction
hypo_duration = " ".join(map(str, hypo["duration"].int().cpu().numpy().tolist()))
hypo_fa_src_str = src_dict.string(hypo["fa_src"].cpu().numpy(), cfg.common_eval.post_process)
# hypo_fa_src_str = " ".join(map(str, hypo["fa_src"].int().cpu().numpy() - 4))
if not cfg.common_eval.quiet:
# score = hypo["score"] / math.log(2) # convert to base 2
score = 0.00
# original hypothesis (after tokenization and BPE)
# print(
# "H-{}\t{}\t{}".format(sample_id, score, hypo_str),
# file=output_file,
# )
# detokenized hypothesis
print(
"D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str),
file=output_file,
)
# duration prediction
print(
"L-{}\t{}\t{}".format(sample_id, score, hypo_duration),
file=output_file,
)
# force-aligned upsampled src-tokens
print(
"U-{}\t{}\t{}".format(sample_id, score, hypo_fa_src_str),
file=output_file,
)
# print(
# "P-{}\t{}".format(
# sample_id,
# " ".join(
# map(
# lambda x: "{:.4f}".format(x),
# # convert from base e to base 2
# hypo["positional_scores"]
# .div_(math.log(2))
# .tolist(),
# )
# ),
# ),
# file=output_file,
# )
if cfg.generation.print_alignment == "hard":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in alignment
]
),
),
file=output_file,
)
if cfg.generation.print_alignment == "soft":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[",".join(src_probs) for src_probs in alignment]
),
),
file=output_file,
)
# Score only the top hypothesis
if has_target and j == 0:
if hasattr(scorer, "add_string"):
scorer.add_string(target_str, detok_hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
progress.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info(
"Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
if has_target:
if cfg.bpe and not cfg.generation.sacrebleu:
if cfg.common_eval.post_process:
logger.warning(
"BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization"
)
else:
logger.warning(
"If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization"
)
# use print to be consistent with other main outputs: S-, H-, T-, D- and so on
print(
"Generate {} with beam={}: {}".format(
cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string()
),
file=output_file,
)
return scorer
def cli_main():
parser = options.get_generation_parser()
# TODO: replace this workaround with refactoring of `AudioPretraining`
parser.add_argument(
"--arch",
"-a",
metavar="ARCH",
default="wav2vec2",
help="Model architecture. For constructing tasks that rely on "
"model args (e.g. `AudioPretraining`)",
)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/generate_unit.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import logging
import os
import sys
from typing import Dict, List, Optional, Tuple
from pathlib import Path
import numpy as np
from argparse import Namespace
from collections import OrderedDict
import torch
from dataclasses import dataclass, field
from fairseq.data import (
Dictionary,
encoders,
data_utils,
StripTokenDataset,
PrependTokenDataset,
AppendTokenDataset,
DenoisingDataset,
ConcatDataset,
FairseqDataset,
iterators,
ResamplingDataset,
MaskTokensDataset,
LanguagePairDataset,
)
from fairseq.data.audio.speech_to_text_joint_dataset import S2TJointDataConfig
from fairseq.data.shorten_dataset import maybe_shorten_dataset
# from fairseq.data.encoders.utils import get_whole_word_mask
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from fairseq.dataclass.constants import ChoiceEnum
from omegaconf import MISSING
from speechlm.data.multimodal_corpus_dataset import MultiCorpusDataset
from speechlm.data.load_langpair_dataset import load_langpair_dataset
from speechlm.data.language_trible_dataset import LanguageTripleDataset, load_langtriple_dataset
from speechlm.data.hubert_dataset import HubertDataset
logger = logging.getLogger(__name__)
TOKENIZER_CHOICES = ChoiceEnum(["sentencepiece", "hubert_letters", "none"])
def _lang_token(lang: str):
return "<lang:{}>".format(lang)
def _lang_token_index(dic: Dictionary, lang: str):
"""Return language token index."""
idx = dic.index(_lang_token(lang))
assert idx != dic.unk_index, "cannot find language token for lang {}".format(lang)
return idx
class LabelEncoder(object):
def __init__(self, dictionary: Dictionary) -> None:
self.dictionary = dictionary
def __call__(self, label: str) -> List[str]:
return self.dictionary.encode_line(
label, append_eos=False, add_if_not_exist=False,
)
### Wrap the original get_whole_word_mask, which requires a bpe_tokenizer;
### here we simply assume words are split by "|" or "<SIL>".
def get_whole_word_mask(args, dictionary):
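# Returns a ByteTensor over the dictionary: entry i is 1 if token i may start a new word
# (special symbols, "madeupword*" placeholders, or separators such as "|" and "<eps>").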
def is_beginning_of_word(i):
if i < dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = dictionary[i]
if tok.startswith("madeupword"):
return True
elif tok in ["<unk>", "<s>", "</s>", "<pad>", "|", "<eps>"]:
return True
else:
return False
mask_whole_words = torch.ByteTensor(
list(map(is_beginning_of_word, range(len(dictionary))))
)
return mask_whole_words
def get_repeative_start(tokens):
"""
tokens: torch.Tensor that may contain runs of repeated tokens
"""
length = len(tokens)
rep_start_id = tokens[:-1] != tokens[1:]
return torch.cat([torch.tensor([True]), rep_start_id])
@dataclass
class TextPretrainingConfig(FairseqDataclass):
### added for joint pretraining
text_data: Optional[str] = field(
default=None,
metadata={
"help": "if set, path to text data directory",
},
)
seed: Optional[int] = field(
default=1,
metadata={
"help": "for ordered_indices in MulticorpusDataset",
},
)
tokens_per_sample: Optional[int] = field(
default=512,
metadata={
"help": "max number of total tokens over all segments per sample for dataset",
},
)
tokens_per_sample_tgt: Optional[int] = field(
default=512,
metadata={
"help": "max number of total tokens over all segments per target sample for dataset",
},
)
sample_break_mode: Optional[str] = field(
default="eos",
metadata={
"help": "mode for breaking sentence",
},
)
mask: Optional[float] = field(
default=0.3,
metadata={
"help": "fraction of words/subwords that will be masked",
},
)
leave_unmasked_prob: float = field(
default=0.1,
metadata={"help": "probability that a masked token is unmasked"},
)
mask_random: Optional[float] = field(
default=0.1,
metadata={
"help": "instead of using [MASK], use random token this often",
},
)
freq_weighted_replacement: bool = field(
default=False,
metadata={"help": "sample random replacement words based on word frequencies"},
)
mask_whole_words: bool = field(
default=True,
metadata={"help": "mask whole words; you may also want to set --bpe"},
)
mask_repeative_tokens: bool = field(
default=True,
metadata={"help": "mask repeative_tokens; if mask_whole_words=False"},
)
mask_multiple_length: int = field(
default=1,
metadata={"help": "repeat the mask indices multiple times"},
)
mask_stdev: float = field(
default=0.0,
metadata={"help": "stdev of the mask length"},
)
shorten_method: Optional[str] = field(
default="none",
metadata={
"help": "if not none, shorten sequences that exceed tokens_per_sample",
"choices": "none/truncate/random_crop"
},
)
shorten_data_split_list: Optional[str] = field(
default="",
metadata={
"help": "comma_separated list of dataset splits to apply shortening to, e.g., train,valid (default: all dataset splits)",
},
)
### The hyper-parameters below are used for BART-style denoising
insert: Optional[float] = field(
default=0.0,
metadata={
"help": "insert this percentage of additional random tokens",
},
)
permute: Optional[float] = field(
default=0.0,
metadata={
"help": "take this proportion of subwords and permute them",
},
)
rotate: Optional[float] = field(
default=0.0,
metadata={
"help": "rotate this proportion of inputs",
},
)
poisson_lambda: Optional[float] = field(
default=3.5,
metadata={
"help": "randomly shuffle sentences for this proportion of inputs",
},
)
permute_sentences: Optional[float] = field(
default=0.0,
metadata={
"help": "shuffle this proportion of sentences in all inputs",
},
)
mask_length: Optional[str] = field(
default="span-poisson",
metadata={
"help": "mask length to choose",
"choice": "subword/word/span-poisson"
},
)
replace_length: Optional[int] = field(
default=1,
metadata={
"help": "when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)",
},
)
shuffle_instance: Optional[bool] = field(
default=False,
metadata={"help": "shuffle instance"},
)
max_source_positions: Optional[int] = field(
default=1024,
metadata={"help": "max number of tokens in the source sequence"},
)
max_target_positions: Optional[int] = field(
default=1024,
metadata={"help": "max number of tokens in the target sequence"},
)
bpe: Optional[str] = field(
default="",
metadata={
"help": "will wrapped by the text_data_config yaml",
},
)
data_config: Optional[str] = field(
default=None,
metadata={
"help": "a config yaml specify the bpe model of text data",
},
)
text_maxtokens_ratio: Optional[float] = field(
default=1.0,
metadata={
"help": "for text, max_tokens = max_tokens * text_maxtokens_ratio / 320 ",
},
)
prepend_tgt_lang_tag: bool = field(
default=False,
metadata={"help": "prepend tgt_lang_tag to replace <eos>"},
)
mask_text_ratio: Optional[float] = field(
default=0.0,
metadata={
"help": "mask_text_ratio, for paired data",
},
)
truncate_mono_source: bool = field(
default=True,
metadata={"help": "truncate mono source-side examples that exceed max-positions"},
)
@dataclass
class JointPretrainingConfig(FairseqDataclass):
data: str = field(
default=MISSING, metadata={"help": "path to speech data directory"}
)
fine_tuning: bool = field(
default=False, metadata={"help": "set to true if fine-tuning Hubert"}
)
labels: List[str] = field(
default_factory=lambda: ["ltr"],
metadata={
"help": (
"extension of the label files to load, frame-level labels for"
" pre-training, and sequence-level label for fine-tuning"
)
},
)
label_dir: Optional[str] = field(
default=None,
metadata={
"help": "if set, looks for labels in this directory instead",
},
)
label_rate: int = field(
default=-1,
metadata={"help": "label frame rate. -1 for sequence label"},
)
sample_rate: int = field(
default=16_000,
metadata={
"help": "target sample rate. audio files will be up/down "
"sampled to this rate"
},
)
normalize: bool = field(
default=False,
metadata={
"help": "if set, normalizes input to have 0 mean and unit variance"
},
)
enable_padding: bool = field(
default=False,
metadata={"help": "pad shorter samples instead of cropping"},
)
max_keep_size: Optional[int] = field(
default=None,
metadata={"help": "exclude sample longer than this"},
)
max_sample_size: Optional[int] = field(
default=None,
metadata={"help": "max sample size to crop to for batching"},
)
min_sample_size: Optional[int] = field(
default=None,
metadata={"help": "min sample size to crop to for batching"},
)
single_target: Optional[bool] = field(
default=False,
metadata={
"help": "if set, AddTargetDatasets outputs same keys "
"as AddTargetDataset"
},
)
random_crop: Optional[bool] = field(
default=True,
metadata={"help": "always crop from the beginning if false"},
)
pad_audio: Optional[bool] = field(
default=False,
metadata={"help": "pad audio to the longest one in the batch if true"},
)
store_labels: Optional[bool] = field(
default=True,
metadata={"help": "store spm labels in memory, should be true when fine-tune with bpe"},
)
add_decoder_target: bool = field(
default=False,
metadata={"help": "contral the model architecture, if set True, load reduced unit as target"},
)
split_modality_batch: bool = field(
default=False,
metadata={"help": "whether create all samples of different modalities in a batch"},
)
speech_tgt_lang: str = field(
default="",
metadata={"help": "prepend <tgt-id> to prev_output_tokens to replace <eos>, only used for decoder"},
)
speech_sampling_alpha: float = field(
default=0.2,
metadata={
"help": "Hyper-parameter alpha = 1/T for temperature-based speech resampling."
"(alpha = 1 for no resampling)"
},
)
text_sampling_alpha: float = field(
default=0.2,
metadata={
"help": "Hyper-parameter alpha = 1/T for temperature-based text resampling."
"(alpha = 1 for no resampling)"
},
)
hubert_tokenizer: Optional[TOKENIZER_CHOICES] = field(
default="none",
metadata={"help": "which tokenizer for processing text"},
)
sp_path: Optional[str] = field(
default=None,
metadata={"help": "sentencepiece model path if using bpe tokenizer"},
)
text_cfg: TextPretrainingConfig = TextPretrainingConfig()
@register_task("joint_sc2t_pretraining", dataclass=JointPretrainingConfig)
class Jsc2tPretrainingTask(FairseqTask):
cfg: JointPretrainingConfig
def __init__(
self,
cfg: JointPretrainingConfig,
) -> None:
super().__init__(cfg)
logger.info(f"current directory is {os.getcwd()}")
logger.info(f"JSTPretrainingTask Config {cfg}")
self.cfg = cfg
self.fine_tuning = cfg.fine_tuning
self.blank_symbol = "<s>"
self.state.add_factory("hubert_tokenizer", self.build_tokenizer)
if self.cfg.text_cfg.text_data is not None and os.path.exists(self.cfg.text_cfg.text_data):
self.state.add_factory("text_dictionary", self.load_text_dictionary)
self.state.add_factory("text_src_dictionary", self.load_text_src_dictionary)
if cfg.fine_tuning:
self.state.add_factory("target_dictionary", self.load_dictionaries)
else:
self.state.add_factory("dictionaries", self.load_dictionaries)
if cfg.text_cfg.data_config is not None:
self.text_data_cfg = S2TJointDataConfig(Path(f"{cfg.text_cfg.text_data}/{cfg.text_cfg.data_config}"))
self.cfg.text_cfg.bpe = self.text_data_cfg.bpe_tokenizer["bpe"]
else:
self.text_data_cfg = None
@property
def source_dictionary(self) -> Optional[Dictionary]:
return None
@property
def target_dictionary(self) -> Optional[Dictionary]:
return self.state.target_dictionary
@property
def dictionaries(self) -> List[Dictionary]:
return self.state.dictionaries
@property
def text_dictionary(self) -> Optional[Dictionary]:
return self.state.text_dictionary
@property
def text_src_dictionary(self) -> Optional[Dictionary]:
return self.state.text_src_dictionary
@property
def hubert_tokenizer(self):
return self.state.hubert_tokenizer
def load_dictionaries(self):
label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir
dictionaries = [Dictionary.load(f"{label_dir}/dict.{label}.txt") for label in self.cfg.labels]
if not self.cfg.fine_tuning:
for dictionary in dictionaries:
dictionary.add_symbol("<mask>")
return dictionaries[0] if self.cfg.fine_tuning else dictionaries
def load_text_dictionary(self):
tgt_dict_path = f"{self.cfg.text_cfg.text_data}/{self.text_data_cfg.vocab_filename if self.text_data_cfg is not None else 'dict.txt'}"
if not os.path.isfile(tgt_dict_path):
raise FileNotFoundError(f"Dict not found: {tgt_dict_path}")
text_dictionary = Dictionary.load(tgt_dict_path)
self.mask_idx = text_dictionary.add_symbol("<mask>")
return text_dictionary
def load_text_src_dictionary(self):
src_dict_path = f"{self.cfg.text_cfg.text_data}/{self.text_data_cfg.src_vocab_filename if self.text_data_cfg is not None else 'dict.txt'}"
if not os.path.isfile(src_dict_path):
raise FileNotFoundError(f"Dict not found: {src_dict_path}")
src_text_dictionary = Dictionary.load(src_dict_path)
self.mask_idx = src_text_dictionary.add_symbol("<mask>")
return src_text_dictionary
@classmethod
def setup_task(
cls, cfg: JointPretrainingConfig, **kwargs
) -> "Jsc2tPretrainingTask":
return cls(cfg)
def get_label_dir(self) -> str:
if self.cfg.label_dir is None:
return self.cfg.data
return self.cfg.label_dir
def load_paired_dataset(self, text_split, truncate_source=False):
text_split, lp = text_split.rsplit('.', 1) # e.g. "libritext.ltr-ltr"
if len(lp.split("-")) == 2:
src, tgt = lp.split("-")
if src == tgt:
logger.warn(f"| trying to load monolingual dataset {text_split}.{lp}, please check your task is right.")
paired_dataset = self.load_char_bart_dataset(f"{text_split}.{lp}.{tgt}")
return paired_dataset
paired_dataset = load_langpair_dataset(
self.cfg.text_cfg.text_data,
text_split,
src,
self.text_src_dictionary,
tgt,
self.text_dictionary,
combine=True,
dataset_impl=None,
upsample_primary=1,
left_pad_source=False,
left_pad_target=False,
max_source_positions=self.cfg.text_cfg.tokens_per_sample,
max_target_positions=self.cfg.text_cfg.tokens_per_sample,
truncate_source=truncate_source,
prepend_bos=False,
load_alignments=False,
append_source_id=True if self.cfg.text_cfg.prepend_tgt_lang_tag else False,
lang_format="<lang:{}>" if self.cfg.text_cfg.prepend_tgt_lang_tag else "[{}]",
input_feeding=self.cfg.add_decoder_target,
)
if self.cfg.text_cfg.mask_text_ratio > 0:
# add mask
self.mask_idx = self.text_src_dictionary.index("<mask>")
mask_whole_words = None
if self.cfg.text_cfg.mask_whole_words:
mask_whole_words = get_whole_word_mask(self.cfg.text_cfg, self.text_src_dictionary)
elif self.cfg.text_cfg.mask_repeative_tokens:
mask_whole_words = get_repeative_start
src_dataset, src_unmasked_dataset = MaskTokensDataset.apply_mask(
paired_dataset.src,
self.text_src_dictionary,
pad_idx=self.text_src_dictionary.pad(),
mask_idx=self.mask_idx,
seed=self.cfg.text_cfg.seed,
mask_prob=self.cfg.text_cfg.mask_text_ratio,
leave_unmasked_prob=self.cfg.text_cfg.leave_unmasked_prob,
random_token_prob=self.cfg.text_cfg.mask_random,
freq_weighted_replacement=self.cfg.text_cfg.freq_weighted_replacement,
mask_whole_words=mask_whole_words,
mask_multiple_length=self.cfg.text_cfg.mask_multiple_length,
mask_stdev=self.cfg.text_cfg.mask_stdev,
)
tgt_dataset = paired_dataset.tgt if paired_dataset.tgt is not None else src_unmasked_dataset
paired_dataset = LanguageTripleDataset(
src_dataset,
src_dataset.sizes,
self.text_src_dictionary,
src_unmasked_dataset,
src_unmasked_dataset.sizes,
self.text_src_dictionary,
tgt_dataset,
tgt_dataset.sizes,
self.text_dictionary,
left_pad_source=False,
left_pad_target=False,
align_dataset=None,
eos=None,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
)
else:
src, ref, tgt = lp.split("-")
paired_dataset = load_langtriple_dataset(
self.cfg.text_cfg.text_data,
text_split,
src,
self.text_src_dictionary,
ref,
self.dictionaries[-1],
tgt,
self.text_dictionary,
combine=True,
dataset_impl=None,
upsample_primary=1,
left_pad_source=False,
left_pad_target=False,
max_source_positions=self.cfg.text_cfg.tokens_per_sample,
max_target_positions=self.cfg.text_cfg.tokens_per_sample,
truncate_source=truncate_source,
prepend_bos=False,
load_alignments=False,
append_source_id=True if self.cfg.text_cfg.prepend_tgt_lang_tag else False,
lang_format="<lang:{}>" if self.cfg.text_cfg.prepend_tgt_lang_tag else "[{}]",
)
return paired_dataset
def load_dataset(self, split: str, epoch=1, **kwargs) -> None:
"""
Create a wav dataset for audio and indexed datasets for (phonemized) text,
then combine them with speechlm.data.multimodal_corpus_dataset.MultiCorpusDataset.
"""
speech_splits = split.split('+')[0].split(',')
### 1st, create speech datasets using HubertDataset (modified from fairseq's HubertDataset)
dicts = [self.target_dictionary] if self.cfg.fine_tuning else self.dictionaries
pad_list = [dict.pad() for dict in dicts]
eos_list = [dict.eos() for dict in dicts]
procs = [LabelEncoder(dict) for dict in dicts]
if self.cfg.speech_tgt_lang != "":
tgt_lang_idx = _lang_token_index(dicts[0], self.cfg.speech_tgt_lang)
logger.info(f"Will prepend <{tgt_lang_idx}> at the beginning of prev_output_tokens to replace <eos>")
else:
tgt_lang_idx = None
# hubert v1: pad_audio=True, random_crop=False;
speech_datasets = []
for speech_split in speech_splits:
paths = [
f"{self.get_label_dir()}/{speech_split}.{l}" for l in self.cfg.labels
]
speech_datasets.append(
HubertDataset(
f"{self.cfg.data}/{speech_split}.tsv",
sample_rate=self.cfg.sample_rate,
label_paths=paths,
label_rates=self.cfg.label_rate,
pad_list=pad_list,
eos_list=eos_list,
label_processors=procs,
max_keep_sample_size=self.cfg.max_keep_size,
min_keep_sample_size=self.cfg.min_sample_size,
max_sample_size=self.cfg.max_sample_size,
pad_audio=self.cfg.pad_audio,
normalize=self.cfg.normalize,
store_labels=self.cfg.store_labels,
random_crop=self.cfg.random_crop,
single_target=self.cfg.single_target,
tgt_dict=dicts[0],
add_decoder_target=self.cfg.add_decoder_target,
fine_tuning=self.cfg.fine_tuning,
tgt_lang_idx=tgt_lang_idx,
tokenizer=self.hubert_tokenizer,
)
)
if len(speech_datasets) > 1:
speech_dataset = ConcatDataset(speech_datasets)
else:
speech_dataset = speech_datasets[0]
has_text = len(split.split('+')) > 1
if not has_text:
assert speech_dataset is not None
self.datasets[split] = speech_dataset
return
### 2nd, create paired/mono text datasets using Langpairdataset
if split.split('+')[1] != '':
paired_splits = [paired_split for paired_split in split.split('+')[1].split(',') if paired_split != '']
paired_datasets = [self.load_paired_dataset(paired_split) for paired_split in paired_splits]
else:
paired_splits, paired_datasets = [], []
if len(split.split('+')) > 2 and split.split('+')[2] != '':
mono_splits = [mono_split for mono_split in split.split('+')[2].split(',') if mono_split != '']
mono_datasets = [self.load_paired_dataset(mono_split, truncate_source=self.cfg.text_cfg.truncate_mono_source) for mono_split in mono_splits]
else:
mono_splits, mono_datasets = [], []
assert len(mono_datasets + paired_datasets) > 0, f"split {split} has no text data, please check the split definition"
### 3rd, if provided, create a supervised dataset with labeled data
if len(split.split('+')) > 3 and split.split('+')[3] != '':
assert len(paired_splits) > 0, f"supervised dataset cannot be loaded without a paired text dataset!"
tgt = paired_splits[0].rsplit('.', 1)[1].split("-")[1]
sup_split = split.split('+')[3]
sup_dataset = HubertDataset(
f"{self.cfg.data}/{sup_split}.tsv",
sample_rate=self.cfg.sample_rate,
label_paths=[f"{self.get_label_dir()}/{sup_split}.{tgt}"],
label_rates=[-1],
pad_list=[self.text_dictionary.pad()],
eos_list=[self.text_dictionary.eos()],
label_processors=[LabelEncoder(self.text_dictionary)],
max_keep_sample_size=self.cfg.max_keep_size,
min_keep_sample_size=None,
max_sample_size=None,
pad_audio=True,
normalize=self.cfg.normalize,
store_labels=self.cfg.store_labels,
random_crop=False,
single_target=True,
tgt_dict=self.text_dictionary,
add_decoder_target=self.cfg.add_decoder_target,
fine_tuning=True,
tgt_lang_idx=None,
tokenizer=None,
)
else:
sup_dataset = None
### 4th, compose a MultiCorpusDataset
dataset_dict, max_positions_dict, distributions, max_tokens_ratios = self.resample_multi_modality_dataset(
speech_dataset, sup_dataset, mono_datasets, paired_datasets, mono_splits, paired_splits, epoch=epoch,
)
self.datasets[split] = MultiCorpusDataset(
dataset_dict,
max_positions=max_positions_dict,
distribution=distributions,
max_tokens_ratio=max_tokens_ratios,
seed=self.cfg.text_cfg.seed,
sort_indices=True,
)
def max_positions(self) -> Tuple[int, int]:
return (sys.maxsize, sys.maxsize)
def filter_indices_by_size(
self, indices: np.array, *args, **kwargs
) -> np.array:
return indices
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
skip_remainder_batch=False,
grouped_shuffling=False,
update_epoch_batch_itr=False,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
data_buffer_size (int, optional): number of batches to
preload (default: 0).
disable_iterator_cache (bool, optional): don't cache the
EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
(default: False).
skip_remainder_batch (bool, optional): if set, discard the last
batch in each training epoch, as the last batch is often smaller than
local_batch_size * distributed_world_size (default: ``False``).
grouped_shuffling (bool, optional): group batches with each groups
containing num_shards batches and shuffle groups. Reduces difference
between sequence lengths among workers for batches sorted by length.
update_epoch_batch_itr (bool, optional): if true, then do not use the cached
batch iterator for the epoch
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
if self.fine_tuning or not isinstance(dataset, MultiCorpusDataset):
return super().get_batch_iterator(
dataset,
max_tokens=max_tokens,
max_sentences=max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=ignore_invalid_inputs,
required_batch_size_multiple=required_batch_size_multiple,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
data_buffer_size=data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
skip_remainder_batch=skip_remainder_batch,
grouped_shuffling=grouped_shuffling,
update_epoch_batch_itr=update_epoch_batch_itr,
)
can_reuse_epoch_itr = (
not disable_iterator_cache
and not update_epoch_batch_itr
and self.can_reuse_epoch_itr(dataset)
)
if can_reuse_epoch_itr and dataset in self.dataset_to_epoch_iter:
logger.debug("reusing EpochBatchIterator for epoch {}".format(epoch))
return self.dataset_to_epoch_iter[dataset]
assert isinstance(dataset, FairseqDataset)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = self.filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs
)
# create mini-batches with given size constraints
batch_sampler = dataset.get_batch_sampler(
indices,
num_shards,
seed,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
split_modality_batch=self.cfg.split_modality_batch,
)
# return a reusable, sharded iterator
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
skip_remainder_batch=skip_remainder_batch,
disable_shuffling=True,
grouped_shuffling=grouped_shuffling,
)
if can_reuse_epoch_itr:
self.dataset_to_epoch_iter[dataset] = epoch_iter
return epoch_iter
@classmethod
def _get_size_ratios(cls, ids: List[str], sizes: List[int], alpha: float = 1.0):
"""Size ratios for temperature-based sampling
(https://arxiv.org/abs/1907.05019)"""
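# prob_i = n_i / N is the natural sampling probability; raising it to the power alpha and
# renormalizing gives the smoothed target distribution, and size_ratio_i rescales corpus i
# so that sampling proportionally to the rescaled sizes matches that distribution.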
_sizes = np.array(sizes)
prob = _sizes / _sizes.sum()
smoothed_prob = prob ** alpha
smoothed_prob = smoothed_prob / smoothed_prob.sum()
size_ratio = (smoothed_prob * _sizes.sum()) / _sizes
o_str = str({_i: f"{prob[i]:.3f}" for i, _i in enumerate(ids)})
logger.info(f"original sampling probability: {o_str}")
p_str = str({_i: f"{smoothed_prob[i]:.3f}" for i, _i in enumerate(ids)})
logger.info(f"balanced sampling probability: {p_str}")
sr_str = str({_id: f"{size_ratio[i]:.3f}" for i, _id in enumerate(ids)})
logger.info(f"balanced sampling size ratio: {sr_str}")
return size_ratio.tolist()
def resample_multi_modality_dataset(self, speech_dataset, sup_dataset, mono_datasets, paired_datasets, mono_splits, paired_splits, epoch=1, train=True):
assert len(mono_datasets+paired_datasets) > 0, f"No text data loaded!"
if len(mono_datasets) > 1 and self.cfg.text_sampling_alpha != 1.0:
size_ratios = self._get_size_ratios(
mono_splits, [len(s) for s in mono_datasets], alpha=self.cfg.text_sampling_alpha
)
mono_datasets = [
ResamplingDataset(
d, size_ratio=r, seed=0, epoch=epoch, replace=(r >= 1.0)
) for d, r in zip(mono_datasets, size_ratios)
]
if len(paired_datasets) > 1 and self.cfg.text_sampling_alpha != 1.0:
size_ratios = self._get_size_ratios(
paired_splits, [len(s) for s in paired_datasets], alpha=self.cfg.text_sampling_alpha
)
paired_datasets = [
ResamplingDataset(
d, size_ratio=r, seed=0, epoch=epoch, replace=(r >= 1.0)
) for d, r in zip(paired_datasets, size_ratios)
]
dataset_list = [speech_dataset, sup_dataset]
for datasets in [mono_datasets, paired_datasets]:
if len(datasets) > 1:
dataset_list.append(ConcatDataset(datasets))
elif len(datasets) == 1:
dataset_list.append(datasets[0])
else:
dataset_list.append(None)
### match speech/text datasets according to modality
dataset_dict = OrderedDict((name, d) for name, d in zip(["speech", "speech_sup", "text_mono", "text_paired"], dataset_list) if d is not None)
max_positions_dict = {
"speech": None,
"speech_sup": None,
"text_mono": (self.cfg.text_cfg.tokens_per_sample, self.cfg.text_cfg.tokens_per_sample),
"text_paired": (self.cfg.text_cfg.tokens_per_sample, self.cfg.text_cfg.tokens_per_sample),
}
max_positions_dict = OrderedDict((name, max_positions_dict[name]) for name in dataset_dict.keys())
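# Text corpora measure length in tokens rather than waveform samples, so their max_tokens
# budget is divided by 320 (roughly the waveform-to-frame downsampling of the conv
# front-end) and by text_maxtokens_ratio to stay comparable with the audio budget.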
max_tokens_ratios_dict = {
"speech": 1.0,
"speech_sup": 1.0,
"text_mono": 1.0 / 320 / self.cfg.text_cfg.text_maxtokens_ratio,
"text_paired": 1.0 / 320 / self.cfg.text_cfg.text_maxtokens_ratio,
}
max_tokens_ratios = [max_tokens_ratios_dict[name] for name in dataset_dict.keys()]
dataset_lens = np.array([len(dataset) for dataset in dataset_dict.values()])
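# Estimate the average sample length of each corpus from 10k randomly drawn examples;
# these estimates are used below to balance the sampling distribution across modalities.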
dataset_avg_sample_lens = np.array([
sum([dataset.num_tokens(i) for i in np.random.randint(low=0, high=len(dataset), size=10000)]) / 10000.0
for dataset in dataset_dict.values()
])
if not "speech" in dataset_dict:
distributions = [l / sum(dataset_lens) for l in dataset_lens]
else:
## keep the number of speech and non-speech batches roughly the same; expand_coef makes sure speech batches are not more numerous than the others
first_ratio = dataset_lens[0] / sum(dataset_lens)
expand_coef = 1.8 if sup_dataset is None else 1.1 * sum(dataset_lens[0:2]) / dataset_lens[0]
distributions = [expand_coef * max_tokens_ratios[i] * dataset_avg_sample_lens[0] / l for (i, l) in enumerate(dataset_avg_sample_lens)]
distributions[0] = 1.0
if sup_dataset is not None:
distributions[1] = dataset_lens[1] / dataset_lens[0]
distributions = [first_ratio * d for d in distributions]
logging.info(f"Number samples of datasets is {dataset_lens}")
logging.info(f"Avg sample length of datasets is {dataset_avg_sample_lens}")
logging.info(f"Sampling distributions is {distributions}")
logging.info(f"Maxtokens ratio is {max_tokens_ratios}")
return dataset_dict, max_positions_dict, distributions, max_tokens_ratios
def build_tokenizer(self, cfg=None):
logger.info(f"tokenizer: {self.cfg.hubert_tokenizer}")
if self.cfg.hubert_tokenizer != "none":
return encoders.build_bpe(Namespace(**{"bpe": self.cfg.hubert_tokenizer, "sentencepiece_model": self.cfg.sp_path}))
else:
return None
def load_char_bart_dataset(self, split):
mono_dataset = data_utils.load_indexed_dataset(
f"{self.cfg.text_cfg.text_data}/{split}",
self.text_dictionary,
)
mono_dataset = StripTokenDataset(mono_dataset, self.text_dictionary.eos())
mono_dataset = maybe_shorten_dataset(
mono_dataset,
split,
self.cfg.text_cfg.shorten_data_split_list,
self.cfg.text_cfg.shorten_method,
self.cfg.text_cfg.tokens_per_sample - 2,
self.cfg.text_cfg.seed,
)
logger.info("loaded {} samples from: {}".format(len(mono_dataset), mono_dataset))
### prepend bos and eos to dataset
mono_dataset = PrependTokenDataset(mono_dataset, self.text_dictionary.bos())
mono_dataset = AppendTokenDataset(mono_dataset, self.text_dictionary.eos())
mask_whole_words = (
get_whole_word_mask(None, self.text_dictionary)
if self.cfg.text_cfg.mask_whole_words
else None
)
lang=self.cfg.speech_tgt_lang
mono_dataset = DenoisingDataset(
mono_dataset,
mono_dataset.sizes,
self.text_dictionary,
self.mask_idx,
mask_whole_words,
shuffle=self.cfg.text_cfg.shuffle_instance,
seed=self.cfg.text_cfg.seed,
args=self.cfg.text_cfg,
tgt_lang_idx=_lang_token_index(self.text_dictionary, lang) if self.cfg.text_cfg.prepend_tgt_lang_tag else None,
)
return mono_dataset
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/tasks/joint_sc2t_pretrain.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import torch
import numpy as np
import logging
from pathlib import Path
from argparse import Namespace
from fairseq.tasks import LegacyFairseqTask, register_task
from fairseq.data import Dictionary, encoders
from fairseq.data.audio.speech_to_text_joint_dataset import S2TJointDataConfig
from speechlm.unit_generator import NonAutoregressiveUnitGenerator
from speechlm.data.text_to_unit_dataset import Text2UnitDatasetCreator
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
@register_task("fast_text_to_unit")
class FastTextToUnitTask(LegacyFairseqTask):
@staticmethod
def add_args(parser):
parser.add_argument("data", help="manifest root path")
parser.add_argument(
"--config-yaml",
type=str,
default="config.yaml",
help="Configuration YAML filename (under manifest root)",
)
parser.add_argument(
"--max-source-positions",
default=2048,
type=int,
metavar="N",
help="max number of tokens in the source sequence",
)
parser.add_argument(
"--max-target-positions",
default=1024,
type=int,
metavar="N",
help="max number of tokens in the target sequence",
)
parser.add_argument("--n-frames-per-step", type=int, default=1)
parser.add_argument("--eos-prob-threshold", type=float, default=0.5)
parser.add_argument("--eval-inference", action="store_true")
parser.add_argument("--eval-tb-nsample", type=int, default=8)
parser.add_argument("--vocoder", type=str, default="griffin_lim")
parser.add_argument("--spec-bwd-max-iter", type=int, default=8)
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml)
self.speaker_to_id = self._get_speaker_to_id()
@classmethod
def setup_task(cls, args, **kwargs):
data_cfg = S2TJointDataConfig(Path(args.data) / args.config_yaml)
src_dict_path = Path(args.data) / data_cfg.src_vocab_filename
if not src_dict_path.is_file():
raise FileNotFoundError(f"Dict not found: {src_dict_path.as_posix()}")
src_dict = Dictionary.load(src_dict_path.as_posix())
logger.info(
f"Source dictionary size ({data_cfg.src_vocab_filename}): " f"{len(src_dict):,}"
)
tgt_dict_path = Path(args.data) / data_cfg.vocab_filename
if not tgt_dict_path.is_file():
raise FileNotFoundError(f"Dict not found: {tgt_dict_path.as_posix()}")
tgt_dict = Dictionary.load(tgt_dict_path.as_posix())
logger.info(
f"Target dictionary size ({data_cfg.vocab_filename}): " f"{len(tgt_dict):,}"
)
if getattr(args, "train_subset", None) is not None:
if not all(s.startswith("train") for s in args.train_subset.split(",")):
raise ValueError('Train splits should be named like "train*".')
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
is_train_split = split.startswith("train")
pre_tokenizer = self.build_tokenizer(self.args)
bpe_tokenizer = self.build_bpe(self.args)
self.datasets[split] = Text2UnitDatasetCreator.from_tsv(
self.args.data,
self.data_cfg,
split,
self.src_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split=is_train_split,
epoch=epoch,
seed=self.args.seed,
n_frames_per_step=self.args.n_frames_per_step,
speaker_to_id=self.speaker_to_id,
)
@property
def target_dictionary(self):
return self.tgt_dict
@property
def source_dictionary(self):
return self.src_dict
def max_positions(self):
return self.args.max_source_positions, self.args.max_target_positions
def _get_speaker_to_id(self):
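# Map each speaker (one name per line in `speaker_set_filename`, if configured) to a
# contiguous integer id; returns None when no speaker set is provided.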
speaker_to_id = None
speaker_set_filename = self.data_cfg.config.get("speaker_set_filename")
if speaker_set_filename is not None:
speaker_set_path = Path(self.args.data) / speaker_set_filename
with open(speaker_set_path) as f:
speaker_to_id = {r.strip(): i for i, r in enumerate(f)}
return speaker_to_id
@classmethod
def get_speaker_embeddings(cls, args):
# Used by the FastText2UnitModel: instead of an nn.Embedding over speaker ids, we default to x-vectors extracted in advance.
# This is for varying the speaker information when generating units from text.
if args.speaker_to_id is not None:
embed_speaker = torch.nn.Embedding(
len(args.speaker_to_id), args.speaker_embed_dim
)
elif args.speaker_embedding_type == "x-vector":
# return LayerNorm(args.speaker_embed_dim)
return lambda x: x.unsqueeze(1)
elif args.speaker_embedding_type == "i-vector":
# return LayerNorm(args.speaker_embed_dim)
return lambda x: x
else:
embed_speaker = None
return embed_speaker
def build_model(self, cfg):
cfg.pitch_min = self.data_cfg.config["features"].get("pitch_min", None)
cfg.pitch_max = self.data_cfg.config["features"].get("pitch_max", None)
cfg.energy_min = self.data_cfg.config["features"].get("energy_min", None)
cfg.energy_max = self.data_cfg.config["features"].get("energy_max", None)
cfg.speaker_to_id = self.speaker_to_id
cfg.speaker_embedding_type = self.data_cfg.config.get("speaker_embedding_type", None)
model = super().build_model(cfg)
self.generator = None
if getattr(cfg, "eval_inference", False):
self.generator = self.build_generator([model], cfg)
return model
def build_generator(self, models, cfg, vocoder=None, **unused):
model = models[0]
assert getattr(model, "NON_AUTOREGRESSIVE") is True
return NonAutoregressiveUnitGenerator(model, vocoder, self.data_cfg)
def build_tokenizer(self, args):
logger.info(f"pre-tokenizer: {self.data_cfg.pre_tokenizer}")
return encoders.build_tokenizer(Namespace(**self.data_cfg.pre_tokenizer))
def build_bpe(self, args):
logger.info(f"tokenizer: {self.data_cfg.bpe_tokenizer}")
return encoders.build_bpe(Namespace(**self.data_cfg.bpe_tokenizer))
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/tasks/fast_text_to_unit.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import logging
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils, checkpoint_utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass import ChoiceEnum
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.transformer import Embedding
from fairseq.file_io import PathManager
from torch import Tensor
from fairseq.models.wav2vec.wav2vec2 import ConvFeatureExtractionModel
from fairseq.modules import GradMultiply, LayerNorm
from fairseq.tasks.hubert_pretraining import (
HubertPretrainingConfig,
HubertPretrainingTask,
)
from fairseq.models.hubert import HubertConfig
from fairseq.models.transformer import TransformerConfig
from speechlm.modules.w2v_encoder import TransformerEncoder
from speechlm.modules.transformer_encoder import TransformerEncoderBase
logger = logging.getLogger(__name__)
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
@dataclass
class SpeechlmConfig(HubertConfig):
use_rel_pos_enc: bool = field(
default=False,
metadata={"help": "whether to use relative positional encoding"},
)
scaling_for_att: float = field(
default=1.0,
metadata={"help": "scaling for attention weights to prevent overflow issue (for large model)"},
)
# unit encoder-decoder
text_transformer: TransformerConfig = TransformerConfig()
add_unit_encoder: bool = field(
default=False,
metadata={"help": "add unit encoder"},
)
add_decoder: bool = field(
default=False,
metadata={"help": "add decoder"},
)
add_text_ctc: bool = field(
default=False,
metadata={"help": "add_text_ctc head"},
)
text_ctc_conv_kernel: int = field(
default=2,
metadata={"help": "text_ctc_conv kernel size"},
)
mask_u2t: bool = field(
default=True,
metadata={"help": "mask the unit input in unit-to-text task"},
)
compute_mum: bool = field(
default=False,
metadata={"help": "compute MLM loss in unit-to-text task"},
)
# embedding mixing
mix_with_unit: bool = field(
default=True,
metadata={"help": "mix with the unit embeddings"},
)
use_pred_unit: bool = field(
default=False,
metadata={"help": "use the embeddings of predicted units"},
)
l2_embedding: bool = field(
default=False,
metadata={"help": "compute l2 loss between unit embedding and unit hidden state"},
)
# Finetune related
encoder_dict_size: int = field(
default=-1,
metadata={"help": "text encoder dictionary dimension"},
)
decoder_dict_size: int = field(
default=-1,
metadata={"help": "decoder dictionary dimension"},
)
@register_model("speechlm", dataclass=SpeechlmConfig)
class SpeechlmModel(BaseFairseqModel):
def __init__(
self,
cfg: SpeechlmConfig,
task_cfg: HubertPretrainingConfig,
dictionaries: List[Dictionary],
unit_dictionary: Dictionary = None,
text_tgt_dictionary: Dictionary = None,
) -> None:
super().__init__()
logger.info(f"SpeechlmModel Config: {cfg}")
feature_enc_layers = eval(cfg.conv_feature_layers) # noqa
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
feature_ds_rate = np.prod([s for _, _, s in feature_enc_layers])
self.feat2tar_ratio = cfg.label_rate * feature_ds_rate / task_cfg.sample_rate
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.logit_temp = cfg.logit_temp
self.skip_masked = cfg.skip_masked
self.skip_nomask = cfg.skip_nomask
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.final_dim = final_dim
assert len(dictionaries) <= 2, f"Only support <=2 kinds of targets, get {len(dictionaries)} dictionaries"
if len(dictionaries) == 1:
dictionaries = [dictionaries[0], dictionaries[0]]
self.final_proj_list = nn.ModuleList([
nn.Linear(cfg.encoder_embed_dim, final_dim) for _ in dictionaries
])
self.num_classes = [len(d) for d in dictionaries]
self.label_embs_list = nn.ParameterList([
nn.Parameter(torch.FloatTensor(n, final_dim)) for n in self.num_classes
])
for i in range(len(self.num_classes)):
nn.init.uniform_(self.label_embs_list[i])
### build unit encoder:
self.mask_u2t = cfg.mask_u2t
self.compute_mum = cfg.compute_mum
self.add_text_ctc = cfg.add_text_ctc
self.text_ctc_conv_kernel = cfg.text_ctc_conv_kernel
self.padding_idx = unit_dictionary.pad()
self.unit_mask_idx = unit_dictionary.index("<mask>")
self.add_unit_encoder = cfg.add_unit_encoder
self.mix_with_unit = cfg.mix_with_unit
self.use_pred_unit = cfg.use_pred_unit
self.l2_embedding = cfg.l2_embedding
if self.add_unit_encoder:
assert len(unit_dictionary) == self.num_classes[0], f"unit_dictionary: {len(unit_dictionary)}, self.num_classes[0]: {self.num_classes[0]}"
### build the unit pre-net; its embeddings are shared with the hubert label_embs when the dimensions match
self.unit_embed_tokens = self.build_embedding(
unit_dictionary,
cfg.text_transformer.encoder.embed_dim,
)
if self.final_dim == cfg.text_transformer.encoder.embed_dim:
logger.info("Share label_embs[0] with unit_embed_tokens ...")
nn.init.uniform_(self.unit_embed_tokens.weight)
self.label_embs_list[0] = self.unit_embed_tokens.weight
### build unit encoder
self.unit_encoder = TransformerEncoderBase(
cfg.text_transformer,
unit_dictionary,
self.unit_embed_tokens,
use_rel_pos_enc=cfg.use_rel_pos_enc,
scaling_for_att=cfg.scaling_for_att,
)
### build text ctc head
if self.add_text_ctc:
conv = nn.Conv1d(
cfg.text_transformer.encoder.embed_dim, cfg.text_transformer.encoder.embed_dim,
self.text_ctc_conv_kernel,
stride=self.text_ctc_conv_kernel // 2,
bias=False,
padding=self.text_ctc_conv_kernel // 2,
)
nn.init.kaiming_normal_(conv.weight)
self.unit_encoder_ctc_head = nn.Sequential(
Rotate3D(),
conv,
nn.Dropout(p=0.1),
nn.Sequential(
Rotate3D(),
Rotate3D(),
LayerNorm(cfg.text_transformer.encoder.embed_dim),
),
nn.GELU(),
nn.Linear(cfg.text_transformer.encoder.embed_dim, len(text_tgt_dictionary)),
)
### build unit2text decoder, not available for now
self.add_decoder = cfg.add_decoder
def build_embedding(self, dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: SpeechlmConfig, task: HubertPretrainingTask):
"""Build a new model instance."""
unit_dictionary = getattr(task, "text_src_dictionary", None)
text_tgt_dictionary = getattr(task, "text_dictionary", None)
model = SpeechlmModel(cfg, task.cfg, task.dictionaries, unit_dictionary, text_tgt_dictionary)
return model
def apply_mask(self, x, padding_mask, target_list):
B, T, C = x.shape
if self.mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def forward_features(self, source: torch.Tensor) -> torch.Tensor:
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
return features
def forward_targets(
self,
features: torch.Tensor,
target_list: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
# Trim features to ensure labels exist and then get aligned labels
feat_tsz = features.size(2)
targ_tsz = min([t.size(1) for t in target_list])
if self.feat2tar_ratio * feat_tsz > targ_tsz:
feat_tsz = int(targ_tsz / self.feat2tar_ratio)
features = features[..., :feat_tsz]
target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio
target_inds += np.random.choice(int(self.feat2tar_ratio))
target_list = [t[:, target_inds.long()] for t in target_list]
return features, target_list
def forward_padding_mask(
self,
features: torch.Tensor,
padding_mask: torch.Tensor,
) -> torch.Tensor:
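# Downsample the sample-level padding mask to the frame rate of the conv features:
# a frame counts as padding only if every waveform sample it covers is padding.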
extra = padding_mask.size(1) % features.size(1)
if extra > 0:
padding_mask = padding_mask[:, :-extra]
padding_mask = padding_mask.view(padding_mask.size(0), features.size(1), -1)
padding_mask = padding_mask.all(-1)
return padding_mask
def get_normalized_probs(
self,
net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]],
log_probs: bool,
sample: Optional[Dict[str, Tensor]] = None,
):
lprobs = self.get_normalized_probs_scriptable(net_output, log_probs, sample)
lprobs.batch_first = True
return lprobs
def downsample_ctc_padding_mask(self, padding_mask):
"""
padding_mask: (B, T)
"""
stride = self.text_ctc_conv_kernel // 2
return padding_mask[:, ::stride]
def compute_pred(self, proj_x, label_embs):
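# Cosine-similarity logits between the projected frames (S, D) and the label/codebook
# embeddings (C, D), scaled by 1 / logit_temp as in HuBERT.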
if self.target_glu:
label_embs = self.target_glu(label_embs)
x = F.normalize(proj_x.float(), dim=-1) # (S, D)
label_embs = F.normalize(label_embs.float(), dim=-1) # (C, D)
logits = torch.matmul(x, label_embs.T).type_as(proj_x) # (S, C)
logits /= self.logit_temp
return logits
def compute_hubert_logits(self, x, target, proj, label_embs, padding_mask, mask_indices):
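# Returns two lists of (logits, targets) pairs, one computed over masked frames and one
# over unmasked frames; a list holds [None] when the corresponding loss is skipped.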
if not self.skip_masked:
masked_indices = torch.logical_and(~padding_mask, mask_indices)
proj_x_m = proj(x[masked_indices])
logit_m_list = [(self.compute_pred(proj_x_m, label_embs), target[masked_indices])]
else:
logit_m_list = [None]
if not self.skip_nomask:
nomask_indices = torch.logical_and(~padding_mask, ~mask_indices)
proj_x_u = proj(x[nomask_indices])
logit_u_list = [(self.compute_pred(proj_x_u, label_embs), target[nomask_indices])]
else:
logit_u_list = [None]
return logit_m_list, logit_u_list
def convert_embeddings(self,
x,
padding_mask,
target=None,
mask_indices=None,
mix_with_unit=False,
use_pred_unit=False,
l2_embedding=False,
remask=False
):
"""
1. Mix with units if needed (default: True)
2. Prepare for unit_encoder inputs
Inputs:
x, (B, T, D)
Return:
src_tokens, (B, T)
soft_embeddings, (B, T, D)
l2_loss, a loss
"""
soft_embeddings = self.final_proj_list[0](x) if x.size(-1) == self.final_dim else x
if padding_mask is None:
padding_mask = soft_embeddings.new_zeros(soft_embeddings.size(0), soft_embeddings.size(1), dtype=torch.long)
if use_pred_unit:
src_tokens = self.compute_pred(self.final_proj_list[0](x), self.label_embs_list[0]).argmax(dim=-1)
src_tokens[padding_mask] = self.padding_idx
elif target is not None:
src_tokens = target
else:
src_tokens = padding_mask.long()
if l2_embedding | mix_with_unit:
unit_embeddings = self.unit_embed_tokens(src_tokens) # (B, T, D)
l2_loss = 0
if l2_embedding:
if mask_indices is not None:
l2_loss = (soft_embeddings - unit_embeddings)[mask_indices].float().pow(2).mean(dim=-1)
scale = unit_embeddings[mask_indices].float().pow(2).sum(dim=-1)
else:
l2_loss = (soft_embeddings - unit_embeddings).float().pow(2).mean(dim=-1)
scale = unit_embeddings.float().pow(2).sum(dim=-1)
l2_loss = (l2_loss / scale).mean()
if mix_with_unit:
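# Randomly pick roughly mask_prob/2 of the positions and swap their soft (acoustic)
# embeddings for the corresponding discrete unit embeddings; positions that were already
# masked may be re-masked instead of swapped when `remask` is set.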
B, T, D = x.shape
selected_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob / 2,
self.mask_length // 2,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
selected_indices = torch.from_numpy(selected_indices).to(x.device)
if mask_indices is not None:
if remask:
remask_indices = torch.logical_and(selected_indices, mask_indices)
soft_embeddings[remask_indices] = self.mask_emb
swap_indices = torch.logical_and(selected_indices, ~mask_indices)
else:
swap_indices = selected_indices
soft_embeddings[swap_indices] = unit_embeddings[swap_indices]
soft_embeddings = soft_embeddings * (1 - padding_mask.unsqueeze(-1).type_as(x))
return src_tokens, soft_embeddings, l2_loss
def forward(
self,
source: torch.Tensor = None,
src_tokens: torch.Tensor = None,
src_lengths: torch.Tensor = None,
target_list: Optional[List[torch.Tensor]] = None,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = True,
features_only: bool = False,
output_layer: Optional[int] = None,
) -> Dict[str, torch.Tensor]:
assert source is not None or src_tokens is not None
if source is not None:
return self.forward_speech(
source=source,
target_list=target_list,
padding_mask=padding_mask,
mask=mask,
features_only=features_only,
output_layer=output_layer,
)
else:
return self.forward_text(
src_tokens=src_tokens,
src_lengths=src_lengths,
mask=self.mask_u2t,
output_layer=output_layer,
)
def forward_speech(
self,
source: torch.Tensor = None,
target_list: Optional[List[torch.Tensor]] = None,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = True,
features_only: bool = False,
output_layer: Optional[int] = None,
) -> Dict[str, torch.Tensor]:
"""output layer is 1-based"""
features = self.forward_features(source)
if target_list is not None:
features, target_list = self.forward_targets(features, target_list)
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
if mask:
x, mask_indices = self.apply_mask(features, padding_mask, target_list)
else:
x = features
mask_indices = None
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, _ = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1,
)
if features_only:
return {"x": x, "padding_mask": padding_mask, "features": features}
logit_m_list, logit_u_list = self.compute_hubert_logits(
x,
target_list[0],
self.final_proj_list[0],
self.label_embs_list[0],
padding_mask,
mask_indices,
)
result = {
"logit_m_list": logit_m_list,
"logit_u_list": logit_u_list,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
if self.add_unit_encoder:
src_tokens, x_emb, l2_loss = self.convert_embeddings(
x,
padding_mask, target_list[0],
mask_indices=mask_indices,
mix_with_unit=self.mix_with_unit,
use_pred_unit=self.use_pred_unit,
l2_embedding=self.l2_embedding,
)
encoder_out = self.unit_encoder(src_tokens, token_embeddings=x_emb)
result['encoder_out'] = encoder_out['encoder_out'] # [(T, B, D)]
result['encoder_padding_mask'] = encoder_out['encoder_padding_mask'] # [(B, T)]
if self.l2_embedding:
result['embedding_l2_loss'] = l2_loss
code_logit_m_list, code_logit_u_list = self.compute_hubert_logits(
encoder_out['encoder_out'][0].transpose(0, 1),
target_list[-1],
self.final_proj_list[-1],
self.label_embs_list[-1],
padding_mask,
mask_indices,
)
result['logit_m_list'] += code_logit_m_list
result['logit_u_list'] += code_logit_u_list
return result
def forward_text(
self,
src_tokens: torch.Tensor = None,
src_lengths: torch.Tensor = None,
target_list: Optional[List[torch.Tensor]] = None,
mask: bool = True,
output_layer: Optional[int] = None,
) -> Dict[str, torch.Tensor]:
assert self.add_unit_encoder, f"Can not forward unit-text branch without unit_encoder!"
padding_mask = src_tokens == self.padding_idx
unit_embeddings = self.unit_embed_tokens(src_tokens)
if mask:
unit_embeddings, mask_indices = self.apply_mask(unit_embeddings, padding_mask, [src_tokens])
else:
### If the mask was already applied to src_tokens, target_list is expected to hold padding_idx at all unmasked positions
mask_indices = target_list[-1] != self.padding_idx
unit_embeddings[mask_indices] = self.mask_emb
encoder_out = self.unit_encoder(
src_tokens,
token_embeddings=unit_embeddings,
return_all_hiddens=output_layer is not None,
)
result = {}
result["encoder_out"] = encoder_out["encoder_out"]
result["encoder_states"] = encoder_out["encoder_states"]
result["padding_mask"] = padding_mask
if self.compute_mum:
code_logit_m_list, code_logit_u_list = self.compute_hubert_logits(
encoder_out["encoder_out"].transpose(0, 1),
target_list[-1],
self.final_proj_list[-1],
self.label_embs_list[-1],
padding_mask,
mask_indices,
)
result["logit_m_list"] = code_logit_m_list
result["logit_u_list"] = code_logit_u_list
if self.add_text_ctc:
result["encoder_out_ctc"] = [self.unit_encoder_ctc_head(x) for x in encoder_out['encoder_out']]
result["encoder_padding_mask"] = [
self.downsample_ctc_padding_mask(padding_mask) for padding_mask in encoder_out['encoder_padding_mask']
]
return result
def extract_features(
self,
source: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = False,
ret_conv: bool = False,
output_layer: Optional[int] = None,
**kwargs,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Extract features for only speech input"""
res = self.forward(
source,
padding_mask=padding_mask,
mask=mask,
features_only=True,
output_layer=output_layer,
)
x = res["x"] # B x T x D
padding_mask = res["padding_mask"]
if self.add_unit_encoder:
src_tokens, x, _ = self.convert_embeddings(
x,
padding_mask,
mix_with_unit=False,
use_pred_unit=False,
)
encoder_out = self.unit_encoder(
src_tokens,
token_embeddings=x,
return_all_hiddens=output_layer is not None
)
res["x"] = encoder_out['encoder_out'][0].transpose(0, 1) # (B, T, D)
feature = res["features"] if ret_conv else res["x"]
if output_layer is not None:
feature = encoder_out['encoder_states']
return feature, padding_mask
def get_logits(self, net_output, is_masked=True):
if is_masked:
logits_list = net_output["logit_m_list"]
else:
logits_list = net_output["logit_u_list"]
logits_list = [x[0].float() for x in logits_list if x is not None]
return logits_list
def get_targets(self, net_output, is_masked=True):
if is_masked:
logits_list = net_output["logit_m_list"]
else:
logits_list = net_output["logit_u_list"]
targets_list = [x[1].long() for x in logits_list if x is not None]
return targets_list
def get_extra_losses(self, net_output):
extra_losses = []
names = []
if "features_pen" in net_output:
extra_losses.append(net_output["features_pen"])
names.append("features_pen")
if "embedding_l2_loss" in net_output:
extra_losses.append(net_output["embedding_l2_loss"])
names.append("embedding_l2_loss")
return extra_losses, names
def remove_pretraining_modules(self, step2=False):
self.target_glu = None
def load_checkpoint(self, checkpoint: str):
if not PathManager.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = checkpoint_utils.load_checkpoint_to_cpu(checkpoint)
return state
class Rotate3D(nn.Module):
"""
(T, B, D) --> (B, D, T) --> (D, T, B) --> (T, B, D)
"""
def __init__(self):
super().__init__()
def forward(self, x):
return x.permute(1, 2, 0)
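# A minimal, hedged sanity check (not part of the original file): Rotate3D applies the
# cyclic permutation described in its docstring, so three applications restore the
# original (T, B, D) layout. Assumes torch is already imported at the top of this module.
if __name__ == "__main__":
    rot = Rotate3D()
    t = torch.randn(4, 2, 8)                   # (T, B, D)
    assert rot(t).shape == (2, 8, 4)           # (B, D, T)
    assert rot(rot(rot(t))).shape == t.shape   # back to (T, B, D)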
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/speechlm.py |
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/__init__.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import contextlib
import torch
import torch.nn as nn
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Any
from fairseq import checkpoint_utils, tasks, utils
from fairseq.models import FairseqEncoderDecoderModel, register_model
from fairseq.models.fairseq_decoder import FairseqDecoder
from fairseq.models.fairseq_encoder import FairseqEncoder
from fairseq.tasks import FairseqTask
from fairseq.dataclass import ChoiceEnum
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models.hubert import HubertAsrConfig
from speechlm.modules.transformer_decoder import TransformerDecoderScriptable
@dataclass
class SpeechLMS2TConfig(HubertAsrConfig):
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
use_rel_pos_enc: bool = field(
default=True,
metadata={"help": "whether to use relative positional encoding for decoder"},
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension, used for enc-dec att"}
)
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_output_dim: int = field(
default=768, metadata={"help": "decoder output dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(default=6, metadata={"help": "num of decoder layers"})
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=12, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False, metadata={"help": "apply layernorm before each decoder block"}
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings (outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN inside the decoder"
},
)
share_decoder_input_output_embed: bool = field(
default=False, metadata={"help": "share decoder input and output embeddings"}
)
### the following config is only for compatibility with the fairseq speech_to_text task
input_feat_per_channel: Any = None
input_channels: Any = None
speaker_to_id: Any = None
@register_model("speechlm_st_legacy", dataclass=SpeechLMS2TConfig)
class SpeechLMS2T(FairseqEncoderDecoderModel):
def __init__(self, cfg: SpeechLMS2TConfig, encoder: FairseqEncoder, decoder: FairseqDecoder):
super().__init__(encoder, decoder)
self.cfg = cfg
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: SpeechLMS2TConfig, task: FairseqTask):
"""Build a new model instance."""
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
return Embedding(num_embeddings, embed_dim, padding_idx)
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
encoder = SpeechLMEncoder(cfg, task)
assert cfg.encoder_embed_dim == encoder.w2v_model.encoder.embedding_dim
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
decoder = TransformerDecoderScriptable(cfg, tgt_dict, decoder_embed_tokens)
return cls(cfg, encoder, decoder)
class SpeechLMEncoder(FairseqEncoder):
"""
Modified from fairseq.models.hubert.hubert_asr.HubertEncoder
1. make it compatible with the fairseq speech_to_text task
2. make it compatible with encoder-decoder models
"""
def __init__(self, cfg: HubertAsrConfig, task):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(cfg.w2v_path, arg_overrides)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(w2v_args)
assert task.data_cfg.standardize_audio() == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for "
"both pre-training and here"
)
w2v_args.task.data = cfg.data
pretrain_task = tasks.setup_task(w2v_args.task)
if state is not None and "task_state" in state:
# This will load the stored "dictionaries" object
pretrain_task.load_state_dict(state["task_state"])
else:
pretrain_task.load_state_dict(task.state_dict())
model = pretrain_task.build_model(w2v_args.model, from_checkpoint=True)
if state is not None and not cfg.no_pretrained_weights:
# set strict=False because we omit some modules
model.load_state_dict(state["model"], strict=False)
model.remove_pretraining_modules()
super().__init__(pretrain_task.source_dictionary)
d = w2v_args.model.encoder_embed_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, src_tokens=None, src_lengths=None, **kwargs):
w2v_args = {
"source": src_tokens,
"padding_mask": lengths_to_padding_mask(src_lengths),
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
x, padding_mask = self.w2v_model.extract_features(**w2v_args)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [padding_mask], # B x T
"padding_mask": [padding_mask],
}
def forward_torchscript(self, net_input):
"""A TorchScript-compatible version of forward.
Encoders which use additional arguments may want to override
this method for TorchScript compatibility.
"""
_net_input = {
"source": net_input["src_tokens"],
"padding_mask": lengths_to_padding_mask(net_input["src_lengths"]),
"mask": False,
}
x, padding_mask = self.w2v_model.extract_features(**_net_input)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
encoder_out = {
"encoder_out" : [x],
"encoder_padding_mask" : [padding_mask],
}
return encoder_out
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = [
x.index_select(1, new_order) for x in encoder_out["encoder_out"]
]
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = [
x.index_select(0, new_order) for x in encoder_out["encoder_padding_mask"]
]
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
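# Hedged usage sketch (not part of the original file): SpeechLMEncoder.forward above
# builds its padding mask with fairseq's lengths_to_padding_mask, which marks padded
# positions with True, e.g.:
if __name__ == "__main__":
    lengths = torch.tensor([3, 1])
    mask = lengths_to_padding_mask(lengths)
    # tensor([[False, False, False],
    #         [False,  True,  True]])
    assert mask.shape == (2, 3) and mask.sum().item() == 2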
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/speechlm_st.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import logging
import torch
from fairseq import utils
from fairseq.models import (
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.text_to_speech import fastspeech2
logger = logging.getLogger(__name__)
class VarianceAdaptor(fastspeech2.VarianceAdaptor):
def __init__(self, args):
super().__init__(args)
self.use_pitch = args.use_pitch
self.use_energe = args.use_energe
def forward(
self,
x,
padding_mask,
durations=None,
pitches=None,
energies=None,
d_factor=1.0,
p_factor=1.0,
e_factor=1.0,
):
# x: B x T x C
log_dur_out = self.duration_predictor(x)
dur_out = torch.clamp(
torch.round((torch.exp(log_dur_out) - 1) * d_factor).long(), min=0
)
dur_out.masked_fill_(padding_mask, 0)
if self.use_pitch:
pitch_out, pitch_emb = self.get_pitch_emb(x, pitches, p_factor)
x = x + pitch_emb
else:
pitch_out = None
if self.use_energe:
energy_out, energy_emb = self.get_energy_emb(x, energies, e_factor)
x = x + energy_emb
else:
energy_out = None
x, out_lens = self.length_regulator(
x, dur_out if durations is None else durations
)
return x, out_lens, log_dur_out, pitch_out, energy_out
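# Hedged numeric note (not part of the original file): the duration predictor above
# outputs log(duration + 1), so e.g. log_dur_out = 1.0986 with d_factor = 1.0 yields
# round(exp(1.0986) - 1) = round(2.0) = 2 frames for that position, and durations at
# padded positions are zeroed by masked_fill_.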
class FastSpeech2Encoder(fastspeech2.FastSpeech2Encoder):
def __init__(self, args, src_dict, embed_speaker):
super().__init__(args, src_dict, embed_speaker)
self.var_adaptor = VarianceAdaptor(args)
self.apply(fastspeech2.model_init)
@register_model("fasttext2unit")
class FastText2UnitModel(FairseqEncoderModel):
"""
Text-to-unit model based on FastSpeech 2 (https://arxiv.org/abs/2006.04558)
"""
NON_AUTOREGRESSIVE = True
@staticmethod
def add_args(parser):
parser.add_argument("--dropout", type=float)
parser.add_argument("--output-frame-dim", type=int)
parser.add_argument("--speaker-embed-dim", type=int)
# FFT blocks
parser.add_argument("--fft-hidden-dim", type=int)
parser.add_argument("--fft-kernel-size", type=int)
parser.add_argument("--attention-dropout", type=float)
parser.add_argument("--encoder-layers", type=int)
parser.add_argument("--encoder-embed-dim", type=int)
parser.add_argument("--encoder-attention-heads", type=int)
parser.add_argument("--decoder-layers", type=int)
parser.add_argument("--decoder-embed-dim", type=int)
parser.add_argument("--decoder-attention-heads", type=int)
# variance predictor
parser.add_argument("--var-pred-n-bins", type=int)
parser.add_argument("--var-pred-hidden-dim", type=int)
parser.add_argument("--var-pred-kernel-size", type=int)
parser.add_argument("--var-pred-dropout", type=float)
# postnet
parser.add_argument("--add-postnet", action="store_true")
parser.add_argument("--postnet-dropout", type=float)
parser.add_argument("--postnet-layers", type=int)
parser.add_argument("--postnet-conv-dim", type=int)
parser.add_argument("--postnet-conv-kernel-size", type=int)
# pitch & energe
parser.add_argument("--use-pitch", action="store_true")
parser.add_argument("--use-energe", action="store_true")
def __init__(self, encoder, args, src_dict):
super().__init__(encoder)
self._num_updates = 0
@classmethod
def build_model(cls, args, task):
embed_speaker = task.get_speaker_embeddings(args)
if args.output_frame_dim == -1:
args.output_frame_dim = len(task.tgt_dict)
encoder = FastSpeech2Encoder(args, task.src_dict, embed_speaker)
return cls(encoder, args, task.src_dict)
def set_num_updates(self, num_updates):
super().set_num_updates(num_updates)
self._num_updates = num_updates
def get_normalized_probs(self, net_output, log_probs, sample=None):
logits = net_output[0]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
@register_model_architecture("fasttext2unit", "fasttext2unit_s")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.2)
args.output_frame_dim = getattr(args, "output_frame_dim", -1)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256)
# FFT blocks
args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1024)
args.fft_kernel_size = getattr(args, "fft_kernel_size", 9)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.encoder_layers = getattr(args, "encoder_layers", 4)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 4)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
# variance predictor
args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256)
args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256)
args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3)
args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
# postnet
args.add_postnet = getattr(args, "add_postnet", False)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# pitch & energe
args.use_pitch = getattr(args, "use_pitch", False)
args.use_energe = getattr(args, "use_energe", False)
@register_model_architecture("fasttext2unit", "fasttext2unit_m")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.2)
args.output_frame_dim = getattr(args, "output_frame_dim", -1)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256)
# FFT blocks
args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1024)
args.fft_kernel_size = getattr(args, "fft_kernel_size", 9)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
# variance predictor
args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256)
args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256)
args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3)
args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
# postnet
args.add_postnet = getattr(args, "add_postnet", False)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# pitch & energe
args.use_pitch = getattr(args, "use_pitch", False)
args.use_energe = getattr(args, "use_energe", False)
@register_model_architecture("fasttext2unit", "fasttext2unit_l")
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.2)
args.output_frame_dim = getattr(args, "output_frame_dim", -1)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256)
# FFT blocks
args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1536)
args.fft_kernel_size = getattr(args, "fft_kernel_size", 9)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 384)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 384)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 6)
# variance predictor
args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256)
args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256)
args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3)
args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
# postnet
args.add_postnet = getattr(args, "add_postnet", False)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# pitch & energe
args.use_pitch = getattr(args, "use_pitch", False)
args.use_energe = getattr(args, "use_energe", False)
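# Hedged usage sketch (not part of the original file). Note that the three architecture
# functions above share the name `base_architecture`; fairseq's register_model_architecture
# captures each function object at decoration time, so all three variants stay registered
# even though the name is rebound. At this point in the module, `base_architecture` refers
# to the fasttext2unit_l defaults:
if __name__ == "__main__":
    from argparse import Namespace
    args = Namespace(encoder_layers=8)   # pretend the user overrode one option
    base_architecture(args)              # getattr(...) only fills in the missing values
    assert args.encoder_layers == 8 and args.dropout == 0.2 and args.fft_hidden_dim == 1536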
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/fasttext2unit.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
from dataclasses import dataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.tasks import FairseqTask
from fairseq.models.hubert import HubertAsrConfig, HubertCtc, HubertEncoder
@dataclass
class SpeechLMCtcConfig(HubertAsrConfig):
pass
@register_model("speechlm_ctc", dataclass=SpeechLMCtcConfig)
class SpeechLMCtc(HubertCtc):
def __init__(self, cfg: SpeechLMCtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__(cfg, w2v_encoder)
@classmethod
def build_model(cls, cfg: SpeechLMCtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = SpeechLMEncoder(cfg, task)
return cls(cfg, w2v_encoder)
class SpeechLMEncoder(HubertEncoder):
def __init__(self, cfg: HubertAsrConfig, task):
super().__init__(cfg, task)
if (task.target_dictionary is not None) and (
hasattr(self.w2v_model, "unit_encoder_ctc_head")
):
self.proj = self.w2v_model.unit_encoder_ctc_head
self.conv_ctc_proj = True
else:
self.conv_ctc_proj = False
def forward(self, source, padding_mask, tbc=True, **kwargs):
results = super().forward(
source,
padding_mask,
tbc,
**kwargs,
)
if self.conv_ctc_proj:
results["padding_mask"] = self.w2v_model.downsample_ctc_padding_mask(results["padding_mask"])
return results
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/models/speechlm_ctcasr.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import math
from typing import Dict, List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqEncoder
from fairseq.modules import (
FairseqDropout,
LayerDropModuleList,
LayerNorm,
SinusoidalPositionalEmbedding,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
from fairseq.models.transformer import (
TransformerConfig,
)
from speechlm.modules import transformer_layer, LearnedPositionalEmbedding
from speechlm.modules.relative_pos_enc import RelativePositionalEncoding
# rewrite name for backward compatibility in `make_generation_fast_`
def module_name_fordropout(module_name: str) -> str:
if module_name == "TransformerEncoderBase":
return "TransformerEncoder"
else:
return module_name
class TransformerEncoderBase(FairseqEncoder):
"""
Transformer encoder consisting of *cfg.encoder.layers* layers. Each layer
is a :class:`TransformerEncoderLayer`.
Args:
cfg (TransformerConfig): encoder configuration
dictionary (~fairseq.data.Dictionary): encoding dictionary
embed_tokens (torch.nn.Embedding): input embedding
"""
def __init__(self, cfg, dictionary, embed_tokens, use_rel_pos_enc=False, scaling_for_att=1.0):
self.cfg = cfg
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
)
self.encoder_layerdrop = cfg.encoder.layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = cfg.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
cfg.max_source_positions,
embed_dim,
self.padding_idx,
learned=cfg.encoder.learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
else:
self.layernorm_embedding = None
if not cfg.adaptive_input and cfg.quant_noise.pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
cfg.quant_noise.pq,
cfg.quant_noise.pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.use_rel_pos_enc = use_rel_pos_enc
self.scaling_for_att = scaling_for_att
self.layers.extend(
[self.build_encoder_layer(cfg) for i in range(cfg.encoder.layers)]
)
self.num_layers = len(self.layers)
if cfg.encoder.normalize_before:
self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
else:
self.layer_norm = None
if self.use_rel_pos_enc:
self.pos_emb = RelativePositionalEncoding(embed_dim // cfg.encoder.attention_heads, 160)
def build_encoder_layer(self, cfg):
layer = transformer_layer.TransformerEncoderLayerBase(cfg, has_relative_attention_bias=self.use_rel_pos_enc, scaling_for_att=self.scaling_for_att)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward_embedding(
self, src_tokens, token_embedding: Optional[torch.Tensor] = None
):
# embed tokens and positions
if token_embedding is None:
token_embedding = self.embed_tokens(src_tokens)
x = embed = self.embed_scale * token_embedding
if self.embed_positions is not None:
x = embed + self.embed_positions(src_tokens)
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
if self.quant_noise is not None:
x = self.quant_noise(x)
return x, embed
def forward(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
uniformity_layers: Optional[List[int]] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
return self.forward_scriptable(
src_tokens, src_lengths, return_all_hiddens, token_embeddings, uniformity_layers
)
# TorchScript doesn't support the super() call, so the scriptable subclass
# can't access the base class method in TorchScript.
# The current workaround is to add a helper function with a different name and
# call that helper from the scriptable subclass.
def forward_scriptable(
self,
src_tokens,
src_lengths: Optional[torch.Tensor] = None,
return_all_hiddens: bool = False,
token_embeddings: Optional[torch.Tensor] = None,
uniformity_layers: Optional[List[int]] = None,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
token_embeddings (torch.Tensor, optional): precomputed embeddings
default `None` will recompute embeddings
Returns:
dict:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
has_pads = src_tokens.device.type == "xla" or encoder_padding_mask.any()
x, encoder_embedding = self.forward_embedding(src_tokens, token_embeddings)
# account for padding while computing the representation
if has_pads:
x = x * (1 - encoder_padding_mask.unsqueeze(-1).type_as(x))
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if self.use_rel_pos_enc:
x_len = x.shape[0]
pos_seq = torch.arange(0, x_len).long().to(x.device)
pos_seq = pos_seq[:, None] - pos_seq[None, :]
pos_k, pos_v = self.pos_emb(pos_seq)
else:
pos_k = None
encoder_states = []
uniformity_hiddens = []
if return_all_hiddens:
encoder_states.append(x)
if uniformity_layers is not None and 0 in uniformity_layers:
x = F.normalize(x.float(), dim=-1).type_as(x)
uniformity_hiddens.append(x)
# encoder layers
for i, layer in enumerate(self.layers):
x = layer(
x, encoder_padding_mask=encoder_padding_mask if has_pads else None,
pos_bias=pos_k,
)
if uniformity_layers is not None and i+1 in uniformity_layers:
x = F.normalize(x.float(), dim=-1).type_as(x)
uniformity_hiddens.append(x)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
# The PyTorch Mobile lite interpreter does not support returning NamedTuple from
# `forward`, so we use a dictionary instead.
# TorchScript does not support mixed value types, so the values are all lists.
# An empty list is equivalent to None.
src_lengths = (
src_tokens.ne(self.padding_idx)
.sum(dim=1, dtype=torch.int32)
.reshape(-1, 1)
.contiguous()
)
return {
"encoder_out": [x], # T x B x C
"encoder_padding_mask": [encoder_padding_mask], # B x T
"encoder_embedding": [encoder_embedding], # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"uniformity_hiddens": uniformity_hiddens, # List[T x B x C]
"src_tokens": [],
"src_lengths": [src_lengths],
}
@torch.jit.export
def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
if len(encoder_out["encoder_out"]) == 0:
new_encoder_out = []
else:
new_encoder_out = [encoder_out["encoder_out"][0].index_select(1, new_order)]
if len(encoder_out["encoder_padding_mask"]) == 0:
new_encoder_padding_mask = []
else:
new_encoder_padding_mask = [
encoder_out["encoder_padding_mask"][0].index_select(0, new_order)
]
if len(encoder_out["encoder_embedding"]) == 0:
new_encoder_embedding = []
else:
new_encoder_embedding = [
encoder_out["encoder_embedding"][0].index_select(0, new_order)
]
if len(encoder_out["src_tokens"]) == 0:
src_tokens = []
else:
src_tokens = [(encoder_out["src_tokens"][0]).index_select(0, new_order)]
if len(encoder_out["src_lengths"]) == 0:
src_lengths = []
else:
src_lengths = [(encoder_out["src_lengths"][0]).index_select(0, new_order)]
encoder_states = encoder_out["encoder_states"]
if len(encoder_states) > 0:
for idx, state in enumerate(encoder_states):
encoder_states[idx] = state.index_select(1, new_order)
return {
"encoder_out": new_encoder_out, # T x B x C
"encoder_padding_mask": new_encoder_padding_mask, # B x T
"encoder_embedding": new_encoder_embedding, # B x T x C
"encoder_states": encoder_states, # List[T x B x C]
"src_tokens": src_tokens, # B x T
"src_lengths": src_lengths, # B x 1
}
def max_positions(self):
"""Maximum input length supported by the encoder."""
if self.embed_positions is None:
return self.max_source_positions
return min(self.max_source_positions, self.embed_positions.max_positions)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
print("deleting {0}".format(weights_key))
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
for i in range(self.num_layers):
# update layer norms
self.layers[i].upgrade_state_dict_named(
state_dict, "{}.layers.{}".format(name, i)
)
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) < 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
class TransformerEncoder(TransformerEncoderBase):
def __init__(self, args, dictionary, embed_tokens):
self.args = args
super().__init__(
TransformerConfig.from_namespace(args),
dictionary,
embed_tokens,
use_rel_pos_enc=getattr(args, "use_rel_pos_enc", False),
scaling_for_att=getattr(args, "scaling_for_att", 1.0),
)
def build_encoder_layer(self, args):
return super().build_encoder_layer(
TransformerConfig.from_namespace(args),
)
def PositionalEmbedding(
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
learned: bool = False,
):
if learned:
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
# TODO: The right place for this offset would be inside
# LearnedPositionalEmbedding. Move this there for a cleaner implementation.
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim,
padding_idx,
init_size=num_embeddings + padding_idx + 1,
)
return m
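# Hedged usage sketch (not part of the original file): with a learned table and
# padding_idx = 1, real positions start at index padding_idx + 1, which is why
# num_embeddings is extended by padding_idx + 1 above.
if __name__ == "__main__":
    pos = PositionalEmbedding(num_embeddings=10, embedding_dim=8, padding_idx=1, learned=True)
    toks = torch.tensor([[5, 6, 1]])   # last token is the pad symbol
    out = pos(toks)                    # pad position keeps the zero embedding at index 1
    assert out.shape == (1, 3, 8)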
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/transformer_encoder.py |
# --------------------------------------------------------
# Pre-Training Transformer Decoder for End-to-End ASR Model with Unpaired Speech Data (https://arxiv.org/abs/2203.17113)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/Speech2C
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/pytorch/fairseq
# --------------------------------------------------------
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor
from fairseq.modules import MultiheadAttention as FairseqMultiheadAttention
class MultiheadAttention(FairseqMultiheadAttention):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
scaling_for_att=1.0
):
super().__init__(
embed_dim,
num_heads,
kdim,
vdim,
dropout,
bias,
add_bias_kv,
add_zero_attn,
self_attention,
encoder_decoder_attention,
q_noise,
qn_block_size,
)
self.scaling_for_att = scaling_for_att
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
position_bias: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
assert (src_len, bsz) == value.shape[:2]
if (
not self.onnx_trace
and not is_tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround to make quantization work. Otherwise JIT compilation
# treats the bias in the linear module as a method.
and not torch.jit.is_scripting()
and position_bias is None
):
assert key is not None and value is not None
return F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q *= (1 / self.scaling_for_att)
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
if position_bias is not None:
# first-order relative position bias, Transformer-XL style
# position_bias: (tgt_len, src_len, head_dim)
# reshape q to (tgt_len, bsz * num_heads, head_dim) so each query position i
# is scored against its relative embeddings r_{i-j}
reshape_q = q.contiguous().view(bsz * self.num_heads, -1, self.head_dim).transpose(0, 1)
# B[i, b*h, j] = <reshape_q[i, b*h], position_bias[i, j]>
B = torch.matmul(reshape_q, position_bias.transpose(-2, -1))
# back to (bsz * num_heads, tgt_len, src_len) to match attn_weights
B = B.transpose(0, 1).view(bsz * self.num_heads, position_bias.size(0), position_bias.size(1))
attn_weights += B
attn_weights *= self.scaling_for_att
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if self.scaling_for_att > 1.0:
attn_weights = attn_weights - attn_weights.detach().max(dim=-1, keepdim=True)[0]
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
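# Hedged shape sketch (not part of the original file): with position_bias of shape
# (tgt_len, src_len, head_dim), the forward above adds a first-order relative term
#     attn_weights[b*h, i, j] += <q[b*h, i, :], position_bias[i, j, :]>
# The einsum below reproduces that matmul/transpose sequence as a shape check.
if __name__ == "__main__":
    bsz_heads, tgt_len, head_dim = 2, 5, 4
    q_chk = torch.randn(bsz_heads, tgt_len, head_dim)
    rel_chk = torch.randn(tgt_len, tgt_len, head_dim)      # position_bias (i, j, d)
    bias_chk = torch.einsum("bid,ijd->bij", q_chk, rel_chk)
    assert bias_chk.shape == (bsz_heads, tgt_len, tgt_len)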
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/multihead_attention.py |
# --------------------------------------------------------
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/facebookresearch/fairseq
# --------------------------------------------------------
"""
Modified from https://github.com/facebookresearch/fairseq/blob/main/fairseq/modules/learned_positional_embedding.py
1. Add clamping if the input length exceeds the max-source-tokens
"""
from typing import Dict, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
if self.padding_idx is not None:
self.max_positions = self.num_embeddings - self.padding_idx - 1
else:
self.max_positions = self.num_embeddings
def forward(
self,
input: Tensor,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
positions: Optional[Tensor] = None,
):
"""Input is expected to be of size [bsz x seqlen]."""
assert (positions is None) or (
self.padding_idx is None
), "If positions is pre-computed then padding_idx should not be set."
if positions is None:
if incremental_state is not None:
# positions is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
positions = torch.zeros(
(1, 1), device=input.device, dtype=input.dtype
).fill_(int(self.padding_idx + input.size(1)))
else:
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace
)
positions = torch.clamp(positions, max=self.padding_idx + self.max_positions)
return F.embedding(
positions,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
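# Hedged usage sketch (not part of the original file): the clamping added above keeps
# over-long inputs from indexing past the learned table instead of raising an error.
if __name__ == "__main__":
    emb = LearnedPositionalEmbedding(num_embeddings=8, embedding_dim=4, padding_idx=1)
    long_input = torch.full((1, 20), 5, dtype=torch.long)   # longer than max_positions (6)
    out = emb(long_input)                                    # positions beyond index 7 are clamped
    assert out.shape == (1, 20, 4)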
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/learned_positional_embedding.py |
# --------------------------------------------------------
# The YiTrans End-to-End Speech Translation System for IWSLT 2022 Offline Shared Task (https://arxiv.org/abs/2206.05777)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/YiTrans
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/facebookresearch/fairseq
# --------------------------------------------------------
"""
wav2vec encoder with relative position bias added, modified from
https://github.com/microsoft/SpeechT5/blob/main/Speech2C/speech2c/models/modules/transformer_encoder.py
https://github.com/facebookresearch/fairseq/blob/main/fairseq/models/wav2vec/wav2vec2.py
"""
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.dataclass import ChoiceEnum
from fairseq.modules import (
LayerNorm,
SamePad,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import index_put
from fairseq.distributed import fsdp_wrap
from fairseq.models.wav2vec.utils import pad_to_multiple
## use the multi-head attention variant with relative position bias
from fairseq.models.wav2vec.wav2vec2 import TransformerEncoder as W2vTransformerEncoder
from speechlm.modules.relative_pos_enc import RelativePositionalEncoding
from speechlm.modules.multihead_attention import MultiheadAttention
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
class TransformerEncoder(W2vTransformerEncoder):
def __init__(self, args):
super().__init__(args)
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.required_seq_len_multiple = args.required_seq_len_multiple
self.use_rel_pos_enc = getattr(args, "use_rel_pos_enc", False)
self.pos_conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
layers = []
for _ in range(args.encoder_layers):
layer = TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
has_relative_attention_bias=self.use_rel_pos_enc,
)
if args.checkpoint_activations:
layer = fsdp_wrap(layer)
layer = checkpoint_wrapper(layer)
layers.append(layer)
self.layers = nn.ModuleList(layers)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
if self.use_rel_pos_enc:
self.pos_emb = RelativePositionalEncoding(args.encoder_embed_dim // args.encoder_attention_heads, 160)
self.apply(init_bert_params)
def forward(self, x, padding_mask=None, layer=None):
x, layer_results = self.extract_features(x, padding_mask, layer)
if self.layer_norm_first and layer is None:
x = self.layer_norm(x)
return x, layer_results
def extract_features(self, x, padding_mask=None, tgt_layer=None):
if padding_mask is not None:
x = index_put(x, padding_mask, 0)
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
# pad to the sequence length dimension
x, pad_length = pad_to_multiple(
x, self.required_seq_len_multiple, dim=-2, value=0
)
if pad_length > 0 and padding_mask is None:
padding_mask = x.new_zeros((x.size(0), x.size(1)), dtype=torch.bool)
padding_mask[:, -pad_length:] = True
else:
padding_mask, _ = pad_to_multiple(
padding_mask, self.required_seq_len_multiple, dim=-1, value=True
)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if self.use_rel_pos_enc:
x_len = x.shape[0]
pos_seq = torch.arange(0, x_len).long().to(x.device)
pos_seq = pos_seq[:, None] - pos_seq[None, :]
pos_k, pos_v = self.pos_emb(pos_seq)
else:
pos_k = None
layer_results = []
r = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False, pos_bias=pos_k)
if tgt_layer is not None:
# unpad if needed
if pad_length > 0:
layer_results.append(
(
x[:-pad_length],
z[:, :-pad_length, :-pad_length]
if z is not None
else z,
)
)
else:
layer_results.append((x, z))
if i == tgt_layer:
r = x
break
if r is not None:
x = r
# T x B x C -> B x T x C
x = x.transpose(0, 1)
# undo padding
if pad_length > 0:
x = x[:, :-pad_length]
return x, layer_results
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
has_relative_attention_bias: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
if has_relative_attention_bias:
self.norm_k = LayerNorm(self.embedding_dim//num_attention_heads)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
att_args=None,
pos_bias=None,
):
"""
LayerNorm is applied either before or after the self-attention/FFN
modules, similar to the original Transformer implementation.
"""
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
if pos_bias is not None:
pos_bias = self.norm_k(pos_bias)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
attn_mask=self_attn_mask,
position_bias=pos_bias,
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
position_bias=pos_bias,
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/w2v_encoder.py |