python_code | repo_name | file_path |
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import atexit
import contextlib
import json
import logging
import os
import re
import shutil
import tempfile
from collections import defaultdict
from unittest import mock
import numpy as np
import torch.utils.data as data
from d2go.config import temp_defrost
from d2go.data.datasets import (
ANN_FN,
IM_DIR,
INJECTED_COCO_DATASETS_LUT,
InjectedCocoEntry,
register_dataset_split,
)
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.build import (
get_detection_dataset_dicts as d2_get_detection_dataset_dicts,
)
from detectron2.data.common import set_default_dataset_from_list_serialize_method
from detectron2.utils import comm
from detectron2.utils.file_io import PathManager
from mobile_cv.torch.utils_pytorch.shareables import SharedList
logger = logging.getLogger(__name__)
class AdhocDatasetManager:
# mapping from the new dataset name to an AdhocDataset instance
_REGISTERED = {}
@staticmethod
def add(adhoc_ds):
assert isinstance(adhoc_ds, AdhocDataset)
if adhoc_ds.new_ds_name in AdhocDatasetManager._REGISTERED:
logger.warning(
"Adhoc dataset {} has already been added, skip adding it".format(
adhoc_ds.new_ds_name
)
)
else:
logger.info("Adding new adhoc dataset {} ...".format(adhoc_ds.new_ds_name))
AdhocDatasetManager._REGISTERED[adhoc_ds.new_ds_name] = adhoc_ds
adhoc_ds.register_catalog()
@staticmethod
def remove(adhoc_ds):
try:
assert isinstance(adhoc_ds, AdhocDataset)
if adhoc_ds.new_ds_name not in AdhocDatasetManager._REGISTERED:
logger.warning(
"Adhoc dataset {} has already been removed, skip removing it".format(
adhoc_ds.new_ds_name
)
)
else:
logger.info("Remove adhoc dataset {} ...".format(adhoc_ds.new_ds_name))
del AdhocDatasetManager._REGISTERED[adhoc_ds.new_ds_name]
finally:
adhoc_ds.cleanup()
@staticmethod
@atexit.register
def _atexit():
for ds in AdhocDatasetManager._REGISTERED.values():
logger.info("Remove remaining adhoc dataset: {}".format(ds.new_ds_name))
ds.cleanup()
class AdhocDataset(object):
def __init__(self, new_ds_name):
assert isinstance(new_ds_name, str)
self.new_ds_name = new_ds_name
def register_catalog(self):
raise NotImplementedError()
def cleanup(self):
raise NotImplementedError()
class CallFuncWithJsonFile(object):
"""
An instance of this class is a parameterless callable that calls its `func` with its
`json_file`. It can be registered in DatasetCatalog, which later on provides
access to the json file.
"""
def __init__(self, func, json_file):
self.func = func
self.json_file = json_file
def __call__(self):
return self.func(self.json_file)
class CallFuncWithNameAndJsonFile(object):
"""
Same purpose as CallFuncWithJsonFile, but also passes `name` to `func` as an argument
"""
def __init__(self, func, json_file, name):
self.func = func
self.name = name
self.json_file = json_file
def __call__(self):
return self.func(self.json_file, self.name)
class AdhocCOCODataset(AdhocDataset):
def __init__(self, src_ds_name, new_ds_name):
super().__init__(new_ds_name)
# NOTE: only support single source dataset now
assert isinstance(src_ds_name, str)
self.src_ds_name = src_ds_name
def new_json_dict(self, json_dict):
raise NotImplementedError()
def register_catalog(self):
"""
Adhoc COCO (json) dataset assumes the derived dataset can be created by only
changing the json file. Currently it supports two sources: 1) the dataset is
registered using the standard COCO registering functions in D2 or
register_dataset_split from D2Go, in which case the `json_file` from the metadata
is used to access the json file; 2) the load func in DatasetCatalog is an instance
of CallFuncWithJsonFile, which gives access to the json_file. In both cases, the
metadata will be the same except for the `name` and potentially the `json_file`.
"""
logger.info("Register {} from {}".format(self.new_ds_name, self.src_ds_name))
metadata = MetadataCatalog.get(self.src_ds_name)
load_func = DatasetCatalog[self.src_ds_name]
src_json_file = (
load_func.json_file
if isinstance(load_func, CallFuncWithJsonFile)
else metadata.json_file
)
# TODO cache ?
with PathManager.open(src_json_file) as f:
json_dict = json.load(f)
assert "images" in json_dict, "Only support COCO-style json!"
json_dict = self.new_json_dict(json_dict)
self.tmp_dir = tempfile.mkdtemp(prefix="detectron2go_tmp_datasets")
tmp_file = os.path.join(self.tmp_dir, "{}.json".format(self.new_ds_name))
with open(tmp_file, "w") as f:
json.dump(json_dict, f)
# re-register DatasetCatalog
if isinstance(load_func, CallFuncWithJsonFile):
new_func = CallFuncWithJsonFile(func=load_func.func, json_file=tmp_file)
DatasetCatalog.register(self.new_ds_name, new_func)
elif isinstance(load_func, CallFuncWithNameAndJsonFile):
new_func = CallFuncWithNameAndJsonFile(
func=load_func.func, name=self.new_ds_name, json_file=tmp_file
)
DatasetCatalog.register(self.new_ds_name, new_func)
elif self.src_ds_name in INJECTED_COCO_DATASETS_LUT:
_src_func, _src_dict = INJECTED_COCO_DATASETS_LUT[self.src_ds_name]
split_dict = {**_src_dict, ANN_FN: tmp_file, IM_DIR: metadata.image_root}
_src_func(self.new_ds_name, split_dict=split_dict)
INJECTED_COCO_DATASETS_LUT[self.new_ds_name] = InjectedCocoEntry(
func=_src_func, split_dict=split_dict
)
else:
# NOTE: only supports COCODataset as DS_TYPE since we cannot reconstruct
# the split_dict
register_dataset_split(
self.new_ds_name,
split_dict={ANN_FN: tmp_file, IM_DIR: metadata.image_root},
)
# re-register MetadataCatalog
metadata_dict = metadata.as_dict()
metadata_dict["name"] = self.new_ds_name
if "json_file" in metadata_dict:
metadata_dict["json_file"] = tmp_file
if MetadataCatalog.get(self.new_ds_name):
MetadataCatalog.remove(self.new_ds_name)
MetadataCatalog.get(self.new_ds_name).set(**metadata_dict)
def cleanup(self):
# remove temporarily registered dataset and json file
DatasetCatalog.pop(self.new_ds_name, None)
MetadataCatalog.pop(self.new_ds_name, None)
if hasattr(self, "tmp_dir"):
shutil.rmtree(self.tmp_dir)
class COCOSubsetWithNImages(AdhocCOCODataset):
_SUPPORTED_SAMPLING = ["frontmost", "random"]
def __init__(self, src_ds_name, num_images, sampling):
super().__init__(
src_ds_name=src_ds_name,
new_ds_name="{}_{}{}".format(src_ds_name, sampling, num_images),
)
self.num_images = num_images
self.sampling = sampling
def new_json_dict(self, json_dict):
all_images = json_dict["images"]
if self.sampling == "frontmost":
new_images = all_images[: self.num_images]
elif self.sampling == "random":
# use fixed seed so results are repeatable
indices = np.random.RandomState(seed=42).permutation(len(all_images))
new_images = [all_images[i] for i in indices[: self.num_images]]
else:
raise NotImplementedError(
"COCOSubsetWithNImages doesn't support sampling method: {}".format(
self.sampling
)
)
new_image_ids = {im["id"] for im in new_images}
new_annotations = [
ann for ann in json_dict["annotations"] if ann["image_id"] in new_image_ids
]
json_dict["images"] = new_images
json_dict["annotations"] = new_annotations
return json_dict
class COCOSubsetWithGivenImages(AdhocCOCODataset):
def __init__(self, src_ds_name, file_names, prefix="given"):
super().__init__(
src_ds_name=src_ds_name,
new_ds_name="{}_{}{}".format(src_ds_name, prefix, len(file_names)),
)
self.file_names = file_names
def new_json_dict(self, json_dict):
all_images = json_dict["images"]
file_name_to_im = {im["file_name"]: im for im in all_images}
new_images = [file_name_to_im[file_name] for file_name in self.file_names]
# re-assign image id to keep the order (COCO loads images by id order)
old_id_to_new_id = {im["id"]: i for i, im in enumerate(new_images)}
new_annotations = [
ann
for ann in json_dict["annotations"]
if ann["image_id"] in old_id_to_new_id
]
# update image id
for im in new_images:
im["id"] = old_id_to_new_id[im["id"]]
for anno in new_annotations:
anno["image_id"] = old_id_to_new_id[anno["image_id"]]
json_dict["images"] = new_images
json_dict["annotations"] = new_annotations
return json_dict
class COCOWithClassesToUse(AdhocCOCODataset):
def __init__(self, src_ds_name, classes_to_use):
# check if the name is already a derived dataset and recover the source name
res = re.match("(?P<src>.+)@(?P<num>[0-9]+)classes", src_ds_name)
if res is not None:
src_ds_name = res["src"]
super().__init__(
src_ds_name=src_ds_name,
new_ds_name="{}@{}classes".format(src_ds_name, len(classes_to_use)),
)
self.classes_to_use = classes_to_use
def new_json_dict(self, json_dict):
categories = json_dict["categories"]
new_categories = [
cat for cat in categories if cat["name"] in self.classes_to_use
]
new_category_ids = {cat["id"] for cat in new_categories}
new_annotations = [
ann
for ann in json_dict["annotations"]
if ann["category_id"] in new_category_ids
]
json_dict["categories"] = new_categories
json_dict["annotations"] = new_annotations
return json_dict
class ClipLengthGroupedDataset(data.IterableDataset):
"""
Batch data that have the same clip length and similar aspect ratio.
In this implementation, clips with the same length and whose aspect
ratio is < (or >) 1 are batched together.
This makes training with different clip lengths possible and improves
training speed, because the images then need less padding to form a batch.
"""
def __init__(self, dataset, batch_size):
"""
Args:
dataset: an iterable. Each element must be a dict with keys
"width" and "height", which will be used to batch data.
batch_size (int):
"""
self.dataset = dataset
self.batch_size = batch_size
self._buckets = defaultdict(list)
def __iter__(self):
for d in self.dataset:
clip_length = len(d["frames"])
h, w = d["height"], d["width"]
aspect_ratio_bucket_id = 0 if h > w else 1
bucket = self._buckets[(clip_length, aspect_ratio_bucket_id)]
bucket.append(d)
if len(bucket) == self.batch_size:
yield bucket[:]
del bucket[:]
@contextlib.contextmanager
def register_sub_dataset_with_n_images(dataset_name, num_images, sampling):
"""
Temporarily register a sub-dataset created from `dataset_name`, containing
`num_images` images selected according to `sampling`.
"""
# when `num_images` is not larger than 0, return original dataset
if num_images <= 0:
yield dataset_name
return
# only support coco for now
assert sampling in COCOSubsetWithNImages._SUPPORTED_SAMPLING
new_dataset = COCOSubsetWithNImages(dataset_name, num_images, sampling)
AdhocDatasetManager.add(new_dataset)
try:
yield new_dataset.new_ds_name
finally:
AdhocDatasetManager.remove(new_dataset)
@contextlib.contextmanager
def register_sub_dataset_with_given_images(*args, **kwargs):
new_dataset = COCOSubsetWithGivenImages(*args, **kwargs)
AdhocDatasetManager.add(new_dataset)
try:
yield new_dataset.new_ds_name
finally:
AdhocDatasetManager.remove(new_dataset)
@contextlib.contextmanager
def maybe_subsample_n_images(cfg, is_train=False):
"""
Create a new config whose train/test datasets only take a subsample of at most
`max_images` images. Use all images (no-op) when `max_images` <= 0.
"""
max_images = cfg.D2GO_DATA.TEST.MAX_IMAGES
sampling = cfg.D2GO_DATA.TEST.SUBSET_SAMPLING
with contextlib.ExitStack() as stack: # python 3.3+
new_splits = tuple(
stack.enter_context(
register_sub_dataset_with_n_images(ds, max_images, sampling)
)
for ds in (cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST)
)
new_cfg = cfg.clone()
with temp_defrost(new_cfg):
if is_train:
new_cfg.DATASETS.TRAIN = new_splits
else:
new_cfg.DATASETS.TEST = new_splits
yield new_cfg
def update_cfg_if_using_adhoc_dataset(cfg):
if cfg.D2GO_DATA.DATASETS.TRAIN_CATEGORIES:
new_train_datasets = [
COCOWithClassesToUse(name, cfg.D2GO_DATA.DATASETS.TRAIN_CATEGORIES)
for name in cfg.DATASETS.TRAIN
]
[AdhocDatasetManager.add(new_ds) for new_ds in new_train_datasets]
with temp_defrost(cfg):
cfg.DATASETS.TRAIN = tuple(ds.new_ds_name for ds in new_train_datasets)
# If present, we also need to update the dataset names for the WeightedTrainingSampler
if cfg.DATASETS.TRAIN_REPEAT_FACTOR:
for ds_to_repeat_factor in cfg.DATASETS.TRAIN_REPEAT_FACTOR:
original_ds_name = ds_to_repeat_factor[0]
# Search for the corresponding dataset name, so we do not rely on the order
for ds in new_train_datasets:
if ds.src_ds_name == original_ds_name:
ds_to_repeat_factor[0] = ds.new_ds_name
break
if cfg.D2GO_DATA.DATASETS.TEST_CATEGORIES:
new_test_datasets = [
COCOWithClassesToUse(ds, cfg.D2GO_DATA.DATASETS.TEST_CATEGORIES)
for ds in cfg.DATASETS.TEST
]
[AdhocDatasetManager.add(new_ds) for new_ds in new_test_datasets]
with temp_defrost(cfg):
cfg.DATASETS.TEST = tuple(ds.new_ds_name for ds in new_test_datasets)
return cfg
class _FakeListObj(object):
def __init__(self, size):
self.size = size
def __len__(self):
return self.size
def __getitem__(self, idx):
raise NotImplementedError(
"This is a fake list, accessing this list should not happen"
)
def local_master_get_detection_dataset_dicts(*args, **kwargs):
logger.info("Only load dataset dicts on local master process ...")
dataset_dicts = (
d2_get_detection_dataset_dicts(*args, **kwargs)
if comm.get_local_rank() == 0
else []
)
comm.synchronize()
dataset_size = comm.all_gather(len(dataset_dicts))[0]
if comm.get_local_rank() != 0:
dataset_dicts = _FakeListObj(dataset_size)
return dataset_dicts
@contextlib.contextmanager
def configure_dataset_creation(cfg):
"""
Context manager for configuring settings used during dataset creation. It supports:
- offloading the dataset to shared memory to reduce RAM usage.
- (experimental) offloading the dataset to a disk cache to further reduce RAM usage.
- replacing D2's get_detection_dataset_dicts with a local-master-only version.
"""
dataset_from_list_offload_method = SharedList # use SharedList by default
if cfg.D2GO_DATA.DATASETS.DISK_CACHE.ENABLED:
# delay the import to avoid atexit cleanup
from d2go.data.disk_cache import DiskCachedList
dataset_from_list_offload_method = DiskCachedList
load_dataset_from_local_master = cfg.D2GO_DATA.DATASETS.DISK_CACHE.ENABLED
with contextlib.ExitStack() as stack:
ctx_managers = [
set_default_dataset_from_list_serialize_method(
dataset_from_list_offload_method
)
]
if load_dataset_from_local_master:
ctx_managers.append(
mock.patch(
"detectron2.data.build.get_detection_dataset_dicts",
side_effect=local_master_get_detection_dataset_dicts,
)
)
for ctx in ctx_managers:
stack.enter_context(ctx)
yield
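
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# ClipLengthGroupedDataset buffers samples by (clip length, aspect-ratio bucket) and
# yields a list of `batch_size` dicts once a bucket fills up. The toy samples below
# are assumptions made only for this demo.
if __name__ == "__main__":
    toy_samples = [
        {"frames": [0, 1], "height": 480, "width": 640},     # length 2, landscape
        {"frames": [0, 1, 2], "height": 640, "width": 480},  # length 3, portrait
        {"frames": [2, 3], "height": 360, "width": 640},     # length 2, landscape
    ]
    for batch in ClipLengthGroupedDataset(toy_samples, batch_size=2):
        # only the two landscape clips of length 2 complete a batch here
        print([len(d["frames"]) for d in batch])  # -> [2, 2]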
| d2go-main | d2go/data/utils.py |
#!/usr/bin/env python3
from typing import List, NamedTuple, Tuple
from detectron2.utils.registry import Registry
KEYPOINT_METADATA_REGISTRY = Registry("KEYPOINT_METADATA")
KEYPOINT_METADATA_REGISTRY.__doc__ = "Registry for keypoint metadata definitions"
class KeypointMetadata(NamedTuple):
names: List[str]
flip_map: List[Tuple[str, str]]
connection_rules: List[Tuple[str, str, Tuple[int, int, int]]]
def to_dict(self):
return {
"keypoint_names": self.names,
"keypoint_flip_map": self.flip_map,
"keypoint_connection_rules": self.connection_rules,
}
def get_keypoint_metadata(name):
return KEYPOINT_METADATA_REGISTRY.get(name)().to_dict()
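
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# A metadata definition is registered as a callable returning KeypointMetadata and is
# later fetched by name; `toy_two_point_metadata` below is a hypothetical example.
@KEYPOINT_METADATA_REGISTRY.register()
def toy_two_point_metadata():
    return KeypointMetadata(
        names=["left_eye", "right_eye"],
        flip_map=[("left_eye", "right_eye")],
        connection_rules=[("left_eye", "right_eye", (102, 204, 255))],
    )

if __name__ == "__main__":
    # returns the dict form expected by MetadataCatalog.set(**...)
    print(get_keypoint_metadata("toy_two_point_metadata"))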
| d2go-main | d2go/data/keypoint_metadata_registry.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.utils.registry import Registry
D2GO_DATA_MAPPER_REGISTRY = Registry("D2GO_DATA_MAPPER")
def build_dataset_mapper(cfg, is_train, *args, **kwargs):
name = cfg.D2GO_DATA.MAPPER.NAME
return D2GO_DATA_MAPPER_REGISTRY.get(name)(cfg, is_train, *args, **kwargs)
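
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# A custom mapper is registered by class name and selected at runtime through
# cfg.D2GO_DATA.MAPPER.NAME; `MyIdentityMapper` is a hypothetical example.
@D2GO_DATA_MAPPER_REGISTRY.register()
class MyIdentityMapper:
    def __init__(self, cfg, is_train=True):
        self.is_train = is_train

    def __call__(self, dataset_dict):
        # a real mapper would read the image and build the "image"/"instances" fields
        return dataset_dict

# With cfg.D2GO_DATA.MAPPER.NAME = "MyIdentityMapper":
#   mapper = build_dataset_mapper(cfg, is_train=True)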
| d2go-main | d2go/data/dataset_mappers/build.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from d2go.data.dataset_mappers.build import D2GO_DATA_MAPPER_REGISTRY
from d2go.data.dataset_mappers.data_reading import (
read_image_with_prefetch,
read_sem_seg_file_with_prefetch,
)
from d2go.utils.helper import retryable
from detectron2.data import detection_utils as utils, transforms as T
from detectron2.data.transforms.augmentation import AugInput, AugmentationList
logger = logging.getLogger(__name__)
PREFETCHED_FILE_NAME = "prefetch_image"
PREFETCHED_SEM_SEG_FILE_NAME = "prefetch_sem_seg"
@D2GO_DATA_MAPPER_REGISTRY.register()
class D2GoDatasetMapper(object):
def __init__(self, cfg, is_train=True, image_loader=None, tfm_gens=None):
self.tfm_gens = (
tfm_gens
if tfm_gens is not None
else utils.build_transform_gen(cfg, is_train)
)
if cfg.INPUT.CROP.ENABLED and is_train:
self.crop_gen = T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)
# D2GO NOTE: when INPUT.CROP.ENABLED, don't allow using RandomCropOp
assert all(not isinstance(gen, T.RandomCrop) for gen in self.tfm_gens)
else:
self.crop_gen = None
# fmt: off
self.img_format = cfg.INPUT.FORMAT # noqa
self.mask_on = cfg.MODEL.MASK_ON # noqa
self.mask_format = cfg.INPUT.MASK_FORMAT # noqa
self.keypoint_on = cfg.MODEL.KEYPOINT_ON # noqa
# fmt: on
if self.keypoint_on and is_train:
# Flip only makes sense in training
self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(
cfg.DATASETS.TRAIN
)
else:
self.keypoint_hflip_indices = None
self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
if self.load_proposals:
self.proposal_min_box_size = cfg.MODEL.PROPOSAL_GENERATOR.MIN_SIZE
self.proposal_topk = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
self.is_train = is_train
# Setup image loader:
self.image_loader = image_loader
self.backfill_size = cfg.D2GO_DATA.MAPPER.BACKFILL_SIZE
self.retry = cfg.D2GO_DATA.MAPPER.RETRY
self.catch_exception = cfg.D2GO_DATA.MAPPER.CATCH_EXCEPTION
if self.backfill_size:
if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
logger.warning(
"ASPECT_RATIO_GROUPING may not work if image's width & height"
" are not given in json dataset when calling extended_coco_load,"
" if you encounter issue, consider disable ASPECT_RATIO_GROUPING."
)
self._error_count = 0
self._total_counts = 0
self._error_types = {}
def _original_call(self, dataset_dict):
"""
Modified from detectron2's original __call__ in DatasetMapper
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = self._read_image(dataset_dict, format=self.img_format)
if not self.backfill_size:
utils.check_image_size(dataset_dict, image)
image, dataset_dict = self._custom_transform(image, dataset_dict)
inputs = AugInput(image=image)
if "annotations" not in dataset_dict:
transforms = AugmentationList(
([self.crop_gen] if self.crop_gen else []) + self.tfm_gens
)(inputs)
image = inputs.image
else:
# pass additional arguments, will only be used when the Augmentation
# takes `annotations` as input
inputs.annotations = dataset_dict["annotations"]
inputs.boxes = [
utils.get_bbox(obj)
for obj in dataset_dict["annotations"]
if obj.get("iscrowd", 0) == 0
]
# Crop around an instance if there are instances in the image.
if self.crop_gen:
crop_tfm = utils.gen_crop_transform_with_instance(
self.crop_gen.get_crop_size(image.shape[:2]),
image.shape[:2],
np.random.choice(dataset_dict["annotations"]),
)
inputs.image = crop_tfm.apply_image(image)
transforms = AugmentationList(self.tfm_gens)(inputs)
image = inputs.image
if self.crop_gen:
transforms = crop_tfm + transforms
# Cache identical transforms in dataset_dict for subclass mappers
# TODO T122215878 Find more explicit way to expose transforms used
dataset_dict["transforms"] = transforms
image_shape = image.shape[:2] # h, w
if image.ndim == 2:
image = np.expand_dims(image, 2)
dataset_dict["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
if self.load_proposals:
utils.transform_proposals(
dataset_dict,
image_shape,
transforms,
proposal_topk=self.proposal_topk,
min_box_size=self.proposal_min_box_size,
)
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
annos = [
utils.transform_instance_annotations(
obj,
transforms,
image_shape,
keypoint_hflip_indices=self.keypoint_hflip_indices,
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.mask_format
)
# Create a tight bounding box from masks, useful when image is cropped
if self.crop_gen and instances.has("gt_masks"):
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = read_sem_seg_file_with_prefetch(
dataset_dict.pop("sem_seg_file_name"),
prefetched=dataset_dict.get(PREFETCHED_SEM_SEG_FILE_NAME, None),
)
if len(sem_seg_gt.shape) > 2:
sem_seg_gt = sem_seg_gt.squeeze(2)
sem_seg_gt = transforms.apply_segmentation(sem_seg_gt)
sem_seg_gt = torch.as_tensor(sem_seg_gt.astype("long"))
dataset_dict["sem_seg"] = sem_seg_gt
# extend standard D2 semantic segmentation to support multiple segmentation
# files, each file can represent a class
if "multi_sem_seg_file_names" in dataset_dict:
raise NotImplementedError()
if "_post_process_" in dataset_dict:
proc_func = dataset_dict.pop("_post_process_")
dataset_dict = proc_func(dataset_dict)
return dataset_dict
def __call__(self, dataset_dict):
self._total_counts += 1
@retryable(num_tries=self.retry, sleep_time=0.1)
def _f():
return self._original_call(dataset_dict)
if not self.catch_exception:
return _f()
try:
return _f()
except Exception as e:
self._error_count += 1
# if self._error_count % 10 == 1:
# # print the stacktrace for easier debugging
# traceback.print_exc()
error_type = type(e).__name__
self._error_types[error_type] = self._error_types.get(error_type, 0) + 1
if self._error_count % 100 == 0:
logger.warning(
"{}Error when applying transform for dataset_dict: {};"
" error rate {}/{} ({:.2f}%), msg: {}".format(
self._get_logging_prefix(),
dataset_dict,
self._error_count,
self._total_counts,
100.0 * self._error_count / self._total_counts,
repr(e),
)
)
self._log_error_type_stats()
# NOTE: the contract with MapDataset allows return `None` such that
# it'll randomly use other element in the dataset. We use this
# feature to handle error.
return None
def _get_logging_prefix(self):
worker_info = torch.utils.data.get_worker_info()
if not worker_info:
return ""
prefix = "[worker: {}/{}] ".format(worker_info.id, worker_info.num_workers)
return prefix
def _log_error_type_stats(self):
error_type_count_msgs = [
"{}: {}/{} ({}%)".format(
k, v, self._total_counts, 100.0 * v / self._total_counts
)
for k, v in self._error_types.items()
]
logger.warning(
"{}Error statistics:\n{}".format(
self._get_logging_prefix(), "\n".join(error_type_count_msgs)
)
)
def _read_image(self, dataset_dict, format=None):
if not (self.image_loader and self.image_loader.support(dataset_dict)):
# fallback to use D2's read_image
image = read_image_with_prefetch(
dataset_dict["file_name"],
format=format,
prefetched=dataset_dict.get(PREFETCHED_FILE_NAME),
)
if self.backfill_size:
h, w, _ = image.shape
dataset_dict["width"] = w
dataset_dict["height"] = h
return image
image = self.image_loader(dataset_dict)
if self.backfill_size:
dataset_dict["width"] = image.width
dataset_dict["height"] = image.height
return utils.convert_PIL_to_numpy(image, format)
def _custom_transform(self, image, dataset_dict):
"""
Override this method to inject custom transform.
"""
return image, dataset_dict
def __repr__(self):
return (
self.__class__.__name__
+ ":\n"
+ "\n".join(
[
" is_train: {}".format(self.is_train),
" image_loader: {}".format(self.image_loader),
" tfm_gens: \n{}".format(
"\n".join([" - {}".format(x) for x in self.tfm_gens])
),
]
)
)
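
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# With a D2Go config, the mapper is normally built through the registry and applied to
# COCO-style dicts from DatasetCatalog; when D2GO_DATA.MAPPER.CATCH_EXCEPTION is on, a
# failing sample returns None so detectron2's MapDataset can resample another index.
# The runner import and the file path below are assumptions made only for this demo.
#
#   from d2go.runner import GeneralizedRCNNRunner
#   cfg = GeneralizedRCNNRunner().get_default_cfg()
#   mapper = D2GoDatasetMapper(cfg, is_train=True)
#   example = {"file_name": "/path/to/img.jpg", "height": 480, "width": 640, "image_id": 0}
#   out = mapper(example)  # dict with an "image" tensor (CHW, float32), or None on error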
| d2go-main | d2go/data/dataset_mappers/d2go_dataset_mapper.py |
from io import BytesIO
import numpy as np
from detectron2.data import detection_utils as utils
from detectron2.utils.file_io import PathManager
from PIL import Image
def read_image_with_prefetch(file_name, format=None, prefetched=None):
if prefetched is None:
return utils.read_image(file_name, format)
image = Image.open(BytesIO(prefetched.numpy().view()))
# work around this bug: https://github.com/python-pillow/Pillow/issues/3973
image = utils._apply_exif_orientation(image)
return utils.convert_PIL_to_numpy(image, format)
def read_sem_seg_file_with_prefetch(file_name: str, prefetched=None):
"""
Segmentation mask annotations can be stored as:
.PNG files
.npy uncompressed numpy files
"""
assert file_name.endswith(".png") or file_name.endswith(".npy")
sem_seg_type = file_name[-len(".---") :]
if sem_seg_type == ".png":
return read_image_with_prefetch(file_name, format="L", prefetched=prefetched)
elif sem_seg_type == ".npy":
if prefetched is None:
with PathManager.open(file_name, "rb") as f:
return np.load(f)
else:
return prefetched.numpy()
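
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# `prefetched` is expected to be a torch uint8 tensor holding the raw encoded bytes of
# the file; when it is given, the path is not read from disk. The in-memory PNG below
# is an assumption made only for this demo.
if __name__ == "__main__":
    import torch

    buf = BytesIO()
    Image.fromarray(np.zeros((4, 4, 3), dtype=np.uint8)).save(buf, format="PNG")
    raw = torch.from_numpy(np.frombuffer(buf.getvalue(), dtype=np.uint8).copy())
    img = read_image_with_prefetch("unused.png", format="BGR", prefetched=raw)
    print(img.shape)  # (4, 4, 3)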
| d2go-main | d2go/data/dataset_mappers/data_reading.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from d2go.data.dataset_mappers.build import (
build_dataset_mapper,
D2GO_DATA_MAPPER_REGISTRY,
)
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from d2go.data.dataset_mappers.rotated_dataset_mapper import RotatedDatasetMapper
__all__ = [
"build_dataset_mapper",
"D2GO_DATA_MAPPER_REGISTRY",
"D2GoDatasetMapper",
"RotatedDatasetMapper",
]
# Populating registries
# @fb-only: from d2go.data.dataset_mappers import fb as _fb # isort:skip # noqa
| d2go-main | d2go/data/dataset_mappers/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from d2go.data.dataset_mappers.build import D2GO_DATA_MAPPER_REGISTRY
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from detectron2.data import detection_utils as utils, transforms as T
from detectron2.structures import BoxMode, Instances, RotatedBoxes
logger = logging.getLogger(__name__)
@D2GO_DATA_MAPPER_REGISTRY.register()
class RotatedDatasetMapper(D2GoDatasetMapper):
def _original_call(self, dataset_dict):
"""
Modified from detectron2's original __call__ in DatasetMapper
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = self._read_image(dataset_dict, format=self.img_format)
if not self.backfill_size:
utils.check_image_size(dataset_dict, image)
if "annotations" not in dataset_dict:
image, transforms = T.apply_transform_gens(
([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
)
else:
# Crop around an instance if there are instances in the image.
# USER: Remove if you don't use cropping
if self.crop_gen:
crop_tfm = utils.gen_crop_transform_with_instance(
self.crop_gen.get_crop_size(image.shape[:2]),
image.shape[:2],
np.random.choice(dataset_dict["annotations"]),
)
image = crop_tfm.apply_image(image)
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
if self.crop_gen:
transforms = crop_tfm + transforms
image_shape = image.shape[:2] # h, w
dataset_dict["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
assert not self.load_proposals, "Not supported!"
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
# Convert dataset_dict["annotations"] to dataset_dict["instances"]
annotations = [
obj
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
# Convert either rotated box or horizontal box to XYWHA_ABS format
original_boxes = [
BoxMode.convert(
box=obj["bbox"],
from_mode=obj["bbox_mode"],
to_mode=BoxMode.XYWHA_ABS,
)
for obj in annotations
]
transformed_boxes = transforms.apply_rotated_box(
np.array(original_boxes, dtype=np.float64)
)
instances = Instances(image_shape)
instances.gt_classes = torch.tensor(
[obj["category_id"] for obj in annotations], dtype=torch.int64
)
instances.gt_boxes = RotatedBoxes(transformed_boxes)
instances.gt_boxes.clip(image_shape)
dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
return dataset_dict
| d2go-main | d2go/data/dataset_mappers/rotated_dataset_mapper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import logging
from typing import Dict, List, Optional, Tuple
from detectron2.config import CfgNode
from detectron2.data import transforms as d2T
from detectron2.utils.registry import Registry
logger = logging.getLogger(__name__)
TRANSFORM_OP_REGISTRY = Registry("D2GO_TRANSFORM_REGISTRY")
def _json_load(arg_str: str) -> Dict:
try:
return json.loads(arg_str)
except json.decoder.JSONDecodeError as e:
logger.warning("Can't load arg_str: {}".format(arg_str))
raise e
# example repr: "ResizeShortestEdgeOp"
@TRANSFORM_OP_REGISTRY.register()
def ResizeShortestEdgeOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[d2T.Transform]:
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
max_size = cfg.INPUT.MAX_SIZE_TRAIN
sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
max_size = cfg.INPUT.MAX_SIZE_TEST
sample_style = "choice"
if sample_style == "range":
assert (
len(min_size) == 2
), "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
tfm_gens = []
if not min_size == 0: # set to zero to disable resize
tfm_gens.append(d2T.ResizeShortestEdge(min_size, max_size, sample_style))
return tfm_gens
# example repr: "ResizeShortestEdgeSquareOp"
@TRANSFORM_OP_REGISTRY.register()
def ResizeShortestEdgeSquareOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[d2T.Transform]:
"""Resize the input to square using INPUT.MIN_SIZE_TRAIN or INPUT.MIN_SIZE_TEST
without keeping aspect ratio
"""
if is_train:
min_size = cfg.INPUT.MIN_SIZE_TRAIN
assert (
isinstance(min_size, (list, tuple)) and len(min_size) == 1
), "Only a signle size is supported"
min_size = min_size[0]
else:
min_size = cfg.INPUT.MIN_SIZE_TEST
tfm_gens = []
if not min_size == 0: # set to zero to disable resize
tfm_gens.append(d2T.Resize(shape=[min_size, min_size]))
return tfm_gens
@TRANSFORM_OP_REGISTRY.register()
def ResizeOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[d2T.Transform]:
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [d2T.Resize(**kwargs)]
_TRANSFORM_REPR_SEPARATOR = "::"
def parse_tfm_gen_repr(tfm_gen_repr: str) -> Tuple[str, Optional[str]]:
if tfm_gen_repr.count(_TRANSFORM_REPR_SEPARATOR) == 0:
return tfm_gen_repr, None
else:
# Split only after first delimiter, to allow for:
# - nested transforms, e.g:
# 'SomeTransformOp::{"args": ["SubTransform2Op::{\\"param1\\": 0, \\"param2\\": false}", "SubTransform2Op::{\\"param1\\": 0.8}"], "other_args": 2}'
# - list of transforms, e.g.:
# ["SubTransform2Op::{\\"param1\\": 0, \\"param2\\": false}", "SubTransform2Op::{\\"param1\\": 0.8}"]
# TODO(T144470024): Support recursive parsing. For now, it's user responsibility to ensure the nested transforms are parsed correctly.
return tfm_gen_repr.split(_TRANSFORM_REPR_SEPARATOR, 1)
def build_transform_gen(
cfg: CfgNode, is_train: bool, tfm_gen_repr_list: Optional[List[str]] = None
) -> List[d2T.Transform]:
"""
This function builds a list of TransformGen or Transform objects using a list of
strings (`tfm_gen_repr_list). If list is not provided, cfg.D2GO_DATA.AUG_OPS.TRAIN/TEST is used.
Each string (aka. `tfm_gen_repr`) will be split into `name` and `arg_str` (separated by "::");
the `name` will be used to lookup the registry while `arg_str` will be used as argument.
Each function in registry needs to take `cfg`, `arg_str` and `is_train` as
input, and return a list of TransformGen or Transform objects.
"""
tfm_gen_repr_list = tfm_gen_repr_list or (
cfg.D2GO_DATA.AUG_OPS.TRAIN if is_train else cfg.D2GO_DATA.AUG_OPS.TEST
)
tfm_gens = [
TRANSFORM_OP_REGISTRY.get(name)(cfg, arg_str, is_train)
for name, arg_str in [
parse_tfm_gen_repr(tfm_gen_repr) for tfm_gen_repr in tfm_gen_repr_list
]
]
assert all(isinstance(gens, list) for gens in tfm_gens)
tfm_gens = [gen for gens in tfm_gens for gen in gens]
assert all(isinstance(gen, (d2T.Transform, d2T.TransformGen)) for gen in tfm_gens)
return tfm_gens
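
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# Each entry of cfg.D2GO_DATA.AUG_OPS.TRAIN/TEST is "<OpName>::<json args>": the name is
# looked up in TRANSFORM_OP_REGISTRY and the JSON payload becomes `arg_str`. Passing
# `tfm_gen_repr_list` explicitly bypasses the cfg lookup, so cfg=None is only safe for
# ops that ignore cfg (an assumption made for this demo).
if __name__ == "__main__":
    tfm_gens = build_transform_gen(
        cfg=None,
        is_train=False,
        tfm_gen_repr_list=['ResizeOp::{"shape": [224, 224]}'],
    )
    print(tfm_gens)  # [Resize(...)] built from the JSON kwargs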
| d2go-main | d2go/data/transforms/build.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Callable, List, Union
import detectron2.data.transforms.augmentation as aug
import numpy as np
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data import detection_utils as du
from detectron2.data.transforms.transform import Transform
from fvcore.transforms.transform import BlendTransform
class InvertibleColorTransform(Transform):
"""
Generic wrapper for invertible photometric transforms.
These transformations should only affect the color space and
not the coordinate space of the image (e.g. annotation
coordinates such as bounding boxes should not be changed)
"""
def __init__(self, op: Callable, inverse_op: Callable):
"""
Args:
op (Callable): operation to be applied to the image,
which takes in an ndarray and returns an ndarray.
"""
if not callable(op):
raise ValueError("op parameter should be callable")
if not callable(inverse_op):
raise ValueError("inverse_op parameter should be callable")
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray) -> np.ndarray:
return self.op(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
return coords
def inverse(self) -> Transform:
return InvertibleColorTransform(self.inverse_op, self.op)
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
return segmentation
class RandomContrastYUV(aug.Augmentation):
"""
Randomly transforms contrast for images in YUV format.
See similar:
detectron2.data.transforms.RandomContrast,
detectron2.data.transforms.RandomBrightness
"""
def __init__(self, intensity_min: float, intensity_max: float):
super().__init__()
self._init(locals())
def get_transform(self, img: np.ndarray) -> Transform:
w = np.random.uniform(self.intensity_min, self.intensity_max)
pure_gray = np.zeros_like(img)
pure_gray[:, :, 0] = 0.5
return BlendTransform(src_image=pure_gray, src_weight=1 - w, dst_weight=w)
class RandomSaturationYUV(aug.Augmentation):
"""
Randomly transforms saturation for images in YUV format.
See similar: detectron2.data.transforms.RandomSaturation
"""
def __init__(self, intensity_min: float, intensity_max: float):
super().__init__()
self._init(locals())
def get_transform(self, img: np.ndarray) -> Transform:
assert (
len(img.shape) == 3 and img.shape[-1] == 3
), f"Expected (H, W, 3), image shape {img.shape}"
w = np.random.uniform(self.intensity_min, self.intensity_max)
grayscale = np.zeros_like(img)
grayscale[:, :, 0] = img[:, :, 0]
return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
def convert_rgb_to_yuv_bt601(image: np.ndarray) -> np.ndarray:
"""Convert RGB image in (H, W, C) to YUV format
image: range 0 ~ 255
"""
image = image / 255.0
image = np.dot(image, np.array(du._M_RGB2YUV).T)
return image
def convery_yuv_bt601_to_rgb(image: np.ndarray) -> np.ndarray:
return du.convert_image_to_rgb(image, "YUV-BT.601")
class RGB2YUVBT601(aug.Augmentation):
def __init__(self):
super().__init__()
self.trans = InvertibleColorTransform(
convert_rgb_to_yuv_bt601, convery_yuv_bt601_to_rgb
)
def get_transform(self, image) -> Transform:
return self.trans
class YUVBT6012RGB(aug.Augmentation):
def __init__(self):
super().__init__()
self.trans = InvertibleColorTransform(
convery_yuv_bt601_to_rgb, convert_rgb_to_yuv_bt601
)
def get_transform(self, image) -> Transform:
return self.trans
def build_func(
cfg: CfgNode, arg_str: str, is_train: bool, obj
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [obj(**kwargs)]
@TRANSFORM_OP_REGISTRY.register()
def RandomContrastYUVOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
return build_func(cfg, arg_str, is_train, obj=RandomContrastYUV)
@TRANSFORM_OP_REGISTRY.register()
def RandomSaturationYUVOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
return build_func(cfg, arg_str, is_train, obj=RandomSaturationYUV)
@TRANSFORM_OP_REGISTRY.register()
def RGB2YUVBT601Op(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
return build_func(cfg, arg_str, is_train, obj=RGB2YUVBT601)
@TRANSFORM_OP_REGISTRY.register()
def YUVBT6012RGBOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
return build_func(cfg, arg_str, is_train, obj=YUVBT6012RGB)
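
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# The YUV ops are meant to bracket YUV-space photometric jitter: convert RGB to
# YUV-BT.601, jitter contrast/saturation there, then convert back to RGB. The random
# image below is an assumption made only for this demo.
if __name__ == "__main__":
    augs = [
        RGB2YUVBT601(),
        RandomContrastYUV(0.8, 1.2),
        RandomSaturationYUV(0.8, 1.2),
        YUVBT6012RGB(),
    ]
    image = np.random.randint(0, 256, size=(32, 32, 3)).astype(np.float32)
    aug_input = aug.AugInput(image)
    aug.AugmentationList(augs)(aug_input)  # transforms aug_input.image in place
    print(aug_input.image.shape)  # (32, 32, 3), back in RGB after the round trip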
| d2go-main | d2go/data/transforms/color_yuv.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import List, Optional, Union
import detectron2.data.transforms.augmentation as aug
import numpy as np
import torchvision.transforms as tvtf
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from d2go.data.transforms.tensor import Array2Tensor, Tensor2Array
from detectron2.config import CfgNode
from fvcore.transforms.transform import Transform
class ToTensorWrapper:
def __init__(self, transform):
self.a2t = Array2Tensor(preserve_dtype=True)
self.transform = transform
self.t2a = Tensor2Array()
def __call__(self, img: np.ndarray):
return self.t2a.apply_image(self.transform(self.a2t.apply_image(img)))
class RandAugmentImage(Transform):
"""Rand Augment transform, only support image transformation"""
def __init__(
self,
num_ops: int = 2,
magnitude: int = 9,
num_magnitude_bins: int = 31,
interpolation: tvtf.functional.InterpolationMode = tvtf.functional.InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
):
transform = tvtf.RandAugment(
num_ops, magnitude, num_magnitude_bins, interpolation, fill
)
self.transform = ToTensorWrapper(transform)
def apply_image(self, img: np.ndarray) -> np.array:
assert (
img.dtype == np.uint8
), f"Only uint8 image format is supported, got {img.dtype}"
return self.transform(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
raise NotImplementedError()
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
raise NotImplementedError()
class TrivialAugmentWideImage(Transform):
"""TrivialAugmentWide transform, only support image transformation"""
def __init__(
self,
num_magnitude_bins: int = 31,
interpolation: tvtf.functional.InterpolationMode = tvtf.functional.InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
):
transform = tvtf.TrivialAugmentWide(num_magnitude_bins, interpolation, fill)
self.transform = ToTensorWrapper(transform)
def apply_image(self, img: np.ndarray) -> np.array:
assert (
img.dtype == np.uint8
), f"Only uint8 image format is supported, got {img.dtype}"
return self.transform(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
raise NotImplementedError()
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
raise NotImplementedError()
class AugMixImage(Transform):
"""AugMix transform, only support image transformation"""
def __init__(
self,
severity: int = 3,
mixture_width: int = 3,
chain_depth: int = -1,
alpha: float = 1.0,
all_ops: bool = True,
interpolation: tvtf.functional.InterpolationMode = tvtf.functional.InterpolationMode.NEAREST,
fill: Optional[List[float]] = None,
):
transform = tvtf.AugMix(
severity, mixture_width, chain_depth, alpha, all_ops, interpolation, fill
)
self.transform = ToTensorWrapper(transform)
def apply_image(self, img: np.ndarray) -> np.array:
assert (
img.dtype == np.uint8
), f"Only uint8 image format is supported, got {img.dtype}"
return self.transform(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
raise NotImplementedError()
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
raise NotImplementedError()
# example repr: 'RandAugmentImageOp::{"magnitude": 9}'
@TRANSFORM_OP_REGISTRY.register()
def RandAugmentImageOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandAugmentImage(**kwargs)]
# example repr: 'TrivialAugmentWideImageOp::{"num_magnitude_bins": 31}'
@TRANSFORM_OP_REGISTRY.register()
def TrivialAugmentWideImageOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [TrivialAugmentWideImage(**kwargs)]
# example repr: 'AugMixImageOp::{"severity": 3}'
@TRANSFORM_OP_REGISTRY.register()
def AugMixImageOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [AugMixImage(**kwargs)]
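
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# The wrappers apply torchvision's policies to HWC uint8 numpy images by round-tripping
# through CHW tensors; only apply_image is supported (coords/masks raise
# NotImplementedError). The random image below is an assumption made only for this demo.
if __name__ == "__main__":
    img = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
    out = RandAugmentImage(num_ops=2, magnitude=9).apply_image(img)
    print(out.shape, out.dtype)  # (64, 64, 3) uint8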
| d2go-main | d2go/data/transforms/auto_aug.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import json
import random
from typing import List, Optional, Tuple
import cv2
import numpy as np
import torchvision.transforms as T
from d2go.data.transforms.build import TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data.transforms.augmentation import TransformGen
from fvcore.transforms.transform import NoOpTransform, Transform
class AffineTransform(Transform):
def __init__(
self,
M: np.ndarray,
img_w: int,
img_h: int,
flags: Optional[int] = None,
border_mode: Optional[int] = None,
is_inversed_M: bool = False,
):
"""
Args:
M (np.ndarray): affine transform matrix; the image will be warped according to M.
"""
super().__init__()
self._set_attributes(locals())
self.warp_kwargs = {}
if flags is not None:
self.warp_kwargs["flags"] = flags
if border_mode is not None:
self.warp_kwargs["borderMode"] = border_mode
def _warp_array(self, input_data: np.array, interp_flag: Optional[int] = None):
warp_kwargs = copy.deepcopy(self.warp_kwargs)
if interp_flag is not None:
flags = warp_kwargs.get("flags", 0)
# remove previous interp and add the new one
flags = (flags - (flags & cv2.INTER_MAX)) + interp_flag
warp_kwargs["flags"] = flags
M = self.M
if self.is_inversed_M:
M = M[:2]
img = cv2.warpAffine(
input_data,
M,
(int(self.img_w), int(self.img_h)),
**warp_kwargs,
)
return img
def apply_image(self, img: np.ndarray) -> np.ndarray:
return self._warp_array(img)
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
# Add row of ones to enable matrix multiplication
coords = coords.T
ones = np.ones((1, coords.shape[1]))
coords = np.vstack((coords, ones))
M = self.M
if self.is_inversed_M:
M = np.linalg.inv(M)
coords = (M @ coords)[:2, :].T
return coords
def apply_segmentation(self, img: np.ndarray) -> np.ndarray:
return self._warp_array(img, interp_flag=cv2.INTER_NEAREST)
class RandomPivotScaling(TransformGen):
"""
Uniformly pick a random pivot point inside image frame, scaling the image
around the pivot point using the scale factor sampled from a list of
given scales. The pivot point's location is unchanged after the transform.
Arguments:
scales: List[float]: each element can be any positive float number,
when larger than 1.0 objects become larger after transform
and vice versa.
"""
def __init__(self, scales: List[float]):
super().__init__()
self._init(locals())
self.scales = scales
def get_transform(self, img: np.ndarray) -> Transform:
img_h, img_w, _ = img.shape
img_h = float(img_h)
img_w = float(img_w)
pivot_y = self._rand_range(0.0, img_h)
pivot_x = self._rand_range(0.0, img_w)
def _interp(p1, p2, alpha):
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
p_x = p1[0] + alpha * dx
p_y = p1[1] + alpha * dy
return (p_x, p_y)
scale = np.random.choice(self.scales)
lt = (0.0, 0.0)
rb = (img_w, img_h)
pivot = (pivot_x, pivot_y)
pts1 = np.float32([lt, pivot, rb])
pts2 = np.float32(
[_interp(pivot, lt, scale), pivot, _interp(pivot, rb, scale)],
)
M = cv2.getAffineTransform(pts1, pts2)
return AffineTransform(M, img_w, img_h)
class RandomAffine(TransformGen):
"""
Apply random affine trasform to the image given
probabilities and ranges in each dimension.
"""
def __init__(
self,
prob: float = 0.5,
angle_range: Tuple[float, float] = (-90, 90),
translation_range: Tuple[float, float] = (0, 0),
scale_range: Tuple[float, float] = (1.0, 1.0),
shear_range: Tuple[float, float] = (0, 0),
fit_in_frame: bool = True,
keep_aspect_ratio: bool = False,
):
"""
Args:
prob (float): probability of applying transform.
angle_range (tuple of integers): min/max rotation angle in degrees
between -180 and 180.
translation_range (tuple of integers): min/max translation
(post re-centered rotation).
scale_range (tuple of floats): min/max scale (post re-centered rotation).
shear_range (tuple of integers): min/max shear angle value in degrees
between -180 to 180.
fit_in_frame: warped image is scaled into the output frame
keep_aspect_ratio: aspect ratio is kept instead of creating a squared image
with dimension of max dimension
"""
super().__init__()
# Turn all locals into member variables.
self._init(locals())
def _compute_scale_adjustment(
self,
im_w: float,
im_h: float,
out_w: float,
out_h: float,
center: Tuple[float, float],
angle: float,
shear: Tuple[float, float],
) -> float:
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, [0.0, 0.0], 1.0, shear
)
M_inv.extend([0.0, 0.0, 1.0])
M_inv = np.array(M_inv).reshape((3, 3))
M = np.linalg.inv(M_inv)
# Center in output patch
img_corners = np.array(
[
[0, 0, im_w - 1, im_w - 1],
[0, im_h - 1, 0, im_h - 1],
[1, 1, 1, 1],
]
)
new_corners = M @ img_corners
x_range = np.ceil(np.amax(new_corners[0]) - np.amin(new_corners[0]))
y_range = np.ceil(np.amax(new_corners[1]) - np.amin(new_corners[1]))
# Apply translation and scale after centering in output patch
scale_adjustment = min(out_w / x_range, out_h / y_range)
return scale_adjustment
def get_transform(self, img: np.ndarray) -> Transform:
do = self._rand_range() < self.prob
if not do:
return NoOpTransform()
im_h, im_w = img.shape[:2]
center = [im_w / 2, im_h / 2]
angle = random.uniform(self.angle_range[0], self.angle_range[1])
translation = [
random.uniform(self.translation_range[0], self.translation_range[1]),
random.uniform(self.translation_range[0], self.translation_range[1]),
]
scale = random.uniform(self.scale_range[0], self.scale_range[1])
shear = [
random.uniform(self.shear_range[0], self.shear_range[1]),
random.uniform(self.shear_range[0], self.shear_range[1]),
]
# Determine output image size
max_size = max(im_w, im_h)
out_w, out_h = (im_w, im_h) if self.keep_aspect_ratio else (max_size, max_size)
# Apply translation adjustment
translation_adjustment = [(out_w - im_w) / 2, (out_h - im_h) / 2]
translation[0] += translation_adjustment[0]
translation[1] += translation_adjustment[1]
# Apply scale adjustment
if self.fit_in_frame:
scale_adjustment = self._compute_scale_adjustment(
im_w, im_h, out_w, out_h, center, angle, shear
)
scale *= scale_adjustment
# Compute the affine transform
M_inv = T.functional._get_inverse_affine_matrix(
center, angle, translation, scale, shear
)
M_inv = np.array(M_inv).reshape((2, 3))
M_inv = np.vstack([M_inv, [0.0, 0.0, 1.0]])
return AffineTransform(
M_inv,
out_w,
out_h,
flags=cv2.WARP_INVERSE_MAP + cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REPLICATE,
is_inversed_M=True,
)
# example repr: "RandomPivotScalingOp::[1.0, 0.75, 0.5]"
@TRANSFORM_OP_REGISTRY.register()
def RandomPivotScalingOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
scales = json.loads(arg_str)
assert isinstance(scales, list)
assert all(isinstance(scale, (float, int)) for scale in scales)
return [RandomPivotScaling(scales=scales)]
@TRANSFORM_OP_REGISTRY.register()
def RandomAffineOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
kwargs = json.loads(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomAffine(**kwargs)]
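
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# RandomAffine samples an inverse affine matrix and returns an AffineTransform that
# warps the image and maps coordinates consistently; prob=1.0 forces the transform.
# The random image below is an assumption made only for this demo.
if __name__ == "__main__":
    img = np.random.randint(0, 256, size=(48, 64, 3), dtype=np.uint8)
    tfm = RandomAffine(prob=1.0, angle_range=(-30, 30)).get_transform(img)
    warped = tfm.apply_image(img)
    corners = tfm.apply_coords(np.array([[0.0, 0.0], [63.0, 47.0]]))
    print(warped.shape, corners.shape)  # (64, 64, 3) (2, 2)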
| d2go-main | d2go/data/transforms/affine.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Populating registries
from d2go.data.transforms import ( # noqa
affine as _affine,
auto_aug,
blur as _blur,
box_utils as _box_utils,
color_yuv as _color_yuv,
crop as _crop,
d2_native as _d2_native,
)
# @fb-only: from d2go.data.transforms import fb as _fb # isort:skip # noqa
| d2go-main | d2go/data/transforms/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
from typing import Any, List, Tuple, Union
import detectron2.data.transforms.augmentation as aug
import numpy as np
import torch
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data.transforms.transform import Transform
from detectron2.structures.boxes import Boxes
def get_box_union(boxes: Boxes):
"""Merge all boxes into a single box"""
if len(boxes) == 0:
return boxes
bt = boxes.tensor
union_bt = torch.cat(
(torch.min(bt[:, :2], 0).values, torch.max(bt[:, 2:], 0).values)
).reshape(1, -1)
return Boxes(union_bt)
def get_box_from_mask(mask: torch.Tensor) -> Tuple[int, int, int, int]:
"""Find if there are non-zero elements per row/column first and then find
min/max position of those elements.
Only support 2d image (h x w)
Return (x1, y1, w, h) if bbox found, otherwise None
"""
assert len(mask.shape) == 2, f"Invalid shape {mask.shape}"
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
if bool(np.any(rows)) is False or bool(np.any(cols)) is False:
return None
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
assert cmax >= cmin, f"cmax={cmax}, cmin={cmin}"
assert rmax >= rmin, f"rmax={rmax}, rmin={rmin}"
# x1, y1, w, h
return cmin, rmin, cmax - cmin + 1, rmax - rmin + 1
def get_min_box_aspect_ratio(
bbox_xywh: torch.Tensor, target_aspect_ratio: float
) -> torch.Tensor:
"""Get a minimal bbox that matches the target_aspect_ratio
target_aspect_ratio is represented as w/h;
bboxes are represented in pixel coordinates"""
bbox_xywh = torch.Tensor(bbox_xywh)
box_w, box_h = bbox_xywh[2:]
box_ar = float(box_w) / box_h
if box_ar >= target_aspect_ratio:
new_w = box_w
new_h = float(new_w) / target_aspect_ratio
else:
new_h = box_h
new_w = new_h * target_aspect_ratio
new_wh = torch.Tensor([new_w, new_h])
bbox_center = bbox_xywh[:2] + bbox_xywh[2:] / 2.0
new_xy = bbox_center - new_wh / 2.0
return torch.cat([new_xy, new_wh])
def get_box_center(bbox_xywh: torch.Tensor) -> torch.Tensor:
"""Get the center of the bbox"""
return torch.Tensor(bbox_xywh[:2]) + torch.Tensor(bbox_xywh[2:]) / 2.0
def get_bbox_xywh_from_center_wh(
bbox_center: torch.Tensor, bbox_wh: torch.Tensor
) -> torch.Tensor:
"""Get a bbox from bbox center and the width and height"""
bbox_wh = torch.Tensor(bbox_wh)
bbox_xy = torch.Tensor(bbox_center) - bbox_wh / 2.0
return torch.cat([bbox_xy, bbox_wh])
def get_bbox_xyxy_from_xywh(bbox_xywh: torch.Tensor) -> torch.Tensor:
"""Convert the bbox from xywh format to xyxy format
bboxes are represented in pixel coordinates;
the center of pixel (x, y) is (x + 0.5, y + 0.5)
"""
return torch.Tensor(
[
bbox_xywh[0],
bbox_xywh[1],
bbox_xywh[0] + bbox_xywh[2],
bbox_xywh[1] + bbox_xywh[3],
]
)
def get_bbox_xywh_from_xyxy(bbox_xyxy: torch.Tensor) -> torch.Tensor:
"""Convert the bbox from xyxy format to xywh format"""
return torch.Tensor(
[
bbox_xyxy[0],
bbox_xyxy[1],
bbox_xyxy[2] - bbox_xyxy[0],
bbox_xyxy[3] - bbox_xyxy[1],
]
)
def to_boxes_from_xywh(bbox_xywh: torch.Tensor) -> torch.Tensor:
return Boxes(get_bbox_xyxy_from_xywh(bbox_xywh).unsqueeze(0))
def scale_bbox_center(bbox_xywh: torch.Tensor, target_scale: float) -> torch.Tensor:
"""Scale the bbox around the center of the bbox"""
box_center = get_box_center(bbox_xywh)
box_wh = torch.Tensor(bbox_xywh[2:]) * target_scale
return get_bbox_xywh_from_center_wh(box_center, box_wh)
def offset_bbox(bbox_xywh: torch.Tensor, target_offset: float) -> torch.Tensor:
"""Offset the bbox based on target_offset"""
box_center = get_box_center(bbox_xywh)
new_center = box_center + torch.Tensor(target_offset)
return get_bbox_xywh_from_center_wh(new_center, bbox_xywh[2:])
def clip_box_xywh(bbox_xywh: torch.Tensor, image_size_hw: List[int]):
"""Clip the bbox based on image_size_hw"""
h, w = image_size_hw
bbox_xyxy = get_bbox_xyxy_from_xywh(bbox_xywh)
bbox_xyxy[0] = max(bbox_xyxy[0], 0)
bbox_xyxy[1] = max(bbox_xyxy[1], 0)
bbox_xyxy[2] = min(bbox_xyxy[2], w)
bbox_xyxy[3] = min(bbox_xyxy[3], h)
return get_bbox_xywh_from_xyxy(bbox_xyxy)
def scale_coord(
target: Union[torch.tensor, np.ndarray],
source: Union[torch.tensor, np.ndarray],
percentage: float,
):
return [((a - b) * percentage + a) for a, b in zip(target, source)]
def pad_coord(
target: Union[torch.tensor, np.ndarray],
source: Union[torch.tensor, np.ndarray],
fixed_pad: float,
):
return [(np.sign(a - b) * fixed_pad + a) for a, b in zip(target, source)]
class EnlargeBoundingBox(Transform):
"""Enlarge bounding box based on fixed padding or percentage"""
def __init__(
self, percentage: float = None, fixed_pad: int = None, box_only: bool = False
):
super().__init__()
assert percentage is not None or fixed_pad is not None
assert percentage is None or fixed_pad is None
if percentage is not None:
self.xfm_fn = functools.partial(scale_coord, percentage=percentage)
elif fixed_pad is not None:
self.xfm_fn = functools.partial(pad_coord, fixed_pad=fixed_pad)
self.box_only = box_only
def apply_image(self, img: torch.Tensor) -> np.ndarray:
return img
def apply_box(self, coords: Any) -> Any:
# Takes boxes_xyxy
center = (np.array(coords[0, 0:2]) + np.array(coords[0, 2:])) / 2
new_coords = np.zeros_like(coords)
new_coords[0, 0:2] = self.xfm_fn(coords[0, 0:2], center)
new_coords[0, 2:] = self.xfm_fn(coords[0, 2:], center)
return new_coords
def apply_coords(self, coords: Any) -> Any:
if self.box_only:
return coords
assert coords.shape[1] == 2, "Supported 2d inputs only"
center = np.mean(coords, axis=0)
for index in range(coords.shape[0]):
coords[index] = self.xfm_fn(coords[index], center)
return coords
@TRANSFORM_OP_REGISTRY.register()
def EnlargeBoundingBoxOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [EnlargeBoundingBox(**kwargs)]
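
# --- Usage sketch (illustrative, not part of the original d2go module) ---
# Most helpers here operate on a single box stored as a tensor, converting between
# xywh and xyxy and scaling/clipping around the box center. The numbers below are
# assumptions made only for this demo.
if __name__ == "__main__":
    bbox_xywh = torch.Tensor([10, 20, 30, 40])           # x1, y1, w, h
    print(get_bbox_xyxy_from_xywh(bbox_xywh))            # tensor([10., 20., 40., 60.])
    print(get_box_center(bbox_xywh))                     # tensor([25., 40.])
    grown = scale_bbox_center(bbox_xywh, 2.0)            # double w/h around the center
    print(clip_box_xywh(grown, image_size_hw=[64, 64]))  # clipped to a 64x64 frame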
| d2go-main | d2go/data/transforms/box_utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, List, Optional, Union
import numpy as np
import torch
from detectron2.data.transforms.augmentation import Augmentation, AugmentationList
from detectron2.structures import Boxes
from fvcore.transforms.transform import Transform
class AugInput:
"""
Same as AugInput in vision/fair/detectron2/detectron2/data/transforms/augmentation.py
but allows torch.Tensor as input
"""
def __init__(
self,
image: Union[np.ndarray, torch.Tensor],
*,
boxes: Optional[Union[np.ndarray, torch.Tensor, Boxes]] = None,
sem_seg: Optional[Union[np.ndarray, torch.Tensor]] = None,
):
"""
Args:
image (ndarray/torch.Tensor): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
floating point in range [0, 1] or [0, 255]. (C, H, W) for tensor.
boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode
sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element
is an integer label of pixel.
"""
self.image = image
self.boxes = boxes
self.sem_seg = sem_seg
def transform(self, tfm: Transform) -> None:
"""
In-place transform all attributes of this class.
By "in-place", it means after calling this method, accessing an attribute such
as ``self.image`` will return transformed data.
"""
self.image = tfm.apply_image(self.image)
if self.boxes is not None:
self.boxes = tfm.apply_box(self.boxes)
if self.sem_seg is not None:
self.sem_seg = tfm.apply_segmentation(self.sem_seg)
def apply_augmentations(
self, augmentations: List[Union[Augmentation, Transform]]
) -> AugmentationList:
"""
Equivalent of ``AugmentationList(augmentations)(self)``
"""
return AugmentationList(augmentations)(self)
class Tensor2Array(Transform):
"""Convert image tensor (CHW) to np array (HWC)"""
def __init__(self):
super().__init__()
def apply_image(self, img: torch.Tensor) -> np.ndarray:
# CHW -> HWC
assert isinstance(img, torch.Tensor)
assert len(img.shape) == 3, img.shape
return img.cpu().numpy().transpose(1, 2, 0)
def apply_coords(self, coords: Any) -> Any:
return coords
def apply_segmentation(self, segmentation: torch.Tensor) -> np.ndarray:
assert len(segmentation.shape) == 2, segmentation.shape
return segmentation.cpu().numpy()
def inverse(self) -> Transform:
return Array2Tensor()
class Array2Tensor(Transform):
"""Convert image np array (HWC) to torch tensor (CHW)"""
def __init__(self, preserve_dtype: bool = False):
"""
preserve_dtype: always convert to float32 if False
"""
super().__init__()
self.preserve_dtype = preserve_dtype
def apply_image(self, img: np.ndarray) -> torch.Tensor:
# HW(C) -> CHW
assert isinstance(img, np.ndarray)
assert len(img.shape) in [2, 3], img.shape
if len(img.shape) == 2:
# HW -> HWC
img = np.expand_dims(img, axis=2)
if not self.preserve_dtype:
img = img.astype("float32")
return torch.from_numpy(img.transpose(2, 0, 1))
def apply_coords(self, coords: Any) -> Any:
return coords
def apply_segmentation(self, segmentation: np.ndarray) -> torch.Tensor:
assert len(segmentation.shape) == 2, segmentation.shape
return torch.from_numpy(segmentation.astype("long"))
def inverse(self) -> Transform:
return Tensor2Array()
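# Illustrative usage sketch (not part of the original module; shapes assumed):
# round-trip a CHW uint8 tensor through Tensor2Array and back via AugInput.
#
#   inputs = AugInput(image=torch.zeros(3, 16, 16, dtype=torch.uint8))
#   tfms = inputs.apply_augmentations([Tensor2Array()])  # image becomes an HWC ndarray
#   restored = tfms.inverse().apply_image(inputs.image)  # back to a CHW float32 tensor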
| d2go-main | d2go/data/transforms/tensor.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
from typing import Any, List, Optional, Tuple, Union
import d2go.data.transforms.box_utils as bu
import detectron2.data.transforms.augmentation as aug
import numpy as np
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data.transforms.transform import ExtentTransform
from detectron2.structures import BoxMode
from fvcore.transforms.transform import CropTransform, NoOpTransform, Transform
class CropBoundary(aug.Augmentation):
"""Crop the boundary of the image by `count` pixel on each side"""
def __init__(self, count=3):
super().__init__()
self.count = count
def get_transform(self, image: np.ndarray) -> Transform:
img_h, img_w = image.shape[:2]
assert self.count < img_h and self.count < img_w
assert img_h > self.count * 2
assert img_w > self.count * 2
box = [self.count, self.count, img_w - self.count * 2, img_h - self.count * 2]
return CropTransform(*box)
class PadTransform(Transform):
def __init__(
self,
x0: int,
y0: int,
w: int,
h: int,
org_w: int,
org_h: int,
pad_mode: str = "constant",
pad_value: float = 0.0,
):
super().__init__()
assert x0 + w <= org_w
assert y0 + h <= org_h
self._set_attributes(locals())
    def apply_image(self, img: np.ndarray) -> np.ndarray:
"""img: HxWxC or HxW"""
assert len(img.shape) == 2 or len(img.shape) == 3
assert img.shape[0] == self.h and img.shape[1] == self.w
pad_width = [
(self.y0, self.org_h - self.h - self.y0),
(self.x0, self.org_w - self.w - self.x0),
*([(0, 0)] if len(img.shape) == 3 else []),
]
pad_args = {"mode": self.pad_mode}
if self.pad_mode == "constant":
pad_args["constant_values"] = self.pad_value
ret = np.pad(img, pad_width=tuple(pad_width), **pad_args)
return ret
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
raise NotImplementedError()
def inverse(self) -> Transform:
return CropTransform(self.x0, self.y0, self.w, self.h, self.org_w, self.org_h)
InvertibleCropTransform = CropTransform
class PadBorderDivisible(aug.Augmentation):
def __init__(self, size_divisibility: int, pad_mode: str = "constant"):
super().__init__()
self.size_divisibility = size_divisibility
self.pad_mode = pad_mode
def get_transform(self, image: np.ndarray) -> Transform:
"""image: HxWxC"""
assert len(image.shape) == 3 and image.shape[2] in [
1,
3,
], f"Invalid image shape {image.shape}"
H, W = image.shape[:2]
new_h = int(math.ceil(H / self.size_divisibility) * self.size_divisibility)
new_w = int(math.ceil(W / self.size_divisibility) * self.size_divisibility)
return PadTransform(0, 0, W, H, new_w, new_h, pad_mode=self.pad_mode)
class PadToSquare(aug.Augmentation):
"""Pad the image to square"""
def __init__(
self,
pad_mode: str = "constant",
pad_value: float = 0.0,
):
super().__init__()
self.pad_mode = pad_mode
self.pad_value = pad_value
def get_transform(self, image: np.ndarray) -> Transform:
"""image: HxWxC"""
assert len(image.shape) == 3 and image.shape[2] in [
1,
3,
], f"Invalid image shape {image.shape}"
H, W = image.shape[:2]
new_h = new_w = max(H, W)
return PadTransform(
0,
0,
W,
H,
new_w,
new_h,
pad_mode=self.pad_mode,
pad_value=self.pad_value,
)
class RandomCropFixedAspectRatio(aug.Augmentation):
def __init__(
self,
crop_aspect_ratios_list: List[float],
scale_range: Optional[Union[List, Tuple]] = None,
offset_scale_range: Optional[Union[List, Tuple]] = None,
):
super().__init__()
assert isinstance(crop_aspect_ratios_list, (list, tuple))
assert (
scale_range is None
or isinstance(scale_range, (list, tuple))
and len(scale_range) == 2
)
assert (
offset_scale_range is None
or isinstance(offset_scale_range, (list, tuple))
and len(offset_scale_range) == 2
)
# [w1/h1, w2/h2, ...]
self.crop_aspect_ratios_list = crop_aspect_ratios_list
# [low, high] or None
self.scale_range = scale_range
# [low, high] or None
self.offset_scale_range = offset_scale_range
self.rng = np.random.default_rng()
def _pick_aspect_ratio(self) -> float:
return self.rng.choice(self.crop_aspect_ratios_list)
def _pick_scale(self) -> float:
if self.scale_range is None:
return 1.0
return self.rng.uniform(*self.scale_range)
def _pick_offset(self, box_w: float, box_h: float) -> Tuple[float, float]:
if self.offset_scale_range is None:
            return 0.0, 0.0
offset_scale = self.rng.uniform(*self.offset_scale_range, size=2)
return offset_scale[0] * box_w, offset_scale[1] * box_h
def get_transform(self, image: np.ndarray, sem_seg: np.ndarray) -> Transform:
# HWC or HW for image, HW for sem_seg
assert len(image.shape) in [2, 3]
assert len(sem_seg.shape) == 2
mask_box_xywh = bu.get_box_from_mask(sem_seg)
# do nothing if the mask is empty (the whole image is background)
if mask_box_xywh is None:
return NoOpTransform()
crop_ar = self._pick_aspect_ratio()
target_scale = self._pick_scale()
target_offset = self._pick_offset(*mask_box_xywh[2:])
mask_box_xywh = bu.offset_bbox(mask_box_xywh, target_offset)
mask_box_xywh = bu.scale_bbox_center(mask_box_xywh, target_scale)
target_box_xywh = bu.get_min_box_aspect_ratio(mask_box_xywh, crop_ar)
target_bbox_xyxy = bu.get_bbox_xyxy_from_xywh(target_box_xywh)
return ExtentTransform(
src_rect=target_bbox_xyxy,
output_size=(
int(target_box_xywh[3].item()),
int(target_box_xywh[2].item()),
),
)
# example repr: "CropBoundaryOp::{'count': 3}"
@TRANSFORM_OP_REGISTRY.register()
def CropBoundaryOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [CropBoundary(**kwargs)]
# example repr: 'PadToSquareOp::{"pad_value": 255.0}'
@TRANSFORM_OP_REGISTRY.register()
def PadToSquareOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [PadToSquare(**kwargs)]
# example repr: "RandomCropFixedAspectRatioOp::{'crop_aspect_ratios_list': [0.5], 'scale_range': [0.8, 1.2], 'offset_scale_range': [-0.3, 0.3]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomCropFixedAspectRatioOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomCropFixedAspectRatio(**kwargs)]
class RandomInstanceCrop(aug.Augmentation):
def __init__(
self, crop_scale: Tuple[float, float] = (0.8, 1.6), fix_instance=False
):
"""
Generates a CropTransform centered around the instance.
        crop_scale: [low, high] relative crop scale around the instance; this
        determines how far to zoom in / out around the cropped instance.
"""
super().__init__()
self.crop_scale = crop_scale
self.fix_instance = fix_instance
assert (
isinstance(crop_scale, (list, tuple)) and len(crop_scale) == 2
), crop_scale
def get_transform(self, image: np.ndarray, annotations: List[Any]) -> Transform:
"""
This function will modify instances to set the iscrowd flag to 1 for
annotations not picked. It relies on the dataset mapper to filter those
items out
"""
assert isinstance(annotations, (list, tuple)), annotations
assert all("bbox" in x for x in annotations), annotations
assert all("bbox_mode" in x for x in annotations), annotations
image_size = image.shape[:2]
# filter out iscrowd
annotations = [x for x in annotations if x.get("iscrowd", 0) == 0]
if len(annotations) == 0:
return NoOpTransform()
if not self.fix_instance:
sel_index = np.random.randint(len(annotations))
else:
sel_index = 0
# set iscrowd flag of other annotations to 1 so that they will be
        # filtered out by the dataset mapper (https://fburl.com/diffusion/fg64cb4h)
for idx, instance in enumerate(annotations):
if idx != sel_index:
instance["iscrowd"] = 1
instance = annotations[sel_index]
bbox_xywh = BoxMode.convert(
instance["bbox"], instance["bbox_mode"], BoxMode.XYWH_ABS
)
scale = np.random.uniform(*self.crop_scale)
bbox_xywh = bu.scale_bbox_center(bbox_xywh, scale)
bbox_xywh = bu.clip_box_xywh(bbox_xywh, image_size).int()
return CropTransform(
*bbox_xywh.tolist(), orig_h=image_size[0], orig_w=image_size[1]
)
# example repr: "RandomInstanceCropOp::{'crop_scale': [0.8, 1.6]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomInstanceCropOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, Transform]]:
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomInstanceCrop(**kwargs)]
class CropBoxAug(aug.Augmentation):
"""Augmentation to crop the image based on boxes
Scale the box with `box_scale_factor` around the center before cropping
"""
def __init__(self, box_scale_factor: float = 1.0):
super().__init__()
self.box_scale_factor = box_scale_factor
def get_transform(self, image: np.ndarray, boxes: np.ndarray) -> Transform:
# boxes: 1 x 4 in xyxy format
assert boxes.shape[0] == 1
assert isinstance(image, np.ndarray)
assert isinstance(boxes, np.ndarray)
img_h, img_w = image.shape[0:2]
box_xywh = bu.get_bbox_xywh_from_xyxy(boxes[0])
if self.box_scale_factor != 1.0:
box_xywh = bu.scale_bbox_center(box_xywh, self.box_scale_factor)
box_xywh = bu.clip_box_xywh(box_xywh, [img_h, img_w])
box_xywh = box_xywh.int().tolist()
return CropTransform(*box_xywh, orig_w=img_w, orig_h=img_h)
| d2go-main | d2go/data/transforms/crop.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import random
from typing import Dict, List, Tuple
import cv2
import detectron2.data.transforms.augmentation as aug
import numpy as np
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from fvcore.transforms.transform import NoOpTransform, Transform
class LocalizedBoxMotionBlurTransform(Transform):
"""Transform to blur provided bounding boxes from an image."""
def __init__(
self,
bounding_boxes: List[List[int]],
k: Tuple[float, float] = (7, 15),
angle: Tuple[float, float] = (0, 360),
direction: Tuple[float, float] = (-1.0, 1.0),
):
import imgaug.augmenters as iaa
super().__init__()
self._set_attributes(locals())
self.aug = iaa.MotionBlur(k, angle, direction, 1)
def apply_image(self, img: np.ndarray) -> np.ndarray:
bbox_regions = [img[y : y + h, x : x + w] for x, y, w, h in self.bounding_boxes]
blurred_boxes = self.aug.augment_images(bbox_regions)
new_img = np.array(img)
for (x, y, w, h), blurred in zip(self.bounding_boxes, blurred_boxes):
new_img[y : y + h, x : x + w] = blurred
return new_img
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
"""Apply no transform on the full-image segmentation."""
return segmentation
def apply_coords(self, coords: np.ndarray):
"""Apply no transform on the coordinates."""
return coords
def inverse(self) -> Transform:
"""The inverse is a No-op, only for geometric transforms."""
return NoOpTransform()
class LocalizedBoxMotionBlur(aug.Augmentation):
"""
    Performs simulated motion blur on the bounding box regions of an image.
Randomly selects motion blur parameters from the ranges `k`, `angle`, `direction`.
"""
def __init__(
self,
prob: float = 0.5,
k: Tuple[float, float] = (7, 15),
angle: Tuple[float, float] = (0, 360),
direction: Tuple[float, float] = (-1.0, 1.0),
):
super().__init__()
self._init(locals())
def _validate_bbox_xywh_within_bounds(
self, bbox: List[int], img_h: int, img_w: int
):
x, y, w, h = bbox
assert x >= 0, f"Invalid x {x}"
        assert y >= 0, f"Invalid y {y}"
        assert x + w <= img_w, f"Invalid right {x+w} (img width {img_w})"
assert y + h <= img_h, f"Invalid bottom {y+h} (img height {img_h})"
def get_transform(self, image: np.ndarray, annotations: List[Dict]) -> Transform:
do_tfm = self._rand_range() < self.prob
if do_tfm:
return self._get_blur_transform(image, annotations)
else:
return NoOpTransform()
def _get_blur_transform(
self, image: np.ndarray, annotations: List[Dict]
) -> Transform:
"""
Return a `Transform` that simulates motion blur within the image's bounding box regions.
"""
img_h, img_w = image.shape[:2]
bboxes = [ann["bbox"] for ann in annotations]
# Debug
for bbox in bboxes:
self._validate_bbox_xywh_within_bounds(bbox, img_h, img_w)
return LocalizedBoxMotionBlurTransform(
bboxes,
k=self.k,
angle=self.angle,
direction=self.direction,
)
# example repr: "LocalizedBoxMotionBlurOp::{'prob': 0.5, 'k': [3,7], 'angle': [0, 360]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomLocalizedBoxMotionBlurOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Transform]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [LocalizedBoxMotionBlur(**kwargs)]
class MotionBlurTransform(Transform):
def __init__(
self,
k: Tuple[float, float] = (7, 15),
angle: Tuple[float, float] = (0, 360),
direction: Tuple[float, float] = (-1.0, 1.0),
):
"""
Args:
will apply the specified blur to the image
"""
super().__init__()
self._set_attributes(locals())
self.k = k
self.angle = angle
self.direction = direction
def apply_image(self, img: np.ndarray) -> np.ndarray:
        # Imported here rather than in __init__ to avoid linting errors; it is
        # also not imported at module level since the rest of the code does not
        # depend on imgaug.
import imgaug.augmenters as iaa
aug = iaa.MotionBlur(self.k, self.angle, self.direction, 1)
img = aug.augment_image(img)
return img
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
return segmentation
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
return coords
class RandomMotionBlur(aug.Augmentation):
"""
Apply random motion blur.
"""
def __init__(
self,
prob: float = 0.5,
k: Tuple[float, float] = (3, 7),
angle: Tuple[float, float] = (0, 360),
direction: Tuple[float, float] = (-1.0, 1.0),
):
"""
Args:
prob (float): probability of applying transform
k (tuple): refer to `iaa.MotionBlur`
angle (tuple): refer to `iaa.MotionBlur`
direction (tuple): refer to `iaa.MotionBlur`
"""
super().__init__()
# Turn all locals into member variables.
self._init(locals())
def get_transform(self, img: np.ndarray) -> Transform:
do = self._rand_range() < self.prob
if do:
return MotionBlurTransform(self.k, self.angle, self.direction)
else:
return NoOpTransform()
# example repr: "RandomMotionBlurOp::{'prob': 0.5, 'k': [3,7], 'angle': [0, 360]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomMotionBlurOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomMotionBlur(**kwargs)]
class GaussianBlurTransform(Transform):
def __init__(
self,
k: int = 3,
sigma_range: Tuple[float, float] = (0.3, 0.3),
):
"""
Args:
will apply the specified blur to the image
"""
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray) -> np.ndarray:
sigma = random.uniform(*self.sigma_range)
img_out = cv2.GaussianBlur(img, (self.k, self.k), sigma)
return img_out
def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:
return segmentation
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
return coords
class RandomGaussianBlur(aug.Augmentation):
"""
    Apply random Gaussian blur.
"""
def __init__(
self,
prob: float = 0.5,
k: int = 3,
sigma_range: Tuple[float, float] = (0.3, 0.3),
):
"""
Args:
prob (float): probability of applying transform
k (int): kernel size
sigma_range (tuple): min, max of sigma gaussian filter used
"""
super().__init__()
# Turn all locals into member variables.
self._init(locals())
def get_transform(self, img: np.ndarray) -> Transform:
do = self._rand_range() < self.prob
if do:
return GaussianBlurTransform(self.k, self.sigma_range)
else:
return NoOpTransform()
# example repr: "RandomGaussianBlurOp::{'prob': 0.5, 'k': 5, 'sigma': [0.1, 2]}"
@TRANSFORM_OP_REGISTRY.register()
def RandomGaussianBlurOp(cfg: CfgNode, arg_str: str, is_train: bool) -> List[Transform]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [RandomGaussianBlur(**kwargs)]
| d2go-main | d2go/data/transforms/blur.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import logging
from typing import List, Optional, Union
import detectron2.data.transforms.augmentation as aug
from d2go.data.transforms.build import _json_load, TRANSFORM_OP_REGISTRY
from detectron2.config import CfgNode
from detectron2.data import transforms as d2T
from detectron2.projects.point_rend import ColorAugSSDTransform
logger = logging.getLogger(__name__)
D2_RANDOM_TRANSFORMS = {
"RandomBrightness": d2T.RandomBrightness,
"RandomContrast": d2T.RandomContrast,
"RandomCrop": d2T.RandomCrop,
"RandomRotation": d2T.RandomRotation,
"RandomExtent": d2T.RandomExtent,
"RandomFlip": d2T.RandomFlip,
"RandomSaturation": d2T.RandomSaturation,
"RandomLighting": d2T.RandomLighting,
"RandomResize": d2T.RandomResize,
"FixedSizeCrop": d2T.FixedSizeCrop,
"ResizeScale": d2T.ResizeScale,
"MinIoURandomCrop": d2T.MinIoURandomCrop,
}
def build_func(
cfg: CfgNode, arg_str: str, is_train: bool, name: str
) -> List[Union[aug.Augmentation, d2T.Transform]]:
assert is_train, "Random augmentation is for training only"
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
return [D2_RANDOM_TRANSFORMS[name](**kwargs)]
# example 1: RandomFlipOp
# example 2: RandomFlipOp::{}
# example 3: RandomFlipOp::{"prob":0.5}
# example 4: RandomBrightnessOp::{"intensity_min":1.0, "intensity_max":2.0}
@TRANSFORM_OP_REGISTRY.register()
def RandomBrightnessOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomBrightness")
@TRANSFORM_OP_REGISTRY.register()
def RandomContrastOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomContrast")
@TRANSFORM_OP_REGISTRY.register()
def RandomCropOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomCrop")
@TRANSFORM_OP_REGISTRY.register()
def RandomRotation(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomRotation")
@TRANSFORM_OP_REGISTRY.register()
def RandomExtentOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomExtent")
@TRANSFORM_OP_REGISTRY.register()
def RandomFlipOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomFlip")
@TRANSFORM_OP_REGISTRY.register()
def RandomSaturationOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomSaturation")
@TRANSFORM_OP_REGISTRY.register()
def RandomLightingOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
return build_func(cfg, arg_str, is_train, name="RandomLighting")
@TRANSFORM_OP_REGISTRY.register()
def RandomSSDColorAugOp(
cfg: CfgNode, arg_str: str, is_train: bool
) -> List[Union[aug.Augmentation, d2T.Transform]]:
assert is_train
kwargs = _json_load(arg_str) if arg_str is not None else {}
assert isinstance(kwargs, dict)
assert "img_format" not in kwargs
return [ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT, **kwargs)]
# example repr: ResizeScaleOp::{"min_scale": 0.1, "max_scale": 2.0, "target_height": 1024, "target_width": 1024}
@TRANSFORM_OP_REGISTRY.register()
def ResizeScaleOp(
cfg: CfgNode, arg_str: Optional[str], is_train: bool
) -> List[aug.Augmentation]:
return build_func(cfg, arg_str, is_train, name="ResizeScale")
@TRANSFORM_OP_REGISTRY.register()
def MinIoURandomCropOp(
cfg: CfgNode, arg_str: Optional[str], is_train: bool
) -> List[aug.Augmentation]:
return build_func(cfg, arg_str, is_train, name="MinIoURandomCrop")
# example repr: FixedSizeCropOp::{"crop_size": [1024, 1024]}
@TRANSFORM_OP_REGISTRY.register()
def FixedSizeCropOp(
cfg: CfgNode, arg_str: Optional[str], is_train: bool
) -> List[aug.Augmentation]:
return build_func(cfg, arg_str, is_train, name="FixedSizeCrop")
# example repr: RandomResizeOp::{"shape_list": [[224, 224], [256, 256], [320, 320]]}
@TRANSFORM_OP_REGISTRY.register()
def RandomResizeOp(
cfg: CfgNode, arg_str: Optional[str], is_train: bool
) -> List[aug.Augmentation]:
return build_func(cfg, arg_str, is_train, name="RandomResize")
| d2go-main | d2go/data/transforms/d2_native.py |
#!/usr/bin/env python3
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import logging
from functools import partial
import torch.nn as nn
from d2go.config import CfgNode as CN
from d2go.modeling import modeling_hook as mh
from d2go.registry.builtin import MODELING_HOOK_REGISTRY
from d2go.trainer.helper import D2GO_WRAP_POLICY_REGISTRY
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
apply_activation_checkpointing,
checkpoint_wrapper,
CheckpointImpl,
)
logger = logging.getLogger(__name__)
def add_activation_checkpoint_configs(_C: CN):
_C.ACTIVATION_CHECKPOINT = CN()
_C.ACTIVATION_CHECKPOINT.REENTRANT = False
# Find autowrap policy at D2GO_WRAP_POLICY_REGISTRY, or use '' to disable autowrap
_C.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY = "always_wrap_policy"
# A list of layer cls names to wrap, case sensitive
_C.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS = []
@MODELING_HOOK_REGISTRY.register()
class ActivationCheckpointModelingHook(mh.ModelingHook):
"""Modeling hook that wraps model in activation checkpoint based on config"""
def apply(self, model: nn.Module) -> nn.Module:
logger.info("Activation Checkpointing is used")
wrapper_fn = partial(
checkpoint_wrapper,
checkpoint_impl=CheckpointImpl.NO_REENTRANT
if not self.cfg.ACTIVATION_CHECKPOINT.REENTRANT
else CheckpointImpl.REENTRANT,
)
policy_name = self.cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_POLICY
assert (
policy_name != "size_based_auto_wrap_policy"
), "ActivationCheckpointing should always be wrapped at module boundary"
policy_kwargs = {
"layer_names": self.cfg.ACTIVATION_CHECKPOINT.AUTO_WRAP_LAYER_CLS,
}
auto_wrap_policy = (
D2GO_WRAP_POLICY_REGISTRY.get(policy_name)(model, **policy_kwargs)
if policy_name != ""
else lambda _: True
)
apply_activation_checkpointing(
model, checkpoint_wrapper_fn=wrapper_fn, auto_wrap_policy=auto_wrap_policy
)
return model
def unapply(self, model: nn.Module) -> nn.Module:
raise NotImplementedError(
"ActivationCheckpointModelingHook.unapply() not implemented: can't unwrap an activation checkpoint module"
)
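# Illustrative config sketch (values assumed, not taken from a shipped config): enable the
# hook and wrap every TransformerEncoderLayer with non-reentrant activation checkpointing.
#
#   MODEL:
#     MODELING_HOOKS: ["ActivationCheckpointModelingHook"]
#   ACTIVATION_CHECKPOINT:
#     REENTRANT: False
#     AUTO_WRAP_POLICY: "layer_based_auto_wrap_policy"
#     AUTO_WRAP_LAYER_CLS: ["TransformerEncoderLayer"]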
| d2go-main | d2go/trainer/activation_checkpointing.py |
#!/usr/bin/env python3
# (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary.
import contextlib
import logging
from enum import Enum
from typing import Generator, Optional
import torch
import torch.nn as nn
from d2go.config import CfgNode as CN
from d2go.modeling.modeling_hook import ModelingHook
from d2go.registry.builtin import MODELING_HOOK_REGISTRY
from d2go.trainer.helper import D2GO_WRAP_POLICY_REGISTRY, parse_precision_from_string
from torch.ao.pruning import fqn_to_module
from torch.cuda.amp import GradScaler
from torch.distributed.fsdp.fully_sharded_data_parallel import (
BackwardPrefetch,
CPUOffload,
FullStateDictConfig,
FullyShardedDataParallel as FSDP,
LocalStateDictConfig,
MixedPrecision,
ShardedStateDictConfig,
ShardingStrategy,
StateDictType,
)
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler
logger = logging.getLogger(__name__)
def add_fsdp_configs(_C: CN):
_C.FSDP = CN()
_C.FSDP.ALGORITHM = "grad_optim" # 'grad_optim', 'full', 'hybrid', 'hybrid_zero2'
# Configs for fully sharded data parallel (fsdp)
# Check out https://pytorch.org/docs/stable/fsdp.html
# and docstring of torch.distributed.fsdp.fully_sharded_data_parallel
_C.FSDP.CPU_OFFLOAD = False
_C.FSDP.BACKWARD_PREFETCH = True
_C.FSDP.USE_ORIG_PARAMS = False
# Find autowrap policy at D2GO_WRAP_POLICY_REGISTRY, or use '' to disable autowrap
_C.FSDP.AUTO_WRAP_POLICY = "never_wrap_policy"
_C.FSDP.AUTO_WRAP_MIN_PARAMS = int(1e4)
# A list of layer cls names to wrap, case sensitive
_C.FSDP.AUTO_WRAP_LAYER_CLS = []
# Whether to use local state dict -- superseded by STATE_DICT_TYPE
_C.FSDP.USE_LOCAL_STATE_DICT = True
# State dict type to use when calling FSDPWrapper.state_dict() (used when saving).
# If None, defaults to checking the value of USE_LOCAL_STATE_DICT
_C.FSDP.STATE_DICT_TYPE = "SHARDED_STATE_DICT"
# Whether to offload state dict to cpu
_C.FSDP.STATE_DICT_CPU_OFFLOAD = False
# Whether to materialize state dict on rank 0
_C.FSDP.STATE_DICT_RANK0_ONLY = True
# The ignored modules, if any
_C.FSDP.IGNORED_MODULES = None
# Whether to prefetch in forward pass
_C.FSDP.FORWARD_PREFETCH_OPTION = "no"
# if False, this allows the CPU thread to schedule all-gathers without any extra synchronization
_C.FSDP.LIMIT_ALL_GATHERS = False
class ShardingAlgorithm(str, Enum):
"""
This enum specifies the sharding algorithm to be used by FullyShardedDataParallel (FSDP).
It matches the strings used in D2Go config with the enum class :class:`ShardingStrategy` used by Pytorch FSDP module:
"grad_optim" => ShardingAlgorithm.SHARD_GRAD_OP => ShardingStrategy.SHARD_GRAD_OP
"full" => ShardingAlgorithm.FULL_SHARD => ShardingStrategy.FULL_SHARD
"hybrid" => ShardingAlgorithm.HYBRID_SHARD => ShardingStrategy.HYBRID_SHARD
"hybrid_zero2" => ShardingAlgorithm.HYBRID_SHARD_ZERO2 => ShardingStrategy._HYBRID_SHARD_ZERO2
"""
SHARD_GRAD_OP = "grad_optim"
FULL_SHARD = "full"
HYBRID_SHARD = "hybrid"
HYBRID_SHARD_ZERO2 = "hybrid_zero2"
class ForwardPrefetchOption(str, Enum):
"""
This enum specifies the forward prefetch types to be used by FullyShardedDataParallel (FSDP).
"auto" => Use the default forward prefetch mechanism in FSDP.
"manual" => Use custom forward prefetch mechansim, implemented as training hooks.
"no" => No forward prefetch.
"""
AUTO = "auto"
MANUAL = "manual"
NO = "no"
def is_fsdp_enabled(cfg):
return "FSDPModelingHook" in cfg.MODEL.MODELING_HOOKS
def get_grad_scaler(cfg):
return ShardedGradScaler() if is_fsdp_enabled(cfg) else GradScaler()
class FSDPWrapper(FSDP):
def __init__(
self,
model,
state_dict_type: StateDictType,
load_state_dict_type: StateDictType,
amp_autocast_dtype: Optional[torch.dtype] = None,
state_dict_cpu_offload: bool = True,
state_dict_rank0_only: bool = True,
**fsdp_kwargs,
):
self.precision = amp_autocast_dtype
self.state_dict_type = state_dict_type
self.load_state_dict_type = load_state_dict_type
self.offload_to_cpu = state_dict_cpu_offload
self.rank0_only = state_dict_rank0_only
super().__init__(model, **fsdp_kwargs)
def forward(self, *args, **kwargs):
# Wrap forward() in autocast if mixed precision is enabled
if self.precision is not None and not torch.is_autocast_enabled():
from torch.cuda.amp import autocast
with autocast(dtype=self.precision):
return super().forward(*args, **kwargs)
else:
return super().forward(*args, **kwargs)
@contextlib.contextmanager
def state_dict_type_and_config(self, state_dict_type: StateDictType) -> Generator:
if state_dict_type == StateDictType.LOCAL_STATE_DICT:
# only offload_to_cpu=False is supported for local state dict
state_dict_config = LocalStateDictConfig(offload_to_cpu=False)
elif state_dict_type == StateDictType.FULL_STATE_DICT:
state_dict_config = FullStateDictConfig(
offload_to_cpu=self.offload_to_cpu, rank0_only=self.rank0_only
)
else:
state_dict_config = ShardedStateDictConfig(
offload_to_cpu=self.offload_to_cpu
)
with FSDP.state_dict_type(self, state_dict_type, state_dict_config):
yield
def state_dict(self, *args, **kwargs):
# NOTE: model.state_dict() needs to be called by all ranks because synchronization primitives are used
with self.state_dict_type_and_config(self.state_dict_type):
return super().state_dict(*args, **kwargs)
def load_state_dict(
self,
state_dict,
*args,
**kwargs,
):
with self.state_dict_type_and_config(self.load_state_dict_type):
return super().load_state_dict(state_dict, *args, **kwargs)
def build_fsdp(
model,
*,
sharding_algorithm: str = ShardingAlgorithm.FULL_SHARD,
auto_wrap_policy_name: str = "",
auto_wrap_policy_kwargs: Optional[dict] = None,
use_cpu_offload: bool = False,
use_backward_prefetch: bool = True,
param_dtype: Optional[torch.dtype] = None,
reduce_dtype: Optional[torch.dtype] = None,
buffer_dtype: Optional[torch.dtype] = None,
amp_autocast_dtype: Optional[torch.dtype] = None,
# TODO: to remove after migration to state_dict_type completes
use_local_state_dict: bool = False,
load_local_state_dict: bool = False,
state_dict_type: Optional[StateDictType] = None,
state_dict_cpu_offload: bool = True,
state_dict_rank0_only: bool = True,
ignored_modules: Optional[nn.Module] = None,
forward_prefetch: bool = False,
use_orig_params: bool = False,
device_id: Optional[int] = None,
limit_all_gathers: bool = False,
):
if sharding_algorithm == ShardingAlgorithm.SHARD_GRAD_OP:
sharding_strategy = ShardingStrategy.SHARD_GRAD_OP
logger.info("Optimizer + Gradient State Sharding (ZeRO-2) is used")
elif sharding_algorithm == ShardingAlgorithm.FULL_SHARD:
sharding_strategy = ShardingStrategy.FULL_SHARD
logger.info("Optimizer + Gradient + Horizontal Model Sharding (ZeRO-3) is used")
elif sharding_algorithm == ShardingAlgorithm.HYBRID_SHARD:
sharding_strategy = ShardingStrategy.HYBRID_SHARD
logger.info(
"Optimizer + Gradient + Horizontal Model Sharding (ZeRO-3) within a node is used"
)
elif sharding_algorithm == ShardingAlgorithm.HYBRID_SHARD_ZERO2:
sharding_strategy = ShardingStrategy._HYBRID_SHARD_ZERO2
logger.info(
"Optimizer + Gradient State Sharding (ZeRO-2) within a node is used"
)
else:
raise ValueError(
f"Invalid sharding algorithm for FSDP. Can be {ShardingAlgorithm.SHARD_GRAD_OP}, "
+ f"{ShardingAlgorithm.FULL_SHARD}, {ShardingAlgorithm.HYBRID_SHARD}, or {ShardingAlgorithm.HYBRID_SHARD_ZERO2}."
)
auto_wrap_policy = (
D2GO_WRAP_POLICY_REGISTRY.get(auto_wrap_policy_name)(
model, **auto_wrap_policy_kwargs
)
if auto_wrap_policy_name != ""
else None
)
cpu_offload = CPUOffload(offload_params=use_cpu_offload)
mixed_precision = MixedPrecision(
param_dtype=param_dtype,
reduce_dtype=reduce_dtype,
buffer_dtype=buffer_dtype,
keep_low_precision_grads=False,
)
backward_prefetch = (
BackwardPrefetch.BACKWARD_PRE
if use_backward_prefetch
else BackwardPrefetch.BACKWARD_POST
)
fsdp_kwargs = {
"sharding_strategy": sharding_strategy,
"cpu_offload": cpu_offload,
"mixed_precision": mixed_precision,
"auto_wrap_policy": auto_wrap_policy,
"backward_prefetch": backward_prefetch,
"ignored_modules": ignored_modules,
"forward_prefetch": forward_prefetch,
"use_orig_params": use_orig_params,
"device_id": torch.cuda.current_device() if not device_id else device_id,
"limit_all_gathers": limit_all_gathers,
}
# default to using use_local_state_dict if state_dict_type is None
if not state_dict_type:
_state_dict_type = (
StateDictType.LOCAL_STATE_DICT
if use_local_state_dict
else StateDictType.FULL_STATE_DICT
)
else:
_state_dict_type = state_dict_type
# load_state_dict_type defaults to load_local_state_dict
_load_state_dict_type = (
StateDictType.LOCAL_STATE_DICT
if load_local_state_dict
else StateDictType.FULL_STATE_DICT
)
wrapper_kwargs = {
"amp_autocast_dtype": amp_autocast_dtype,
"state_dict_type": _state_dict_type,
"load_state_dict_type": _load_state_dict_type,
"state_dict_cpu_offload": state_dict_cpu_offload,
"state_dict_rank0_only": state_dict_rank0_only,
}
return FSDPWrapper(model, **wrapper_kwargs, **fsdp_kwargs)
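# Minimal usage sketch (argument values are assumed for illustration; in D2Go, build_fsdp
# is normally driven by FSDPModelingHook below rather than called directly):
#
#   wrapped = build_fsdp(
#       model,
#       sharding_algorithm=ShardingAlgorithm.FULL_SHARD,
#       auto_wrap_policy_name="size_based_auto_wrap_policy",
#       auto_wrap_policy_kwargs={"min_num_params": 1e5},
#       amp_autocast_dtype=torch.bfloat16,
#       state_dict_type=StateDictType.SHARDED_STATE_DICT,
#       device_id=torch.cuda.current_device(),
#   )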
@MODELING_HOOK_REGISTRY.register()
class FSDPModelingHook(ModelingHook):
"""Modeling hook that wraps model in FSDP based on config"""
def apply(self, model: nn.Module) -> FSDPWrapper:
# SOLVER.AMP.ENABLED and SOLVER.AMP.PRECISION controls mixed precision for all parameters, buffers and reduce in FSDP
precision_dtype = (
parse_precision_from_string(self.cfg.SOLVER.AMP.PRECISION, lightning=False)
if self.cfg.SOLVER.AMP.ENABLED
else None
)
ignored_modules = None
if isinstance(self.cfg.FSDP.IGNORED_MODULES, list):
ignored_modules = []
for mod_name in self.cfg.FSDP.IGNORED_MODULES:
mod = fqn_to_module(model, mod_name)
assert mod is not None, f"Module {mod_name} cannot be found in model."
ignored_modules.append(mod)
forward_prefetch = (
self.cfg.FSDP.FORWARD_PREFETCH_OPTION == ForwardPrefetchOption.AUTO
)
_state_dict_type = (
StateDictType[self.cfg.FSDP.STATE_DICT_TYPE]
if self.cfg.FSDP.STATE_DICT_TYPE
else None
)
wrapped_model = build_fsdp(
model,
sharding_algorithm=self.cfg.FSDP.ALGORITHM,
auto_wrap_policy_name=self.cfg.FSDP.AUTO_WRAP_POLICY,
auto_wrap_policy_kwargs={
"min_num_params": self.cfg.FSDP.AUTO_WRAP_MIN_PARAMS,
"layer_names": self.cfg.FSDP.AUTO_WRAP_LAYER_CLS,
},
use_cpu_offload=self.cfg.FSDP.CPU_OFFLOAD,
use_backward_prefetch=self.cfg.FSDP.BACKWARD_PREFETCH,
param_dtype=precision_dtype,
reduce_dtype=precision_dtype,
buffer_dtype=None,
amp_autocast_dtype=precision_dtype,
use_local_state_dict=self.cfg.FSDP.USE_LOCAL_STATE_DICT,
load_local_state_dict=self.cfg.FSDP.USE_LOCAL_STATE_DICT,
state_dict_type=_state_dict_type,
state_dict_cpu_offload=self.cfg.FSDP.STATE_DICT_CPU_OFFLOAD,
state_dict_rank0_only=self.cfg.FSDP.STATE_DICT_RANK0_ONLY,
ignored_modules=ignored_modules,
forward_prefetch=forward_prefetch,
use_orig_params=self.cfg.FSDP.USE_ORIG_PARAMS,
device_id=torch.cuda.current_device(),
limit_all_gathers=self.cfg.FSDP.LIMIT_ALL_GATHERS,
)
return wrapped_model
def unapply(self, model: FSDPWrapper) -> nn.Module:
raise NotImplementedError(
"FSDPModelingHook.unapply() not implemented: can't unwrap a FSDP module"
)
| d2go-main | d2go/trainer/fsdp.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| d2go-main | d2go/trainer/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Trainer APIs that D2Go's binaries can build on top of.
"""
from dataclasses import dataclass
from typing import Dict, Optional
from d2go.evaluation.api import AccuracyDict, MetricsDict
@dataclass
class TrainNetOutput:
accuracy: AccuracyDict[float]
metrics: MetricsDict[float]
model_configs: Dict[str, str]
# TODO (T127368603): decide if `tensorboard_log_dir` should be part of output
tensorboard_log_dir: Optional[str] = None
@dataclass
class TestNetOutput:
accuracy: AccuracyDict[float]
metrics: MetricsDict[float]
# TODO (T127368603): decide if `tensorboard_log_dir` should be part of output
tensorboard_log_dir: Optional[str] = None
@dataclass
class EvaluatorOutput:
accuracy: AccuracyDict[float]
metrics: MetricsDict[float]
def do_train():
pass
def do_test():
pass
| d2go-main | d2go/trainer/api.py |
from functools import partial
from typing import Any, Callable, Iterable, List, Optional, Union
import torch
from detectron2.utils.registry import Registry
from torch.distributed.fsdp.wrap import (
always_wrap_policy as _always_wrap_policy,
size_based_auto_wrap_policy as _size_based_auto_wrap_policy,
transformer_auto_wrap_policy as _layer_based_auto_wrap_policy,
)
D2GO_WRAP_POLICY_REGISTRY = Registry("D2GO_WRAP_POLICY_REGISTRY")
def parse_precision_from_string(
precision: str, lightning=False
) -> Union[str, int, torch.dtype]:
"""
Convert our string format for precision to what Detectron2 / lightning Trainer expects, controlled by the *lightning* flag
"""
if precision == "float64":
return torch.float64 if not lightning else 64
if precision == "float32":
return torch.float32 if not lightning else 32
elif precision == "float16":
return torch.float16 if not lightning else 16
elif precision == "bfloat16":
return torch.bfloat16 if not lightning else "bf16"
else:
raise ValueError(f"Invalid precision dtype {precision}")
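# Illustrative examples (directly derivable from the branches above):
#   parse_precision_from_string("bfloat16")                  -> torch.bfloat16
#   parse_precision_from_string("bfloat16", lightning=True)  -> "bf16"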
def get_module_class_from_name(module, name):
"""
Gets a class from a module by its name. Code borrowed from HuggingFace
Args:
module (`torch.nn.Module`): The module to get the class from.
name (`str`): The name of the class.
"""
modules_children = list(module.children())
if module.__class__.__name__ == name:
return module.__class__
elif len(modules_children) == 0:
return
else:
for child_module in modules_children:
module_class = get_module_class_from_name(child_module, name)
if module_class is not None:
return module_class
def get_layer_cls_from_names(
model: Any, layer_names: Iterable[str]
) -> List[torch.nn.Module]:
"""
Get a list of layers from a model that match a list of layer names.
"""
layer_cls = []
for name in layer_names:
closure = get_module_class_from_name(model, name)
if closure is None:
raise Exception(
f"Could not find the layer class {name} to wrap in the model."
)
layer_cls.append(closure)
return layer_cls
@D2GO_WRAP_POLICY_REGISTRY.register()
def never_wrap_policy(model, **kwargs) -> Optional[Callable]:
"""
Don't wrap any child module, only wrap the root
"""
def never_wrap(*args, **kwargs):
return False
return never_wrap
@D2GO_WRAP_POLICY_REGISTRY.register()
def always_wrap_policy(model, **kwargs) -> Optional[Callable]:
"""
Wrapper for always_wrap_policy() from torch.distributed.fsdp.wrap
"""
return _always_wrap_policy
@D2GO_WRAP_POLICY_REGISTRY.register()
def size_based_auto_wrap_policy(
model, min_num_params=1e4, **kwargs
) -> Optional[Callable]:
"""
Wrapper for size_based_auto_wrap_policy() from torch.distributed.fsdp.wrap
"""
# Note: be careful when using auto wrap with shared parameters.
# Errors will be thrown if shared parameters reside in different FSDP units
return partial(
_size_based_auto_wrap_policy,
min_num_params=min_num_params,
)
@D2GO_WRAP_POLICY_REGISTRY.register()
def layer_based_auto_wrap_policy(
model, layer_names: Iterable[str], **kwargs
) -> Optional[Callable]:
"""
Wrapper for transformer_auto_wrap_policy() from torch.distributed.fsdp.wrap
Args:
layer_names: a list of layer names
"""
assert (
len(layer_names) > 0
), "layer_names should be a nonempty list of layer names contained in the model"
layer_cls = get_layer_cls_from_names(model, layer_names)
return partial(
_layer_based_auto_wrap_policy,
transformer_layer_cls=layer_cls,
)
| d2go-main | d2go/trainer/helper.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| d2go-main | d2go/trainer/lightning/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from typing import Dict
import pytorch_lightning as pl
from d2go.config import CfgNode, temp_defrost
from d2go.runner.lightning_task import GeneralizedRCNNTask
from d2go.utils.misc import dump_trained_model_configs
from detectron2.utils.events import EventStorage
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
FINAL_MODEL_CKPT = f"model_final{ModelCheckpoint.FILE_EXTENSION}"
def _do_train(
cfg: CfgNode, trainer: pl.Trainer, task: GeneralizedRCNNTask
) -> Dict[str, str]:
"""Runs the training loop with given trainer and task.
Args:
cfg: The normalized ConfigNode for this D2Go Task.
trainer: PyTorch Lightning trainer.
task: Lightning module instance.
Returns:
A map of model name to trained model config path.
"""
with EventStorage() as storage:
task.storage = storage
trainer.fit(task)
final_ckpt = os.path.join(cfg.OUTPUT_DIR, FINAL_MODEL_CKPT)
trainer.save_checkpoint(final_ckpt) # for validation monitor
trained_cfg = cfg.clone()
with temp_defrost(trained_cfg):
trained_cfg.MODEL.WEIGHTS = final_ckpt
model_configs = dump_trained_model_configs(
cfg.OUTPUT_DIR, {"model_final": trained_cfg}
)
return model_configs
def _do_test(trainer: pl.Trainer, task: GeneralizedRCNNTask):
"""Runs the evaluation with a pre-trained model.
Args:
trainer: PyTorch Lightning trainer.
task: Lightning module instance.
"""
with EventStorage() as storage:
task.storage = storage
trainer.test(task)
| d2go-main | d2go/trainer/lightning/training_loop.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import networkx as nx
import numpy as np
import os
import tempfile
import torch
import torch.nn as nn
from networkx.algorithms.bipartite.matrix import from_biadjacency_matrix
from scipy.sparse import csr_matrix
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm
from transformers import AutoTokenizer
import regex
import collections
from glob import glob
class CRISSAligner(object):
def __init__(self, path='criss/criss-3rd.pt',
args_path='criss/args.pt',
tokenizer='facebook/mbart-large-cc25', device='cpu', distortion=0,
matching_method='a'
):
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
self.device = device
args = torch.load(args_path)
task = tasks.setup_task(args)
models, _model_args = checkpoint_utils.load_model_ensemble(
path.split(':'),
arg_overrides=eval('{}'),
task=task
)
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
model = model.to(self.device)
self.model = EnsembleModel(models).to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
self.distortion = distortion
self.matching_method = matching_method
def get_embed(self, bpe_lists, langcodes=('en_XX', 'en_XX')):
vectors = list()
for i, bpe_list in enumerate(bpe_lists):
input_ids = self.tokenizer.convert_tokens_to_ids(bpe_list + ['</s>', langcodes[i]])
encoder_input = {
'src_tokens': torch.tensor(input_ids).view(1, -1).to(self.device),
'src_lengths': torch.tensor([len(input_ids)]).to(self.device)
}
encoder_outs = self.model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.cpu().squeeze(1).numpy().astype(np.float32)
vectors.append(np_encoder_outs[:-2, :])
return vectors
def get_word_aligns(self, src_sent, trg_sent, langcodes=None, fwd_dict=None, bwd_dict=None, debug=False):
l1_tokens = [self.tokenizer.tokenize(word) for word in src_sent]
l2_tokens = [self.tokenizer.tokenize(word) for word in trg_sent]
bpe_lists = [[bpe for w in sent for bpe in w] for sent in [l1_tokens, l2_tokens]]
l1_b2w_map = list()
for i, wlist in enumerate(l1_tokens):
l1_b2w_map += [i for _ in wlist]
l2_b2w_map = list()
for i, wlist in enumerate(l2_tokens):
l2_b2w_map += [i for _ in wlist]
vectors = self.get_embed(list(bpe_lists), langcodes)
sim = (cosine_similarity(vectors[0], vectors[1]) + 1.0) / 2.0
sim = self.apply_distortion(sim, self.distortion)
all_mats = dict()
fwd, bwd = self.get_alignment_matrix(sim)
if self.matching_method.find('a') != -1:
all_mats['inter'] = fwd * bwd
if self.matching_method.find('i') != -1:
all_mats['itermax'] = self.iter_max(sim)
if self.matching_method.find('m') != -1:
all_mats['mwmf'] = self.get_max_weight_match(sim)
if self.matching_method.find('f') != -1:
all_mats['fixed'] = fwd * bwd
aligns = {k: set() for k in all_mats}
for key in aligns:
for i in range(vectors[0].shape[0]):
for j in range(vectors[1].shape[0]):
if all_mats[key][i, j] > 1e-10:
aligns[key].add((l1_b2w_map[i], l2_b2w_map[j]))
if 'fixed' in aligns:
src_aligned = set([x[0] for x in aligns['fixed']])
trg_aligned = set([x[1] for x in aligns['fixed']])
candidate_alignment = list()
for i, sw in enumerate(src_sent):
sw = sw.lower()
if i not in src_aligned:
for j, tw in enumerate(trg_sent):
tw = tw.lower()
if tw in fwd_dict[sw]:
ri = i / len(src_sent)
rj = j / len(trg_sent)
if -0.2 < ri - rj < 0.2:
candidate_alignment.append((sw, tw, i, j, fwd_dict[sw][tw], 0))
for j, tw in enumerate(trg_sent):
tw = tw.lower()
if j not in trg_aligned:
for i, sw in enumerate(src_sent):
sw = sw.lower()
if sw in bwd_dict[tw]:
ri = i / len(src_sent)
rj = j / len(trg_sent)
if -0.2 < ri - rj < 0.2:
candidate_alignment.append((sw, tw, i, j, bwd_dict[tw][sw], 1))
candidate_alignment = sorted(candidate_alignment, key=lambda x: -x[-2])
for sw, tw, i, j, val, d in candidate_alignment:
if regex.match(r'\p{P}', sw) or regex.match(r'\p{P}', tw):
continue
if val < 0.05:
break
if d == 0:
if i in src_aligned:
continue
if (j not in trg_aligned) or ((i-1, j) in aligns['fixed']) or ((i+1, j) in aligns['fixed']):
aligns['fixed'].add((i, j))
src_aligned.add(i)
trg_aligned.add(j)
if debug:
print(sw, tw, i, j, val, d)
else:
if j in trg_aligned:
continue
if (i not in src_aligned) or ((i, j+1) in aligns['fixed']) or ((i, j-1) in aligns['fixed']):
aligns['fixed'].add((i, j))
src_aligned.add(i)
trg_aligned.add(j)
if debug:
print(sw, tw, i, j, val, d)
for ext in aligns:
aligns[ext] = sorted(aligns[ext])
return aligns
@staticmethod
def get_max_weight_match(sim):
if nx is None:
raise ValueError("networkx must be installed to use match algorithm.")
def permute(edge):
if edge[0] < sim.shape[0]:
return edge[0], edge[1] - sim.shape[0]
else:
return edge[1], edge[0] - sim.shape[0]
G = from_biadjacency_matrix(csr_matrix(sim))
matching = nx.max_weight_matching(G, maxcardinality=True)
matching = [permute(x) for x in matching]
matching = sorted(matching, key=lambda x: x[0])
res_matrix = np.zeros_like(sim)
for edge in matching:
res_matrix[edge[0], edge[1]] = 1
return res_matrix
@staticmethod
def iter_max(sim_matrix, max_count=2):
alpha_ratio = 0.9
m, n = sim_matrix.shape
forward = np.eye(n)[sim_matrix.argmax(axis=1)] # m x n
backward = np.eye(m)[sim_matrix.argmax(axis=0)] # n x m
inter = forward * backward.transpose()
if min(m, n) <= 2:
return inter
new_inter = np.zeros((m, n))
count = 1
while count < max_count:
mask_x = 1.0 - np.tile(inter.sum(1)[:, np.newaxis], (1, n)).clip(0.0, 1.0)
mask_y = 1.0 - np.tile(inter.sum(0)[np.newaxis, :], (m, 1)).clip(0.0, 1.0)
mask = ((alpha_ratio * mask_x) + (alpha_ratio * mask_y)).clip(0.0, 1.0)
mask_zeros = 1.0 - ((1.0 - mask_x) * (1.0 - mask_y))
if mask_x.sum() < 1.0 or mask_y.sum() < 1.0:
mask *= 0.0
mask_zeros *= 0.0
new_sim = sim_matrix * mask
fwd = np.eye(n)[new_sim.argmax(axis=1)] * mask_zeros
bac = np.eye(m)[new_sim.argmax(axis=0)].transpose() * mask_zeros
new_inter = fwd * bac
if np.array_equal(inter + new_inter, inter):
break
inter = inter + new_inter
count += 1
return inter
@staticmethod
def get_alignment_matrix(sim_matrix):
m, n = sim_matrix.shape
forward = np.eye(n)[sim_matrix.argmax(axis=1)] # m x n
backward = np.eye(m)[sim_matrix.argmax(axis=0)] # n x m
return forward, backward.transpose()
@staticmethod
def apply_distortion(sim_matrix, ratio=0.5):
shape = sim_matrix.shape
if (shape[0] < 2 or shape[1] < 2) or ratio == 0.0:
return sim_matrix
pos_x = np.array([[y / float(shape[1] - 1) for y in range(shape[1])] for x in range(shape[0])])
pos_y = np.array([[x / float(shape[0] - 1) for x in range(shape[0])] for y in range(shape[1])])
distortion_mask = 1.0 - ((pos_x - np.transpose(pos_y)) ** 2) * ratio
return np.multiply(sim_matrix, distortion_mask)
class Aligner(object):
def __init__(self, aligner_type, **kwargs):
self.aligner_type = aligner_type
if aligner_type == 'simalign':
from simalign import SentenceAligner
d = 'cuda' if torch.cuda.is_available() else 'cpu'
self.aligner = SentenceAligner('xlm-roberta-base', device=d, **kwargs)
elif aligner_type in ['fastalign', 'giza++']:
pass
elif aligner_type == 'criss-align':
self.aligner = CRISSAligner(**kwargs)
else:
raise Exception('Aligner type not supported.')
def align_sents(self, sent_pairs, train_file=None, **kwargs):
aligns = list()
if self.aligner_type in ['simalign', 'criss-align']:
for src, trg in tqdm(sent_pairs):
src = src.strip().split()
trg = trg.strip().split()
align_info = self.aligner.get_word_aligns(src, trg, **kwargs)
result = None
for key in align_info:
if result is None:
result = set(align_info[key])
else:
result = result.intersection(align_info[key])
aligns.append(' '.join(['-'.join([str(x) for x in item]) for item in sorted(result)]))
elif self.aligner_type == 'fastalign':
temp_dir = tempfile.TemporaryDirectory(prefix='fast-align')
with open(os.path.join(temp_dir.name, 'bitext.txt'), 'w') as fout:
for ss, ts in sent_pairs:
fout.write(ss + ' ||| ' + ts + '\n')
fout.close()
if train_file is not None:
assert os.path.exists(train_file)
os.system(f'cat {train_file} >> {temp_dir.name}/bitext.txt')
os.system(f'fast_align -d -o -v -i {temp_dir.name}/bitext.txt > {temp_dir.name}/fwd.align')
os.system(f'fast_align -d -o -v -r -i {temp_dir.name}/bitext.txt > {temp_dir.name}/bwd.align')
os.system(f'atools -i {temp_dir.name}/fwd.align -j {temp_dir.name}/bwd.align -c grow-diag-final-and > {temp_dir.name}/final.align')
aligns = [x.strip() for x in open(f'{temp_dir.name}/final.align').readlines()][:len(sent_pairs)]
elif self.aligner_type == 'giza++':
assert train_file is not None
giza_path = '/private/home/fhs/codebase/lexind/fairseq/2-word-align-final/giza-pp/GIZA++-v2/GIZA++'
temp_dir = tempfile.TemporaryDirectory(prefix='giza++')
d_src = collections.Counter()
d_trg = collections.Counter()
w2id_src = collections.defaultdict()
w2id_trg = collections.defaultdict()
for sent_pair in open(train_file):
ss, ts = regex.split(r'\|\|\|', sent_pair.lower())
for w in ss.strip().split():
d_src[w] += 1
for w in ts.strip().split():
d_trg[w] += 1
for ss, ts in sent_pairs:
ss = ss.lower()
ts = ts.lower()
for w in ss.strip().split():
d_src[w] += 1
for w in ts.strip().split():
d_trg[w] += 1
with open(os.path.join(temp_dir.name, 's.vcb'), 'w') as fout:
for i, w in enumerate(sorted(d_src.keys())):
print(i + 1, w, d_src[w], file=fout)
w2id_src[w] = i + 1
fout.close()
with open(os.path.join(temp_dir.name, 't.vcb'), 'w') as fout:
for i, w in enumerate(sorted(d_trg.keys())):
print(i + 1, w, d_trg[w], file=fout)
w2id_trg[w] = i + 1
fout.close()
with open(os.path.join(temp_dir.name, 'bitext.train'), 'w') as fout:
for sent_pair in open(train_file):
ss, ts = regex.split(r'\|\|\|', sent_pair.lower())
print(1, file=fout)
print(' '.join([str(w2id_src[x]) for x in ss.strip().split()]), file=fout)
print(' '.join([str(w2id_trg[x]) for x in ts.strip().split()]), file=fout)
fout.close()
with open(os.path.join(temp_dir.name, 'bitext.test'), 'w') as fout:
for ss, ts in sent_pairs:
ss = ss.lower()
ts = ts.lower()
print(1, file=fout)
print(' '.join([str(w2id_src[x]) for x in ss.strip().split()]), file=fout)
print(' '.join([str(w2id_trg[x]) for x in ts.strip().split()]), file=fout)
fout.close()
os.chdir(f'{temp_dir.name}')
os.system(f'{giza_path} -S {temp_dir.name}/s.vcb -T {temp_dir.name}/t.vcb -C {temp_dir.name}/bitext.train -tc {temp_dir.name}/bitext.test')
# read giza++ results
for i, line in enumerate(open(glob(f'{temp_dir.name}/*tst.A3*')[0])):
if i % 3 == 2:
align = list()
is_trg = False
is_null = False
src_idx = 0
for item in line.strip().split():
if item == '({':
is_trg = True
elif item == '})':
is_trg = False
elif is_trg:
if not is_null:
trg_idx = int(item)
align.append(f'{src_idx}-{trg_idx}')
elif item != 'NULL':
src_idx += 1
is_null = False
else:
is_null = True
aligns.append(' '.join(align))
temp_dir.cleanup()
return aligns
class CRISSWrapper(object):
def __init__(self, path='criss/criss-3rd.pt', args_path='criss/args.pt',
tokenizer='facebook/mbart-large-cc25', device='cpu'):
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
self.device = device
args = torch.load(args_path)
task = tasks.setup_task(args)
models, _model_args = checkpoint_utils.load_model_ensemble(
path.split(':'),
arg_overrides=eval('{}'),
task=task
)
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
model = model.to(self.device)
self.model = EnsembleModel(models).to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
def embed(self, words, langcode='en_XX'):
lbs, rbs = list(), list()
tokens, word_ids = list(), list()
for word in words:
word_tokens = self.tokenizer.tokenize(word)
lbs.append(len(tokens))
tokens.extend(word_tokens)
rbs.append(len(tokens))
tokens = [tokens + ['</s>', langcode]]
lengths = [len(x) for x in tokens]
max_length = max(lengths)
for i in range(len(tokens)):
word_ids.append(self.tokenizer.convert_tokens_to_ids(['<pad>'] * (max_length - len(tokens[i])) + tokens[i]))
encoder_input = {
'src_tokens': torch.tensor(word_ids).to(self.device),
'src_lengths': torch.tensor(lengths).to(self.device)
}
encoder_outs = self.model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.float().detach()
word_features = list()
for i, lb in enumerate(lbs):
rb = rbs[i]
word_features.append(np_encoder_outs[lb:rb].mean(0))
word_features = torch.cat(word_features, dim=0)
return word_features
class WordAligner(nn.Module):
def __init__(self, input_dim, hidden_dims, output_dim=1, feature_transform=3):
super(WordAligner, self).__init__()
layers = list()
hidden_dims = [input_dim] + hidden_dims
for i in range(1, len(hidden_dims)):
layers.append(nn.Linear(hidden_dims[i-1], hidden_dims[i]))
layers.append(nn.ReLU())
layers.append(nn.Linear(hidden_dims[-1], output_dim))
layers.append(nn.Sigmoid())
self.model = nn.Sequential(*layers)
self.bias = nn.Parameter(torch.ones(feature_transform))
self.feature_transform = feature_transform
def forward(self, x):
transformed_features = torch.cat([x[:, :-self.feature_transform], torch.log(x[:, -self.feature_transform:] + self.bias.abs())], dim=-1)
return self.model(transformed_features)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
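# Illustrative instantiation (dimensions assumed): 7 input features, the last 5 of which
# are log-transformed with a learned bias before the MLP; this mirrors the
# WordAligner(5 + 2, hiddens, 3, 5) construction used by the training/test scripts.
#
#   aligner = WordAligner(input_dim=7, hidden_dims=[8], output_dim=3, feature_transform=5)
#   scores = aligner(torch.rand(4, 7))  # -> tensor of shape (4, 3), values in (0, 1)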
| bitext-lexind-main | align/models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from train import *
from data import AlignDataset
import collections
import copy
import numpy as np
from models import Aligner
def eval_align(gold, silver, adjust=0):
assert len(gold) == len(silver)
a_size = s_size = p_size = ap_inter = as_inter = 0
for i, g in enumerate(gold):
s = set([
tuple(map(lambda x: int(x), item.split('-')))
for item in filter(lambda x: x.find('p') == -1, g.split())
])
p = set([tuple(map(lambda x: int(x), regex.split('-|p', item))) for item in g.split()])
a = set([tuple(map(lambda x: int(x) + adjust, regex.split('-', item))) for item in silver[i].split()])
ap_inter += len(a.intersection(p))
as_inter += len(a.intersection(s))
a_size += len(a)
p_size += len(p)
s_size += len(s)
prec = ap_inter / a_size if a_size > 0 else 0
rec = as_inter / s_size if s_size > 0 else 0
return {
'prec': prec,
'rec': rec,
'f1': 2 * prec * rec / (prec + rec) if s_size > 0 and a_size > 0 else 0,
'aer': 1 - (as_inter + ap_inter) / (a_size + s_size)
}
def inference(simalign, probs, threshold):
n, m = probs.shape
ids = probs.view(-1).argsort(descending=True)
f = lambda x, m: (x.item()//m, x.item()%m)
src2trg = collections.defaultdict(set)
trg2src = collections.defaultdict(set)
results = set()
for pair in simalign.split():
x, y = pair.split('-')
x = int(x)
y = int(y)
src2trg[x].add(y)
trg2src[y].add(x)
results.add((x, y))
for idx in ids:
x, y = f(idx, m)
if probs[x, y] < threshold: # too low similarity
break
if (x not in src2trg) and (y not in trg2src): # perfect company, keep
src2trg[x].add(y)
trg2src[y].add(x)
results.add((x, y))
elif (x in src2trg) and (y in trg2src): # both have other companies, skip
continue
elif x in src2trg: # x has company, but y is still addable
if y == max(src2trg[x]) + 1 or y == min(src2trg[x]) - 1:
src2trg[x].add(y)
trg2src[y].add(x)
results.add((x, y))
else:
if x == max(trg2src[y]) + 1 or x == min(trg2src[y]) - 1:
src2trg[x].add(y)
trg2src[y].add(x)
results.add((x, y))
results = ' '.join([f'{x}-{y}' for x, y in sorted(results)])
return results
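# Illustrative call (toy numbers assumed): merge a 2x2 score matrix into an existing
# SimAlign output, keeping only new pairs whose score clears the threshold.
#
#   probs = torch.tensor([[0.9, 0.1], [0.2, 0.8]])
#   inference("0-0", probs, threshold=0.5)  # -> "0-0 1-1"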
def test(configs, criss, dataset, simaligns, threshold=0.5):
setup_configs(configs)
os.system(f'mkdir -p {configs.save_path}')
torch.save(configs, configs.save_path + '/configs.pt')
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path,
configs.src_lang, configs.trg_lang, configs.reversed
)
aligner = WordAligner(5 + (2 if configs.use_criss else 0), configs.hiddens, 3, 5).to(configs.device)
model_path = configs.save_path+f'/model.pt'
results = list()
aligner.load_state_dict(torch.load(model_path))
for idx, batch in enumerate(tqdm(dataset.sent_pairs)):
ss, ts = batch
ss = ss.split()
ts = ts.split()
if criss is not None:
semb = criss.embed(ss, langcode=configs.src_lang)
temb = criss.embed(ts, langcode=configs.trg_lang)
cos_matrix = cos(semb.unsqueeze(1), temb.unsqueeze(0)).unsqueeze(-1).unsqueeze(-1)
ip_matrix = (semb.unsqueeze(1) * temb.unsqueeze(0)).sum(-1).unsqueeze(-1).unsqueeze(-1)
feat_matrix = torch.cat((cos_matrix, ip_matrix), dim=-1)
word_pairs = list()
criss_features = list()
for i, sw in enumerate(ss):
for j, tw in enumerate(ts):
word_pairs.append((sw, tw))
criss_features.append(feat_matrix[i, j])
scores = extract_scores(word_pairs, criss_features, aligner, info, configs).reshape(len(ss), len(ts), -1)
scores = scores.softmax(-1)
arrange = torch.arange(3).to(configs.device).view(1, 1, -1)
scores = (scores * arrange).sum(-1)
result = inference(simaligns[idx], scores, threshold)
results.append(result)
return results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-g', '--ground-truth', type=str, default='./data/align/', help='path to ground-truth')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-m', '--model-path', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.model_path,
'batch_size': 128,
'epochs': 100,
'device': args.device,
'hiddens': [8],
'use_criss': True,
'src_lang': args.source,
'trg_lang': args.target,
'threshold': 1.0
}
)
criss = CRISSWrapper(device=configs.device)
dataset = collections.defaultdict(None)
simaligner = Aligner(
'criss-align', distortion=0,
path='criss/criss-3rd.pt', args_path='criss/args.pt',
matching_method='a'
)
lp = (args.source, args.target)
dset = AlignDataset(args.ground_truth, f'{args.source.split("_")[0]}-{args.target.split("_")[0]}')
simaligns = simaligner.align_sents(dset.sent_pairs, langcodes=lp)
aligns = test(configs, criss, dset, simaligns, configs.threshold)
results = eval_align(dset.ground_truth, aligns, 1)
print(results)
from IPython import embed; embed(using=False)
| bitext-lexind-main | align/test.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import regex
from data import AlignDataset
from evaluate import evaluate
from models import Aligner
import collections
resdict = collections.defaultdict(None)
aligner = Aligner(
'criss-align', distortion=0,
path='criss/criss-3rd.pt',
args_path='criss/args.pt',
matching_method='a'
)
dset = AlignDataset('data/align/', 'de-en')
aligns = aligner.align_sents(dset.sent_pairs, langcodes=('de_DE', 'en_XX'))
res = evaluate(dset.ground_truth, aligns, 1)
print('de-en:', res)
| bitext-lexind-main | align/eval_simalign_criss.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from glob import glob
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from models import CRISSWrapper, WordAligner
from data import BitextAlignmentDataset
cos = torch.nn.CosineSimilarity(dim=-1)
def setup_configs(configs):
configs.stats_path = configs.save_path + '/stats.pt'
def collect_bitext_stats(bitext_path, align_path, save_path, src_lang, trg_lang, is_reversed=False):
stats_path = save_path + '/stats.pt'
freq_path = save_path + '/freqs.pt'
if os.path.exists(stats_path):
coocc, semi_matched_coocc, matched_coocc = torch.load(stats_path)
else:
coocc = collections.defaultdict(collections.Counter)
semi_matched_coocc = collections.defaultdict(collections.Counter)
matched_coocc = collections.defaultdict(collections.Counter)
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
os.system(f'cat {align_path} > {tmpdir.name}/aligns.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
aligns = open(f'{tmpdir.name}/aligns.txt').readlines()
tmpdir.cleanup()
assert len(bitext) == len(aligns)
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
align = [tuple(x if not is_reversed else reversed(x)) for x in json.loads(aligns[i])['inter']]
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
src_words = src_sent.lower().split()
trg_words = trg_sent.lower().split()
src_cnt = collections.Counter([x[0] for x in align])
trg_cnt = collections.Counter([x[1] for x in align])
for x, sw in enumerate(src_words):
for y, tw in enumerate(trg_words):
if (x, y) in align:
semi_matched_coocc[sw][tw] += 1
if src_cnt[x] == 1 and trg_cnt[y] == 1:
matched_coocc[sw][tw] += 1
coocc[sw][tw] += 1
torch.save((coocc, semi_matched_coocc, matched_coocc), stats_path)
if os.path.exists(freq_path):
freq_src, freq_trg = torch.load(freq_path)
else:
freq_src = collections.Counter()
freq_trg = collections.Counter()
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
tmpdir.cleanup()
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
for w in src_sent.split():
freq_src[w] += 1
for w in trg_sent.split():
freq_trg[w] += 1
torch.save((freq_src, freq_trg), freq_path)
return coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg
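# The three counters built above differ only in how much alignment evidence a
# word pair needs: for every position pair (x, y) in a sentence pair,
# coocc[sw][tw] is always incremented, semi_matched_coocc[sw][tw] only when
# (x, y) is an 'inter' alignment link, and matched_coocc[sw][tw] only when, in
# addition, x and y are each linked exactly once in that sentence.
# freq_src / freq_trg (second pass) are plain unigram counts over the bitext.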
def extract_scores(batch, criss_features, aligner, info, configs):
coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg = info
all_scores = list()
for i in range(0, len(batch), configs.batch_size):
subbatch = batch[i:i+configs.batch_size]
src_words, trg_words = zip(*subbatch)
features = torch.tensor(
[
[
matched_coocc[x[0]][x[1]],
semi_matched_coocc[x[0]][x[1]],
coocc[x[0]][x[1]],
freq_src[x[0]],
freq_trg[x[1]]
] for x in subbatch
]
).float().to(configs.device).reshape(-1, 5)
if configs.use_criss:
subbatch_crissfeat = torch.cat(criss_features[i:i+configs.batch_size], dim=0)
features = torch.cat((subbatch_crissfeat, features), dim=-1).detach()
scores = aligner(features).squeeze(-1)
all_scores.append(scores)
return torch.cat(all_scores, dim=0)
def train(configs, logging_steps=50000):
setup_configs(configs)
os.system(f'mkdir -p {configs.save_path}')
torch.save(configs, configs.save_path + '/configs.pt')
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path,
configs.src_lang, configs.trg_lang, configs.reversed
)
if configs.use_criss:
criss = CRISSWrapper(device=configs.device)
else:
criss = None
dataset = BitextAlignmentDataset(configs.bitext_path, configs.align_path)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=dataset.collate_fn)
aligner = WordAligner(5 + (2 if configs.use_criss else 0), configs.hiddens, 3, 5).to(configs.device)
optimizer = torch.optim.Adam(aligner.parameters(), lr=.0005)
for epoch in range(configs.epochs):
model_cnt = 0
total_loss = total_cnt = 0
bar = tqdm(dataloader)
for idx, batch in enumerate(bar):
(ss, ts), edges = batch[0]
if criss is not None:
semb = criss.embed(ss, langcode=configs.src_lang)
temb = criss.embed(ts, langcode=configs.trg_lang)
cos_matrix = cos(semb.unsqueeze(1), temb.unsqueeze(0)).unsqueeze(-1).unsqueeze(-1)
ip_matrix = (semb.unsqueeze(1) * temb.unsqueeze(0)).sum(-1).unsqueeze(-1).unsqueeze(-1)
feat_matrix = torch.cat((cos_matrix, ip_matrix), dim=-1)
# adding contexualized embeddings here
training_sets = collections.defaultdict(list)
criss_features = collections.defaultdict(list)
for i, sw in enumerate(ss):
for j, tw in enumerate(ts):
label = edges[i, j]
training_sets[label].append((sw, tw))
if criss is not None:
criss_features[label].append(feat_matrix[i, j])
max_len = max(len(training_sets[k]) for k in training_sets)
training_set = list()
criss_feats = list()
targets = list()
for key in training_sets:
training_set += training_sets[key] * (max_len // len(training_sets[key]))
criss_feats += criss_features[key] * (max_len // len(training_sets[key]))
targets += [key] * len(training_sets[key]) * (max_len // len(training_sets[key]))
targets = torch.tensor(targets).long().to(configs.device)
scores = extract_scores(training_set, criss_feats, aligner, info, configs)
optimizer.zero_grad()
loss = nn.CrossEntropyLoss()(scores, targets)
loss.backward()
optimizer.step()
total_loss += loss.item() * len(batch)
total_cnt += len(batch)
bar.set_description(f'loss={total_loss / total_cnt:.5f}')
if (idx + 1) % logging_steps == 0:
print(f'Epoch {epoch}, step {idx+1}, loss = {total_loss / total_cnt:.5f}', flush=True)
torch.save(aligner.state_dict(), configs.save_path + f'/model.pt')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-o', '--output', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.output,
'batch_size': 128,
'epochs': 100,
'device': args.device,
'hiddens': [8],
'use_criss': True,
'src_lang': args.source,
'trg_lang': args.target
}
)
train(configs)
| bitext-lexind-main | align/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import regex
def evaluate(gold, silver, offset=0):
assert len(gold) == len(silver)
a_size = s_size = p_size = ap_inter = as_inter = 0
for i, g in enumerate(gold):
s = set([
tuple(map(lambda x: int(x), item.split('-')))
for item in filter(lambda x: x.find('p') == -1, g.split())
])
p = set([tuple(map(lambda x: int(x), regex.split('-|p', item))) for item in g.split()])
a = set([tuple(map(lambda x: int(x) + offset, regex.split('-', item))) for item in silver[i].split()])
ap_inter += len(a.intersection(p))
as_inter += len(a.intersection(s))
a_size += len(a)
p_size += len(p)
s_size += len(s)
prec = ap_inter / a_size if a_size > 0 else 0
rec = as_inter / s_size if s_size > 0 else 0
return {
'prec': prec,
'rec': rec,
        'f1': 2 * prec * rec / (prec + rec) if prec + rec > 0 else 0,
'aer': 1 - (as_inter + ap_inter) / (a_size + s_size)
}
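# Worked example (hypothetical data): gold = ['0-0 1-1 2p3'] has sure links
# S = {(0,0), (1,1)} and possible links P = S + {(2,3)}; silver = ['1-1 2-2 3-4']
# with offset=-1 gives A = {(0,0), (1,1), (2,3)}, so prec = |A&P|/|A| = 3/3,
# rec = |A&S|/|S| = 2/2, f1 = 1.0 and aer = 1 - (2 + 3)/(3 + 2) = 0.0.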
| bitext-lexind-main | align/evaluate.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from torch.utils.data import DataLoader, Dataset
import regex
import json
import numpy as np
import os
class BitextAlignmentDataset(Dataset):
def __init__(self, bitext_path, alignment_path):
super(BitextAlignmentDataset, self).__init__()
self.bitext_path = bitext_path
self.alignment_path = alignment_path
bitext = [regex.split(r'\|\|\|', x.strip()) for x in open(bitext_path)]
align = open(alignment_path).readlines()
self.bitext, self.edges = self.filter(bitext, align)
assert len(self.bitext) == len(self.edges)
@staticmethod
def filter(bitext, align):
real_bitext = list()
edges = list()
for i, a in enumerate(align):
try:
a = json.loads(a)
if len(bitext[i]) == 2:
bitext[i][0] = bitext[i][0].split()
bitext[i][1] = bitext[i][1].split()
real_bitext.append(bitext[i])
edge_info = np.zeros((len(bitext[i][0]), len(bitext[i][1])))
for x, y in a['inter']:
edge_info[x, y] = 2
for x, y in a['itermax']:
if edge_info[x, y] == 0:
edge_info[x, y] = 1
edges.append(edge_info)
except:
continue
return real_bitext, edges
def __getitem__(self, index):
return self.bitext[index], self.edges[index]
def __len__(self):
return len(self.bitext)
@staticmethod
def collate_fn(batch):
return batch
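# Expected on-disk formats (as parsed by filter() above), illustrated with
# hypothetical content:
#   bitext file   : one pair per line, "source sentence ||| target sentence"
#   alignment file: one JSON object per line, e.g.
#                   {"inter": [[0, 0], [1, 2]], "itermax": [[0, 0], [1, 2], [2, 1]]}
# 'inter' links become label 2 in the edge matrix, extra 'itermax' links label 1,
# and everything else label 0.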
class AlignDataset(object):
def __init__(self, path, langs, split='test'):
if langs == 'de-en':
src_sents = [x.strip() for x in open(os.path.join(path, langs, 'de'), encoding='iso-8859-1').readlines()][:-1]
trg_sents = [x.strip() for x in open(os.path.join(path, langs, 'en'), encoding='iso-8859-1').readlines()][:-1]
self.ground_truth = self.load_std_file(os.path.join(path, langs, 'alignmentDeEn.talp'))[:-1]
elif langs == 'ro-en' or langs == 'en-fr':
src_id2s = dict()
trg_id2s = dict()
for fpair in open(os.path.join(path, langs, split, f'FilePairs.{split}')):
sf, tf = fpair.strip().split()
for line in open(os.path.join(path, langs, split, sf), encoding='iso-8859-1'):
matching = regex.match(r'<s snum=([0-9]*)>(.*)</s>', line.strip())
assert matching is not None
idx = matching.group(1)
sent = matching.group(2).strip()
src_id2s[idx] = sent
for line in open(os.path.join(path, langs, split, tf), encoding='iso-8859-1'):
matching = regex.match(r'<s snum=([0-9]*)>(.*)</s>', line.strip())
assert matching is not None
idx = matching.group(1)
sent = matching.group(2).strip()
trg_id2s[idx] = sent
src_sents = [src_id2s[key] for key in sorted(src_id2s.keys())]
trg_sents = [trg_id2s[key] for key in sorted(trg_id2s.keys())]
snum2idx = dict([(key, i) for i, key in enumerate(sorted(trg_id2s.keys()))])
assert len(src_id2s) == len(trg_id2s)
ground_truth = [list() for _ in src_id2s]
raw_gt = open(os.path.join(path, langs, split, f'{split}.wa.nonullalign')).readlines()
for line in raw_gt:
sid, s, t, sure = line.strip().split()
idx = snum2idx[sid]
if sure == 'S':
align = '-'.join([s, t])
else:
assert sure == 'P'
align = 'p'.join([s, t])
ground_truth[idx].append(align)
for i, item in enumerate(ground_truth):
ground_truth[i] = ' '.join(item)
self.ground_truth = ground_truth
elif langs == 'en-hi':
src_id2s = dict()
trg_id2s = dict()
sf = f'{split}.e'
tf = f'{split}.h'
for line in open(os.path.join(path, langs, split, sf), encoding='us-ascii'):
matching = regex.match(r'<s snum=([0-9]*)>(.*)</s>', line.strip())
assert matching is not None
idx = matching.group(1)
sent = matching.group(2).strip()
src_id2s[idx] = sent
for line in open(os.path.join(path, langs, split, tf), encoding='utf-8'):
matching = regex.match(r'<s snum=([0-9]*)>(.*)</s>', line.strip())
assert matching is not None
idx = matching.group(1)
sent = matching.group(2).strip()
trg_id2s[idx] = sent
src_sents = [src_id2s[key] for key in sorted(src_id2s.keys())]
trg_sents = [trg_id2s[key] for key in sorted(trg_id2s.keys())]
snum2idx = dict([(key, i) for i, key in enumerate(sorted(trg_id2s.keys()))])
assert len(src_id2s) == len(trg_id2s)
ground_truth = [list() for _ in src_id2s]
raw_gt = open(os.path.join(path, langs, split, f'{split}.wa.nonullalign')).readlines()
for line in raw_gt:
sid, s, t = line.strip().split()
idx = snum2idx[sid]
align = '-'.join([s, t])
ground_truth[idx].append(align)
for i, item in enumerate(ground_truth):
ground_truth[i] = ' '.join(item)
self.ground_truth = ground_truth
else:
raise Exception('language pair not supported.')
self.sent_pairs = list(zip(src_sents, trg_sents))
assert len(self.sent_pairs) == len(self.ground_truth)
@staticmethod
def load_std_file(path):
return [x.strip() for x in open(path)]
| bitext-lexind-main | align/data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from evaluate import evaluate
from models import CRISSWrapper, LexiconInducer
cos = nn.CosineSimilarity(dim=-1)
def setup_configs(configs):
configs.save_path = configs.save_path.format(src=configs.src_lang, trg=configs.trg_lang)
configs.stats_path = configs.save_path + '/stats.pt'
def collect_bitext_stats(bitext_path, align_path, save_path, src_lang, trg_lang, is_reversed=False):
stats_path = save_path + '/stats.pt'
freq_path = save_path + '/freqs.pt'
if os.path.exists(stats_path):
coocc, semi_matched_coocc, matched_coocc = torch.load(stats_path)
else:
coocc = collections.defaultdict(collections.Counter)
semi_matched_coocc = collections.defaultdict(collections.Counter)
matched_coocc = collections.defaultdict(collections.Counter)
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
os.system(f'cat {align_path} > {tmpdir.name}/aligns.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
aligns = open(f'{tmpdir.name}/aligns.txt').readlines()
tmpdir.cleanup()
assert len(bitext) == len(aligns)
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
align = [tuple(x if not is_reversed else reversed(x)) for x in json.loads(aligns[i])['inter']]
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
src_words = src_sent.lower().split()
trg_words = trg_sent.lower().split()
src_cnt = collections.Counter([x[0] for x in align])
trg_cnt = collections.Counter([x[1] for x in align])
for x, sw in enumerate(src_words):
for y, tw in enumerate(trg_words):
if (x, y) in align:
semi_matched_coocc[sw][tw] += 1
if src_cnt[x] == 1 and trg_cnt[y] == 1:
matched_coocc[sw][tw] += 1
coocc[sw][tw] += 1
torch.save((coocc, semi_matched_coocc, matched_coocc), stats_path)
if os.path.exists(freq_path):
freq_src, freq_trg = torch.load(freq_path)
else:
freq_src = collections.Counter()
freq_trg = collections.Counter()
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
tmpdir.cleanup()
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
for w in src_sent.split():
freq_src[w] += 1
for w in trg_sent.split():
freq_trg[w] += 1
torch.save((freq_src, freq_trg), freq_path)
return coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg
def load_lexicon(path):
lexicon = [regex.split(r'\t| ', x.strip()) for x in open(path)]
return set([tuple(x) for x in lexicon])
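# Lexicon files are read as one tab- or space-separated "source target" entry
# per line and returned as a set of (source, target) tuples.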
def extract_dataset(train_lexicon, test_lexicon, coocc, configs):
cooccs = [coocc]
test_set = set()
pos_training_set = set()
neg_training_set = set()
for tsw in set([x[0] for x in train_lexicon]):
for coocc in cooccs:
ssw = to_simplified(tsw) if configs.src_lang == 'zh_CN' else tsw
for stw in coocc[ssw]:
if stw == ssw:
added_self = True
ttw = to_traditional(stw) if configs.trg_lang == 'zh_CN' else stw
if (tsw, ttw) in train_lexicon:
pos_training_set.add((ssw, stw))
else:
neg_training_set.add((ssw, stw))
if (ssw, ssw) in train_lexicon:
pos_training_set.add((ssw, ssw))
else:
neg_training_set.add((ssw, ssw))
for tsw in set([x[0] for x in test_lexicon]):
for coocc in cooccs:
ssw = to_simplified(tsw) if configs.src_lang == 'zh_CN' else tsw
added_self = False
for stw in coocc[ssw]:
if stw == ssw:
added_self = True
test_set.add((ssw, stw))
test_set.add((ssw, ssw))
pos_training_set = list(pos_training_set)
neg_training_set = list(neg_training_set)
test_set = list(test_set)
return pos_training_set, neg_training_set, test_set
def extract_probs(batch, criss, lexicon_inducer, info, configs):
matched_coocc, semi_matched_coocc, coocc, freq_src, freq_trg = info
all_probs = list()
for i in range(0, len(batch), configs.batch_size):
subbatch = batch[i:i+configs.batch_size]
src_words, trg_words = zip(*subbatch)
src_encodings = criss.word_embed(src_words, configs.src_lang).detach()
trg_encodings = criss.word_embed(trg_words, configs.trg_lang).detach()
cos_sim = cos(src_encodings, trg_encodings).reshape(-1, 1)
dot_prod = (src_encodings * trg_encodings).sum(-1).reshape(-1, 1)
features = torch.tensor(
[
[
matched_coocc[x[0]][x[1]],
semi_matched_coocc[x[0]][x[1]],
coocc[x[0]][x[1]],
freq_src[x[0]],
freq_trg[x[1]],
] for x in subbatch
]
).float().to(configs.device).reshape(-1, 5)
features = torch.cat([cos_sim, dot_prod, features], dim=-1)
probs = lexicon_inducer(features).squeeze(-1)
all_probs.append(probs)
return torch.cat(all_probs, dim=0)
def get_test_lexicon(
test_set, test_lexicon, criss, lexicon_inducer, info, configs, best_threshold, best_n_cand
):
induced_lexicon = list()
pred_test_lexicon = collections.defaultdict(collections.Counter)
probs = extract_probs(
test_set, criss, lexicon_inducer, info, configs
)
for i, (x, y) in enumerate(test_set):
pred_test_lexicon[x][y] = max(pred_test_lexicon[x][y], probs[i].item())
possible_predictions = list()
for tsw in set([x[0] for x in test_lexicon]):
ssw = to_simplified(tsw)
for stw in pred_test_lexicon[ssw]:
ttw = to_traditional(stw)
pos = 1 if (tsw, ttw) in test_lexicon else 0
possible_predictions.append([tsw, ttw, pred_test_lexicon[ssw][stw], pos])
possible_predictions = sorted(possible_predictions, key=lambda x:-x[-2])
word_cnt = collections.Counter()
correct_predictions = 0
for i, item in enumerate(possible_predictions):
if item[-2] < best_threshold:
prec = correct_predictions / (sum(word_cnt.values()) + 1) * 100.0
rec = correct_predictions / len(test_lexicon) * 100.0
            f1 = 2 * prec * rec / (rec + prec) if prec + rec > 0 else 0
print(f'Test F1: {f1:.2f}')
break
if word_cnt[item[0]] == best_n_cand:
continue
word_cnt[item[0]] += 1
if item[-1] == 1:
correct_predictions += 1
induced_lexicon.append(item[:2])
eval_result = evaluate(induced_lexicon, test_lexicon)
return induced_lexicon, eval_result
def get_optimal_parameters(
pos_training_set, neg_training_set, train_lexicon, criss,
lexicon_inducer, info, configs,
):
pred_train_lexicon = collections.defaultdict(collections.Counter)
probs = extract_probs(
pos_training_set + neg_training_set, criss, lexicon_inducer, info, configs
)
for i, (x, y) in enumerate(pos_training_set + neg_training_set):
pred_train_lexicon[x][y] = max(pred_train_lexicon[x][y], probs[i].item())
possible_predictions = list()
for tsw in set([x[0] for x in train_lexicon]):
ssw = to_simplified(tsw)
for stw in pred_train_lexicon[ssw]:
ttw = to_traditional(stw)
pos = 1 if (tsw, ttw) in train_lexicon else 0
possible_predictions.append([tsw, ttw, pred_train_lexicon[ssw][stw], pos])
possible_predictions = sorted(possible_predictions, key=lambda x:-x[-2])
best_f1 = -1e10
best_threshold = best_n_cand = 0
for n_cand in range(1, 6):
word_cnt = collections.Counter()
correct_predictions = 0
bar = tqdm(possible_predictions)
for i, item in enumerate(bar):
if word_cnt[item[0]] == n_cand:
continue
word_cnt[item[0]] += 1
if item[-1] == 1:
correct_predictions += 1
prec = correct_predictions / (sum(word_cnt.values()) + 1) * 100.0
rec = correct_predictions / len(train_lexicon) * 100.0
            f1 = 2 * prec * rec / (rec + prec) if prec + rec > 0 else 0
if f1 > best_f1:
best_f1 = f1
best_threshold = item[-2]
best_n_cand = n_cand
bar.set_description(
f'Best F1={f1:.1f}, Prec={prec:.1f}, Rec={rec:.1f}, NCand={n_cand}, Threshold={item[-2]}'
)
return best_threshold, best_n_cand
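# The sweep above tries n_cand = 1..5 (maximum translations kept per source word)
# and, while walking the score-sorted candidate list, treats each candidate's
# score as a potential threshold, keeping the (threshold, n_cand) pair that
# gives the best F1 against the training lexicon.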
def train_test(configs, logging_steps=50000):
setup_configs(configs)
os.system(f'mkdir -p {configs.save_path}')
torch.save(configs, configs.save_path + '/configs.pt')
# prepare feature extractor
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path, configs.src_lang, configs.trg_lang, configs.reversed)
# dataset
train_lexicon = load_lexicon(configs.tuning_set)
sim_train_lexicon = {(to_simplified(x[0]), to_simplified(x[1])) for x in train_lexicon}
all_train_lexicon = train_lexicon.union(sim_train_lexicon)
test_lexicon = load_lexicon(configs.test_set)
pos_training_set, neg_training_set, test_set = extract_dataset(
train_lexicon, test_lexicon, info[2], configs
)
training_set_modifier = max(1, len(neg_training_set) // len(pos_training_set))
training_set = pos_training_set * training_set_modifier + neg_training_set
print(f'Positive training set is repeated {training_set_modifier} times due to data imbalance.')
# model and optimizers
criss = CRISSWrapper(device=configs.device)
lexicon_inducer = LexiconInducer(7, configs.hiddens, 1, 5).to(configs.device)
optimizer = torch.optim.Adam(lexicon_inducer.parameters(), lr=.0005)
# train model
for epoch in range(configs.epochs):
model_path = configs.save_path + f'/{epoch}.model.pt'
if os.path.exists(model_path):
lexicon_inducer.load_state_dict(torch.load(model_path))
continue
random.shuffle(training_set)
bar = tqdm(range(0, len(training_set), configs.batch_size))
total_loss = total_cnt = 0
for i, sid in enumerate(bar):
batch = training_set[sid:sid+configs.batch_size]
probs = extract_probs(batch, criss, lexicon_inducer, info, configs)
targets = torch.tensor(
[1 if tuple(x) in all_train_lexicon else 0 for x in batch]).float().to(configs.device)
optimizer.zero_grad()
loss = nn.BCELoss()(probs, targets)
loss.backward()
optimizer.step()
total_loss += loss.item() * len(batch)
total_cnt += len(batch)
bar.set_description(f'loss={total_loss / total_cnt:.5f}')
if (i + 1) % logging_steps == 0:
print(f'Epoch {epoch}, step {i+1}, loss = {total_loss / total_cnt:.5f}', flush=True)
torch.save(lexicon_inducer.state_dict(), configs.save_path + f'/{epoch}.{i+1}.model.pt')
print(f'Epoch {epoch}, loss = {total_loss / total_cnt:.5f}', flush=True)
torch.save(lexicon_inducer.state_dict(), configs.save_path + f'/model.pt')
best_threshold, best_n_cand = get_optimal_parameters(
pos_training_set, neg_training_set, train_lexicon, criss,
lexicon_inducer, info, configs,
)
induced_test_lexicon, test_eval = get_test_lexicon(
test_set, test_lexicon, criss, lexicon_inducer, info, configs, best_threshold, best_n_cand
)
with open(configs.save_path + '/induced.weaklysup.dict', 'w') as fout:
for item in induced_test_lexicon:
fout.write('\t'.join([str(x) for x in item]) + '\n')
fout.close()
return induced_test_lexicon, test_eval
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-te', '--test', type=str, help='path to test lexicon')
parser.add_argument('-tr', '--train', type=str, help='path to training lexicon')
parser.add_argument('-o', '--output', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'test_set': args.test,
'tuning_set': args.train,
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.output,
'batch_size': 128,
'epochs': 50,
'device': args.device,
'hiddens': [8],
'src_lang': args.source,
'trg_lang': args.target
}
)
res = train_test(configs)
print(res[-1])
| bitext-lexind-main | src/weakly_sup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
from transformers import AutoTokenizer
import numpy as np
import torch
import torch.nn as nn
class CRISSWrapper(object):
def __init__(self, path='criss/criss-3rd.pt',
args_path='criss/args.pt',
tokenizer='facebook/mbart-large-cc25', device='cpu'):
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.sequence_generator import EnsembleModel
self.device = device
args = torch.load(args_path)
task = tasks.setup_task(args)
models, _model_args = checkpoint_utils.load_model_ensemble(
path.split(':'),
arg_overrides=eval('{}'),
task=task
)
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
model = model.to(self.device)
self.model = EnsembleModel(models).to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer)
def word_embed(self, words, langcode='en_XX'):
tokens = list()
word_ids = list()
for word in words:
word_tokens = self.tokenizer.tokenize(word) + ['</s>', langcode]
tokens.append(word_tokens)
lengths = [len(x) for x in tokens]
max_length = max(lengths)
for i in range(len(tokens)):
word_ids.append(self.tokenizer.convert_tokens_to_ids(['<pad>'] * (max_length - len(tokens[i])) + tokens[i]))
encoder_input = {
'src_tokens': torch.tensor(word_ids).to(self.device),
'src_lengths': torch.tensor(lengths).to(self.device)
}
encoder_outs = self.model.forward_encoder(encoder_input)
np_encoder_outs = encoder_outs[0].encoder_out.float().detach()
encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.float().detach()
encoder_mask = encoder_mask.transpose(0, 1).unsqueeze(2)
masked_encoder_outs = encoder_mask * np_encoder_outs
avg_pool = (masked_encoder_outs / encoder_mask.sum(dim=0)).sum(dim=0)
return avg_pool
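# Rough usage sketch (assumes a CRISS checkpoint at the default paths and a
# working fairseq installation; names and shapes are illustrative only):
#   criss = CRISSWrapper(device='cpu')
#   vecs = criss.word_embed(['hello', 'world'], langcode='en_XX')  # (2, hidden_dim)
# Each word is tokenized, suffixed with '</s>' and the language code, left-padded
# to the longest item, and the encoder states are mean-pooled over non-pad positions.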
class LexiconInducer(nn.Module):
def __init__(self, input_dim, hidden_dims, output_dim=1, feature_transform=3):
super(LexiconInducer, self).__init__()
layers = list()
hidden_dims = [input_dim] + hidden_dims
for i in range(1, len(hidden_dims)):
layers.append(nn.Linear(hidden_dims[i-1], hidden_dims[i]))
layers.append(nn.ReLU())
layers.append(nn.Linear(hidden_dims[-1], output_dim))
layers.append(nn.Sigmoid())
self.model = nn.Sequential(*layers)
self.bias = nn.Parameter(torch.ones(feature_transform))
self.feature_transform = feature_transform
def forward(self, x):
transformed_features = torch.cat([x[:, :-self.feature_transform], torch.log(x[:, -self.feature_transform:] + self.bias.abs())], dim=-1)
return self.model(transformed_features)
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
| bitext-lexind-main | src/models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
import argparse
import collections
import copy
import dotdict
import json
import numpy as np
import os
import random
import regex
import tempfile
import torch
import torch.nn as nn
from chinese_converter import to_traditional, to_simplified
from tqdm import tqdm
from evaluate import evaluate
from models import CRISSWrapper, LexiconInducer
cos = nn.CosineSimilarity(dim=-1)
def setup_configs(configs):
configs.save_path = configs.save_path.format(src=configs.src_lang, trg=configs.trg_lang)
configs.stats_path = configs.save_path + '/stats.pt'
def collect_bitext_stats(bitext_path, align_path, save_path, src_lang, trg_lang, is_reversed=False):
stats_path = save_path + '/stats.pt'
freq_path = save_path + '/freqs.pt'
if os.path.exists(stats_path):
coocc, semi_matched_coocc, matched_coocc = torch.load(stats_path)
else:
coocc = collections.defaultdict(collections.Counter)
semi_matched_coocc = collections.defaultdict(collections.Counter)
matched_coocc = collections.defaultdict(collections.Counter)
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
os.system(f'cat {align_path} > {tmpdir.name}/aligns.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
aligns = open(f'{tmpdir.name}/aligns.txt').readlines()
tmpdir.cleanup()
assert len(bitext) == len(aligns)
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
align = [tuple(x if not is_reversed else reversed(x)) for x in json.loads(aligns[i])['inter']] # only focus on inter based alignment
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
src_words = src_sent.lower().split()
trg_words = trg_sent.lower().split()
src_cnt = collections.Counter([x[0] for x in align])
trg_cnt = collections.Counter([x[1] for x in align])
for x, sw in enumerate(src_words):
for y, tw in enumerate(trg_words):
if (x, y) in align:
semi_matched_coocc[sw][tw] += 1
if src_cnt[x] == 1 and trg_cnt[y] == 1:
matched_coocc[sw][tw] += 1
coocc[sw][tw] += 1
torch.save((coocc, semi_matched_coocc, matched_coocc), stats_path)
if os.path.exists(freq_path):
freq_src, freq_trg = torch.load(freq_path)
else:
freq_src = collections.Counter()
freq_trg = collections.Counter()
tmpdir = tempfile.TemporaryDirectory()
os.system(f'cat {bitext_path} > {tmpdir.name}/bitext.txt')
bitext = open(f'{tmpdir.name}/bitext.txt').readlines()
tmpdir.cleanup()
bar = tqdm(bitext)
for i, item in enumerate(bar):
try:
src_sent, trg_sent = regex.split(r'\|\|\|', item.strip())
if is_reversed:
src_sent, trg_sent = trg_sent, src_sent
except:
continue
if src_lang == 'zh_CN':
src_sent = to_simplified(src_sent)
if trg_lang == 'zh_CN':
trg_sent = to_simplified(trg_sent)
for w in src_sent.split():
freq_src[w] += 1
for w in trg_sent.split():
freq_trg[w] += 1
torch.save((freq_src, freq_trg), freq_path)
return coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg
def load_lexicon(path):
lexicon = [regex.split(r'\t| ', x.strip()) for x in open(path)]
return set([tuple(x) for x in lexicon])
def get_test_lexicon(test_lexicon, info):
induced_lexicon = list()
coocc, semi_matched_coocc, matched_coocc, freq_src, freq_trg = info
for tsw in tqdm(set([x[0] for x in test_lexicon])):
ssw = to_simplified(tsw)
candidates = list()
for stw in matched_coocc[ssw]:
ttw = to_traditional(stw)
candidates.append([tsw, ttw, matched_coocc[ssw][stw] / (coocc[ssw][stw] + 20)])
if len(candidates) == 0:
continue
candidates = sorted(candidates, key=lambda x:-x[-1])
induced_lexicon.append(candidates[0][:2])
eval_result = evaluate(induced_lexicon, test_lexicon)
return induced_lexicon, eval_result
def test(configs, logging_steps=50000):
setup_configs(configs)
# prepare feature extractor
info = collect_bitext_stats(
configs.bitext_path, configs.align_path, configs.save_path, configs.src_lang, configs.trg_lang, configs.reversed
)
# dataset
test_lexicon = load_lexicon(configs.test_set)
induced_test_lexicon, test_eval = get_test_lexicon(test_lexicon, info)
with open(configs.save_path + '/induced.fullyunsup.dict', 'w') as fout:
for item in induced_test_lexicon:
fout.write('\t'.join([str(x) for x in item]) + '\n')
fout.close()
return induced_test_lexicon, test_eval
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--align', type=str, help='path to word alignment')
parser.add_argument('-b', '--bitext', type=str, help='path to bitext')
parser.add_argument('-src', '--source', type=str, help='source language code')
parser.add_argument('-trg', '--target', type=str, help='target language code')
parser.add_argument('-te', '--test', type=str, help='path to test lexicon')
parser.add_argument('-o', '--output', type=str, default='./model/', help='path to output folder')
parser.add_argument('-d', '--device', type=str, default='cuda', help='device for training [cuda|cpu]')
args = parser.parse_args()
configs = dotdict.DotDict(
{
'test_set': args.test,
'align_path': args.align,
'bitext_path': args.bitext,
'save_path': args.output,
'batch_size': 128,
'epochs': 50,
'device': args.device,
'hiddens': [8]
}
)
res = test(configs)
    print(res[-1])
| bitext-lexind-main | src/fully_unsup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
def evaluate(pr_pairs, gt_pairs):
gt_set = set([tuple(x) for x in gt_pairs])
pr_set = set([tuple(x) for x in pr_pairs])
prec = sum([1 if x in gt_set else 0 for x in pr_set]) \
/ float(len(pr_set)) if len(pr_set) > 0 else 0
rec = sum([1 if x in pr_set else 0 for x in gt_set]) \
/ float(len(gt_set)) if len(gt_set) > 0 else 0
gt_src_words = set([x[0] for x in gt_pairs])
pr_src_words = set([x[0] for x in pr_pairs])
oov_number = sum([1 if x not in pr_src_words else 0 for x in gt_src_words])
oov_rate = oov_number / float(len(gt_src_words))
eval_result = {
'oov_number': oov_number,
'oov_rate': oov_rate,
'precision': prec,
'recall': rec,
'f1': 2.0 * prec * rec / (prec + rec) if prec > 0 or rec > 0 else 0.0
}
return eval_result
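# Small hypothetical example: pr_pairs = [('cat', 'Katze'), ('dog', 'Haus')] vs
# gt_pairs = [('cat', 'Katze'), ('dog', 'Hund'), ('bird', 'Vogel')] gives
# precision 1/2, recall 1/3, f1 = 0.4 and oov_rate 1/3 (the source word 'bird'
# never appears among the predictions).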
| bitext-lexind-main | src/evaluate.py |
"""For pip."""
from setuptools import setup, find_packages
import os
exec(open('embedding/__version__.py').read())
setup(
name="embedding",
version=__version__,
description="compute word embeddings",
packages=["embedding"],
install_requires=[
"torch",
"numba",
"scipy",
"pandas",
"sparsesvd",
],
entry_points={
"console_scripts": [
"embedding = embedding.main:main",
],
},
package_data={"embedding": [os.path.relpath(os.path.join(root, f), "embedding/") for root, dirs, files in os.walk("embedding/data") for f in files]},
include_package_data=True,
)
| embedding-master | setup.py |
#!/usr/bin/env python
import torch
import numpy as np
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import embedding
sns.set(style="whitegrid", color_codes=True)
ref = embedding.Embedding(gpu=False)
ref.load_vectors("output/pi.1000.txt")
ref.embedding /= ref.embedding.norm(2, 0).expand_as(ref.embedding)
dim = ref.embedding.shape[1]
method = {
"Power Iteration": "pi",
# "Power Iteration with Momentum": "pim"
}
l1 = {} # First component loss
l2 = {} # Second component loss
lw = {} # Worst component loss
for m in method:
it = [i + 1 for i in range(1000)]
e = embedding.Embedding(gpu=False)
l1[m] = []
l2[m] = []
lw[m] = []
for i in it:
e.load_vectors("output/" + method[m] + "." + str(i) + ".txt")
e.embedding /= e.embedding.norm(2, 0).expand_as(e.embedding)
l1[m].append(1 - abs(torch.dot(ref.embedding[:, 0], e.embedding[:, 0])))
l2[m].append(1 - abs(torch.dot(ref.embedding[:, 1], e.embedding[:, 1])))
lw[m].append(1 - abs(min([torch.dot(ref.embedding[:, i], e.embedding[:, i]) for i in range(dim)])))
# for i in range(1, 10 + 1):
# data = data.append(pd.DataFrame([[i, i * len(m), m]], columns=["Iterations", "Loss", "Method"]))
# print(data)
# obj = sns.pointplot(x="Iterations", y="Loss", hue="Method", data=data, markers="", linestyle="-");
# figure, ax = plt.subplots(1, 1)
# figure = obj.get_figure()
# figure.savefig("convergence.pdf", dpi=300)
plt.figure(1)
for m in method:
plt.semilogy(it, l1[m], label=m)
plt.legend()
plt.xlabel("Iterations")
plt.ylabel("Loss")
plt.savefig("first.pdf", dpi=300)
plt.figure(2)
for m in method:
plt.semilogy(it, l2[m], label=m)
plt.legend()
plt.xlabel("Iterations")
plt.ylabel("Loss")
plt.savefig("second.pdf", dpi=300)
plt.figure(3)
for m in method:
plt.semilogy(it, lw[m], label=m)
plt.legend()
plt.xlabel("Iterations")
plt.ylabel("Loss")
plt.savefig("worst.pdf", dpi=300)
| embedding-master | plot_convergence.py |
import torch
import numpy
import unittest
import embedding.tensor_type as tensor_type
torch_types = {"CPU Dense": [torch.FloatTensor,
torch.DoubleTensor,
# torch.HalfTensor,
torch.ByteTensor,
torch.CharTensor,
torch.ShortTensor,
torch.IntTensor,
torch.LongTensor],
"GPU Dense": [torch.cuda.FloatTensor,
torch.cuda.DoubleTensor,
# torch.cuda.HalfTensor,
torch.cuda.ByteTensor,
torch.cuda.CharTensor,
torch.cuda.ShortTensor,
torch.cuda.IntTensor,
torch.cuda.LongTensor],
"CPU Sparse": [torch.sparse.FloatTensor,
torch.sparse.DoubleTensor,
# torch.sparse.HalfTensor, # Does not exist
torch.sparse.ByteTensor,
torch.sparse.CharTensor,
torch.sparse.ShortTensor,
torch.sparse.IntTensor,
torch.sparse.LongTensor],
"GPU Sparse": [torch.cuda.sparse.FloatTensor,
torch.cuda.sparse.DoubleTensor,
# torch.cuda.sparse.HalfTensor,
torch.cuda.sparse.ByteTensor,
torch.cuda.sparse.CharTensor,
torch.cuda.sparse.ShortTensor,
torch.cuda.sparse.IntTensor,
torch.cuda.sparse.LongTensor]}
type_count = len(torch_types["CPU Dense"])
all_torch_types = [t for sublist in torch_types.values() for t in sublist]
class TestTensorType(unittest.TestCase):
def test_consistency(self):
for t in all_torch_types:
self.assertEqual(t().type(), tensor_type.tt2string(tensor_type.string2tt(t)))
self.assertEqual(t().type(), tensor_type.tt2string(tensor_type.string2tt(t().type())))
def test_to_cpu(self):
for i in range(type_count):
with self.assertRaises(AssertionError):
tensor_type.to_cpu(torch_types["CPU Dense"][i])
self.assertEqual(torch_types["CPU Dense"][i], tensor_type.to_cpu(torch_types["GPU Dense"][i]))
with self.assertRaises(AssertionError):
tensor_type.to_cpu(torch_types["CPU Sparse"][i])
self.assertEqual(torch_types["CPU Sparse"][i], tensor_type.to_cpu(torch_types["GPU Sparse"][i]))
def test_to_gpu(self):
for i in range(type_count):
self.assertEqual(torch_types["GPU Dense"][i], tensor_type.to_gpu(torch_types["CPU Dense"][i]))
with self.assertRaises(AssertionError):
tensor_type.to_gpu(torch_types["GPU Dense"][i])
self.assertEqual(torch_types["GPU Sparse"][i], tensor_type.to_gpu(torch_types["CPU Sparse"][i]))
with self.assertRaises(AssertionError):
tensor_type.to_gpu(torch_types["GPU Sparse"][i])
def test_to_dense(self):
for i in range(type_count):
with self.assertRaises(AssertionError):
tensor_type.to_dense(torch_types["CPU Dense"][i])
            with self.assertRaises(AssertionError):
                tensor_type.to_dense(torch_types["GPU Dense"][i])
self.assertEqual(torch_types["CPU Dense"][i], tensor_type.to_dense(torch_types["CPU Sparse"][i]))
self.assertEqual(torch_types["GPU Dense"][i], tensor_type.to_dense(torch_types["GPU Sparse"][i]))
def test_to_sparse(self):
for i in range(type_count):
self.assertEqual(torch_types["CPU Sparse"][i], tensor_type.to_sparse(torch_types["CPU Dense"][i]))
self.assertEqual(torch_types["GPU Sparse"][i], tensor_type.to_sparse(torch_types["GPU Dense"][i]))
with self.assertRaises(AssertionError):
tensor_type.to_sparse(torch_types["CPU Sparse"][i])
            with self.assertRaises(AssertionError):
                tensor_type.to_sparse(torch_types["GPU Sparse"][i])
if __name__ == "__main__":
unittest.main()
| embedding-master | test/test_tensor_type_conversion.py |
import torch
import unittest
import embedding.util as util
ind = torch.LongTensor([[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2],
[2, 0], [2, 1], [2, 2]]).t()
v = torch.FloatTensor([1, 2, 3, 4, 5, 6, 7, 8, 9])
mat = torch.sparse.FloatTensor(ind, v, torch.Size([3, 3]))
ele = [9 * torch.Tensor([[1, 0, 0], [0, 0, 0], [0, 0, 0]]),
9 * torch.Tensor([[0, 2, 0], [0, 0, 0], [0, 0, 0]]),
9 * torch.Tensor([[0, 0, 3], [0, 0, 0], [0, 0, 0]]),
9 * torch.Tensor([[0, 0, 0], [4, 0, 0], [0, 0, 0]]),
9 * torch.Tensor([[0, 0, 0], [0, 5, 0], [0, 0, 0]]),
9 * torch.Tensor([[0, 0, 0], [0, 0, 6], [0, 0, 0]]),
9 * torch.Tensor([[0, 0, 0], [0, 0, 0], [7, 0, 0]]),
9 * torch.Tensor([[0, 0, 0], [0, 0, 0], [0, 8, 0]]),
9 * torch.Tensor([[0, 0, 0], [0, 0, 0], [0, 0, 9]])]
row = [3 * torch.Tensor([[1, 2, 3], [0, 0, 0], [0, 0, 0]]),
3 * torch.Tensor([[0, 0, 0], [4, 5, 6], [0, 0, 0]]),
3 * torch.Tensor([[0, 0, 0], [0, 0, 0], [7, 8, 9]])]
col = [3 * torch.Tensor([[1, 0, 0], [4, 0, 0], [7, 0, 0]]),
3 * torch.Tensor([[0, 2, 0], [0, 5, 0], [0, 8, 0]]),
3 * torch.Tensor([[0, 0, 3], [0, 0, 6], [0, 0, 9]])]
def test_sequential_sampler(self, scheme, option, batch):
sample = util.get_sampler(mat, batch, scheme, True)
ind = 0
for i in range(100):
ans = 0 * option[0]
for j in range(batch):
ans += option[ind]
ind += 1
ind %= len(option)
ans /= batch
test = next(sample).to_dense()
self.assertTrue((torch.abs(test - ans) <= 1e-5).all())
class TestTensorType(unittest.TestCase):
def test_sequential_element(self):
for i in range(1, 9):
test_sequential_sampler(self, "element", ele, i)
def test_sequential_row(self):
for i in range(1, 3):
test_sequential_sampler(self, "row", row, i)
def test_sequential_column(self):
for i in range(1, 3):
test_sequential_sampler(self, "column", col, i)
if __name__ == "__main__":
unittest.main()
| embedding-master | test/test_sampling.py |
from __future__ import print_function, absolute_import
import logging
import logging.config
def init_logging(level=logging.INFO):
cfg = dict(
version=1,
formatters={
"f": {"format":
"%(levelname)-8s [%(asctime)s] %(message)s",
"datefmt":
"%m/%d %H:%M:%S"}
},
handlers={
"s": {"class": "logging.StreamHandler",
"formatter": "f",
"level": level},
"f": {"class": "logging.FileHandler",
"formatter": "f",
"level": logging.DEBUG,
"filename": "embedding.log"}
},
root={
"handlers": ["s", "f"],
"level": logging.NOTSET
},
)
logging.config.dictConfig(cfg)
| embedding-master | embedding/logging_config.py |
from __future__ import print_function, absolute_import
import torch
import numpy as np
import time
import os
import struct
import sys
import sparsesvd
import scipy.sparse
import logging
import embedding.util as util
# TODO: automatically match defaults from cmd line?
def power_iteration(mat, x, x0=None, iterations=50, beta=0., norm_freq=1, gpu=False, checkpoint=lambda x, i: None):
logger = logging.getLogger(__name__)
for i in range(iterations):
begin = time.time()
if beta == 0.:
x = util.mm(mat, x, gpu)
else:
x, x0 = util.mm(mat, x, gpu) - beta * x0, x
logging.info("Iteration " + str(i + 1) + " took " + str(time.time() - begin))
if ((i + 1) % norm_freq == 0 or
(i + 1) == iterations):
x, x0 = util.normalize(x, x0)
checkpoint(x, i)
return x, x0
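# Minimal usage sketch (hypothetical sizes; the real entry point is embedding.main):
#   import torch
#   import embedding.util as util
#   n, dim = 1000, 50
#   mat, _, _ = util.synthetic(n, 10 * n)                 # sparse n x n matrix
#   x, _ = util.normalize(torch.randn(n, dim).double())   # orthonormal start
#   x, _ = power_iteration(mat, x, iterations=50)         # block power iteration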
def alecton(mat, x, iterations=50, eta=1e-3, norm_freq=1, sample=None, gpu=False, checkpoint=lambda x, i: None):
logger = logging.getLogger(__name__)
if sample is None:
sample = util.get_sampler(mat, 100000)
# TODO: alecton will need a lot more iterations (since one iteration does
# much less work) -- clean way to have different defaults?
n = mat.shape[0]
nnz = mat._nnz()
for i in range(iterations):
begin = time.time()
m = next(sample)
x = (1 - eta) * x + eta * util.mm(m, x)
end = time.time()
logging.info("Iteration " + str(i + 1) + " took " + str(time.time() - begin))
if ((i + 1) % norm_freq == 0 or
(i + 1) == iterations):
x, _ = util.normalize(x, None)
checkpoint(x, i)
return x
def vr(mat, x, x0=None, iterations=50, beta=0., norm_freq=1, batch=100000, innerloop=10):
n = mat.shape[0]
nnz, = mat._values().shape
batch = min(batch, nnz)
rng = torch.FloatTensor(batch) # TODO: seems like theres no long random on cuda
if mat.is_cuda:
rng = rng.cuda()
for i in range(iterations):
begin = time.time()
xtilde = x.clone()
gx = torch.mm(mat, xtilde)
for j in range(innerloop):
# TODO: can ang be generated without expand_as?
ang = torch.sum(x * xtilde, 0).expand_as(xtilde)
            rng.uniform_(0, nnz)
if mat.is_cuda: # TODO: way to do this without cases?
elements = rng.type(torch.cuda.LongTensor)
else:
elements = rng.type(torch.LongTensor)
ind = mat._indices()[:, elements]
v = mat._values()[elements]
if mat.is_cuda:
sample = torch.cuda.sparse.DoubleTensor(ind, v, torch.Size([n, n]))
else:
sample = torch.sparse.DoubleTensor(ind, v, torch.Size([n, n]))
sample = nnz / float(batch) * sample
if beta == 0:
x = torch.mm(sample, x) - ang * torch.mm(sample, xtilde) + ang * gx
else:
x, x0 = torch.mm(sample, x) - ang * torch.mm(sample, xtilde) + ang * gx - beta * x0, x
# TODO: option to normalize in inner loop
logging.info("Iteration " + str(i + 1) + " took " + str(time.time() - begin))
if ((i + 1) % norm_freq == 0 or
(i + 1) == iterations):
x, x0 = util.normalize(x, x0)
return x, x0
def sgd(mat, x, iterations=50, eta=1e-3, batch=100000):
# TODO: this does not do any negative sampling
# TODO: does this need norm_freq
nnz = mat._nnz()
n, dim = x.shape
for i in range(iterations):
begin = time.time()
total_cost = 0.
for start in range(0, nnz, batch):
end = min(start + batch, nnz)
X = mat._values()[start:end]
row = mat._indices()[0, start:end]
col = mat._indices()[1, start:end]
pred = (x[row, :] * x[col, :]).sum(1)
error = pred - torch.log(X)
step = -eta * error
dx = step.expand(dim, end - start).t().repeat(2, 1) * x[torch.cat([col, row]), :]
x.index_add_(0, torch.cat([row, col]), dx)
total_cost += 0.5 * (error * error).sum()
logging.info("Iteration" + str(i + 1) + "\t" + str(start // batch + 1), " / " + str((nnz + batch - 1) // batch) + "\t" + str(time.time() - begin) + "\r")
logging.info("Iteration " + str(i + 1) + " took " + str(time.time() - begin))
logging.info("Error: " + str(total_cost / nnz))
return x
def glove(mat, x, bias=None, iterations=50, eta=1e-3, batch=100000):
# NOTE: this does not include the context vector/bias
# the word vector/bias is just used instead
xmax = 100
alpha = 0.75
nnz = mat._nnz()
n, dim = x.shape
# TODO: should bias be CPU or GPU
if bias is None:
begin = time.time()
f_mat = mat.clone()
f_mat._values().div_(xmax).clamp_(max=1).pow_(alpha)
log_mat = mat.clone()
log_mat._values().log_()
log_mat._values().mul_(f_mat._values())
bias = util.sum_rows(log_mat) / util.sum_rows(f_mat) / 2
logging.info("Initial bias took" + str(time.time() - begin))
# bias = torch.cuda.FloatTensor(n)
# bias.zero_()
for i in range(100):
begin = time.time()
total_cost = 0.
for start in range(0, nnz, batch):
end = min(start + batch, nnz)
X = mat._values()[start:end]
f = X / xmax
f.clamp_(max=1)
f.pow_(alpha)
row = mat._indices()[0, start:end]
col = mat._indices()[1, start:end]
pred = bias[row] + bias[col]
error = pred - torch.log(X)
step = -0.001 * f * error
bias.index_add_(0, torch.cat([row, col]), torch.cat([step, step]))
total_cost += 0.5 * (f * error * error).sum()
logging.info("Tune bias " + str(i + 1) + "\t" + str(start // batch + 1) + " / " + str((nnz + batch - 1) // batch) + "\t" + str(time.time() - begin) + "\r")
logging.info("Error: " + str(total_cost / nnz))
for i in range(iterations):
begin = time.time()
total_cost = 0.
for start in range(0, nnz, batch):
end = min(start + batch, nnz)
X = mat._values()[start:end]
f = X / xmax
f.clamp_(max=1)
f.pow_(alpha)
row = mat._indices()[0, start:end]
col = mat._indices()[1, start:end]
pred = (x[row, :] * x[col, :]).sum(1) + bias[row] + bias[col]
error = pred - torch.log(X)
step = -eta * f * error
dx = step.expand(dim, end - start).t().repeat(2, 1) * x[torch.cat([col, row]), :]
x.index_add_(0, torch.cat([row, col]), dx)
# bias.index_add_(0, torch.cat([row, col]), torch.cat([step, step]))
total_cost += 0.5 * (f * error * error).sum()
logging.info("Iteration " + str(i + 1) + "\t" + str(start // batch + 1) + " / " + str((nnz + batch - 1) // batch) + "\t" + str(time.time() - begin) + "\r")
logging.info("Iteration " + str(i + 1) + " took " + str(time.time() - begin))
logging.info("Error: " + str(total_cost / nnz))
return x, bias
def sparseSVD(mat, dim):
begin = time.time()
mat = mat.tocsc()
logging.info("CSC conversion took " + str(time.time() - begin))
begin = time.time()
u, s, v = sparsesvd.sparsesvd(mat, dim)
logging.info("Solving took " + str(time.time() - begin))
return torch.from_numpy(u.transpose())
| embedding-master | embedding/solver.py |
from __future__ import print_function, absolute_import
import torch
import numba
import numpy as np
import time
import sys
import argparse
import logging
import scipy
import scipy.sparse
import embedding.tensor_type as tensor_type
def synthetic(n, nnz):
"""This function generates a synthetic matrix."""
begin = time.time()
# TODO: distribute as power law?
# (closer to real distribution)
v = torch.abs(torch.randn([nnz]))
# TODO: make non-neg
v = v.type(torch.DoubleTensor)
ind = torch.rand(2, nnz) * torch.Tensor([n, n]).repeat(nnz, 1).transpose(0, 1)
# TODO: fix ind (only diag right now)
ind = ind.type(torch.LongTensor)
cooccurrence = torch.sparse.DoubleTensor(ind, v, torch.Size([n, n])).coalesce()
vocab = None
words = None
logger = logging.getLogger(__name__)
logger.info("Generating synthetic data: " + str(time.time() - begin))
return cooccurrence, vocab, words
def normalize(x, x0=None):
logger = logging.getLogger(__name__)
# TODO: is it necessary to reorder columns by magnitude
# TODO: more numerically stable implementation?
begin = time.time()
norm = torch.norm(x, 2, 0, True).squeeze()
logger.info(" ".join(["{:10.2f}".format(n) for n in norm]))
a = time.time()
_, perm = torch.sort(-norm)
norm = norm[perm]
x = x[:, perm]
if x0 is not None:
x0 = x0[:, perm]
logger.info("Permute time: " + str(time.time() - a))
try:
temp, r = torch.qr(x)
except RuntimeError as e:
logger.error("QR decomposition has run into a problem.\n"
"Older versions of pytoch had a memory leak in QR:\n"
" https://github.com/pytorch/pytorch/issues/3009\n"
"Updating PyTorch may fix this issue.\n"
"\n"
"This issue can also be avoided by running QR on CPU.\n"
"This can be enabled with the flag `--embedgpu false`\n"
)
raise e
if np.isnan(torch.sum(temp)):
# qr seems to occassionally be unstable and result in nan
logger.warn("QR decomposition resulted in NaNs\n"
"Normalizing, but not orthogonalizing")
# TODO: should a little bit of jitter be added to make qr succeed?
x = x.div(norm.expand_as(x))
if x0 is not None:
x0 = x0.div(norm.expand_as(x0))
else:
x = temp
if x0 is not None:
x0 = torch.mm(x0, torch.inverse(r))
logger.info("Normalizing took " + str(time.time() - begin))
return x, x0
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def mm(A, x, gpu=False):
logger = logging.getLogger(__name__)
if (type(A) == scipy.sparse.csr.csr_matrix or
type(A) == scipy.sparse.coo.coo_matrix or
type(A) == scipy.sparse.csc.csc_matrix):
return torch.from_numpy(A * x.numpy())
elif not (A.is_cuda or x.is_cuda or gpu):
# Data and computation on CPU
return torch.mm(A, x)
else:
# Compute on GPU, regardless of where data is
if A.is_cuda and x.is_cuda:
# Everything on GPU anyways, just multiply normally
# TODO: workaround for pytorch memory leak
return torch.mm(A, x)
else:
if (A.type() == "torch.sparse.FloatTensor" or
A.type() == "torch.cuda.sparse.FloatTensor"):
SparseTensor = torch.cuda.sparse.FloatTensor
elif (A.type() == "torch.sparse.DoubleTensor" or
A.type() == "torch.cuda.sparse.DoubleTensor"):
SparseTensor = torch.cuda.sparse.DoubleTensor
else:
raise NotImplementedError("Type of cooccurrence matrix (" + A.type() + ") is not recognized.")
n, dim = x.shape
nnz = A._nnz()
indices = A._indices().t()
values = A._values()
# TODO: GPU memory usage is actually about double this
# what's causing the extra usage?
# TODO: automate batch choice
GPU_MEMORY = 2 ** 30 # Amount of GPU memory to use
# TODO: automatically detect or cmd line
# Allocate half of memory to each part
A_MEM = GPU_MEMORY // 2
X_MEM = GPU_MEMORY // 2
A_elem_size = 4 + 4 + 8 # TODO: 8 for double right now -- use actual value
x_elem_size = n * 8 # TODO 8 for double right now
# TODO: warning if batch size is 0
A_batch_size = A_MEM // A_elem_size
x_batch_size = X_MEM // x_elem_size
A_batches = (nnz + A_batch_size - 1) // A_batch_size
x_batches = (dim + x_batch_size - 1) // x_batch_size
if A.is_cuda:
A_batches = 1
if x.is_cuda:
x_batches = 1
logger.debug("Coocurrence matrix using " + str(A_batches) + " batches")
logger.debug("Embedding using " + str(x_batches) + " batches")
newx = 0 * x
for i in range(A_batches):
if A.is_cuda:
sample = A
else:
start = i * nnz // A_batches
end = (i + 1) * nnz // A_batches
ind = indices[start:end, :]
val = values[start:end]
# TODO: resort to sync transfer if needed
try:
                        ind = ind.cuda(non_blocking=True)
                        val = val.cuda(non_blocking=True)
except RuntimeError as e:
# logging.warn("async transfer failed")
ind = ind.cuda()
val = val.cuda()
sample = SparseTensor(ind.t(), val, torch.Size([n, n]))
for j in range(x_batches):
print(str(i) + " / " + str(A_batches) + "\t" + str(j) + " / " + str(x_batches) + "\r", end="")
sys.stdout.flush()
if x.is_cuda:
newx = newx.addmm(sample, x)
else:
start = j * dim // x_batches
end = (j + 1) * dim // x_batches
cols = x[:, start:end]
try:
                            cols = cols.cuda(non_blocking=True)
except RuntimeError as e:
# logging.warn("async transfer failed")
cols = cols.cuda()
cols = torch.mm(sample, cols).cpu()
newx[:, start:end] += cols
print()
return newx
def sum_rows(A):
n = A.shape[0]
if A.is_cuda:
ones = tensor_type.to_dense(A.type())(n, 1)
ones.fill_(1)
return torch.mm(A, ones).squeeze(1)
else:
@numba.jit(nopython=True, cache=True)
def sr(n, ind, val):
nnz = val.shape[0]
ans = np.zeros(n, dtype=val.dtype)
for i in range(nnz):
ans[ind[0, i]] += val[i]
return ans
return tensor_type.to_dense(A.type())(sr(A.shape[0], A._indices().numpy(), A._values().numpy()))
# return torch.from_numpy(scipy.sparse.coo_matrix((A._values().numpy(), (A._indices()[0, :].numpy(), A._indices()[1, :].numpy())), shape=A.shape).sum(1)).squeeze()
def save_to_text(filename, embedding, words):
begin = time.time()
embedding = embedding.cpu()
n, dim = embedding.shape
with open(filename, "w") as f:
for i in range(n):
f.write(words[i] + " " + " ".join([str(embedding[i, j]) for j in range(dim)]) + "\n")
logging.getLogger(__name__).info("Saving embeddings: " + str(time.time() - begin))
def get_sampler(mat, batch, scheme="element", sequential=True):
n = mat.shape[0]
nnz = mat._nnz()
if mat.is_cuda:
t = torch.cuda
gpu = True
else:
t = torch
gpu = False
if scheme == "element":
batch = min(batch, nnz)
scale = nnz / float(batch)
else:
batch = min(batch, n)
scale = n / float(batch)
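    # `scale` compensates for sampling only a fraction of the entries/rows, so each yielded
    # sub-matrix acts as an estimate of the full matrix.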
if sequential:
start = 0
while True:
end = start + batch
if scheme == "element":
elements = torch.arange(start, end).type(t.LongTensor) % nnz
start = end % nnz
elif scheme == "row":
row = mat._indices()[0, :]
# PyTorch doesn't seem to have element-wise logical operators
# * is equivalent to and
# + is equivalent to or
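                    # The shifted (row + n) comparison handles wrap-around once the window [start, end) runs past n.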
elements = (((start <= row) * (row < end)) +
((start <= row + n) * (row + n < end))).nonzero().squeeze()
start = end % n
elif scheme == "column":
col = mat._indices()[1, :]
elements = (((start <= col) * (col < end)) +
((start <= col + n) * (col + n < end))).nonzero().squeeze()
start = end % n
ind = mat._indices()[:, elements]
v = mat._values()[elements]
yield scale * type(mat)(ind, v, mat.shape)
else:
if scheme == "row" or scheme == "column":
mat = mat.cpu()
data = mat._values().numpy()
row = mat._indices()[0, :].numpy()
col = mat._indices()[1, :].numpy()
if scheme == "row":
m = scipy.sparse.csr_matrix((data, (row, col)), mat.shape)
if scheme == "column":
m = scipy.sparse.csc_matrix((data, (row, col)), mat.shape)
while True:
if scheme == "element":
                # TODO: there seems to be no direct uniform sampler for LongTensor
                elements = t.FloatTensor(batch).uniform_(0, nnz).type(t.LongTensor)
ind = mat._indices()[:, elements]
v = mat._values()[elements]
yield scale * type(mat)(ind, v, mat.shape)
elif scheme == "row" or scheme == "column":
rc = np.random.randint(0, n, batch)
if scheme == "row":
sample = m[rc, :].tocoo()
row = rc[sample.row]
col = sample.col
else:
sample = m[:, rc].tocoo()
row = sample.row
col = rc[sample.col]
ind = torch.from_numpy(np.array([row, col])).type(torch.LongTensor)
v = torch.from_numpy(m[rc].data)
sample = scale * type(mat)(ind, v, mat.shape)
if gpu:
sample = sample.cuda()
yield sample
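# Example (illustrative) use of the sampler, given a sparse cooccurrence tensor `mat`:
#   sampler = get_sampler(mat, batch=100000, scheme="element", sequential=True)
#   sub = next(sampler)  # rescaled sparse sub-matrix drawn from `mat`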
| embedding-master | embedding/util.py |
import torch
def to_cpu(tt):
tt = string2tt(tt)
assert(tt[0])
tt[0] = False
return eval(tt2string(tt))
def to_gpu(tt):
tt = string2tt(tt)
assert(not tt[0])
tt[0] = True
return eval(tt2string(tt))
def to_dense(tt):
tt = string2tt(tt)
assert(tt[1])
tt[1] = False
return eval(tt2string(tt))
def to_sparse(tt):
tt = string2tt(tt)
assert(not tt[1])
tt[1] = True
return eval(tt2string(tt))
def to_precision(tt, precision):
tt = string2tt(tt)
assert(precision[-6:] == "Tensor")
tt[2] = precision
return eval(tt2string(tt))
def string2tt(string):
if type(string) == type:
string = string().type()
assert(type(string) == str)
tt = [False, False, ""]
# tt[0]: on gpu (bool)
# tt[1]: is sparse(bool)
# tt[2]: precision (str)
string = string.split(".")
assert(string[0] == "torch")
string = string[1:]
if string[0] == "cuda":
string = string[1:]
tt[0] = True
if string[0] == "sparse":
string = string[1:]
tt[1] = True
assert(len(string) == 1)
assert(string[0][-6:] == "Tensor")
tt[2] = string[0]
return tt
def tt2string(tt):
assert(len(tt) == 3)
assert(type(tt[0]) == bool)
assert(type(tt[1]) == bool)
assert(type(tt[2]) == str)
string = "torch."
if tt[0]:
string += "cuda."
if tt[1]:
string += "sparse."
string += tt[2]
return string
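# Example (illustrative): the helpers rewrite the tensor type name and eval it back into a class, e.g.
#   to_gpu(torch.FloatTensor) -> torch.cuda.FloatTensor
#   to_dense("torch.cuda.sparse.DoubleTensor") -> torch.cuda.DoubleTensor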
| embedding-master | embedding/tensor_type.py |
from .main import Embedding
from .main import main
from .evaluate import evaluate
from .__version__ import __version__
__all__ = ('solver',)
from .logging_config import init_logging
init_logging()
| embedding-master | embedding/__init__.py |
__version__ = "0.0"
| embedding-master | embedding/__version__.py |
from __future__ import print_function, absolute_import
import argparse
import embedding.util as util
from embedding.__version__ import __version__
def get_parser():
parser = argparse.ArgumentParser(description="Tools for embeddings.")
# Add version to parser
parser.add_argument("-v", "--version",
action='version',
version="%(prog)s " + __version__,
help="Print version number.")
# Add verbosity level
parser.add_argument("--logging", type=str.upper, default="INFO",
choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"],
help="Select logging verbosity.")
subparser = parser.add_subparsers(dest="task")
# Cooccurrence parser
cooccurrence_parser = subparser.add_parser("cooccurrence", help="Preprocessing (compute vocab and cooccurrence from text).")
cooccurrence_parser.add_argument("text", type=str, nargs="?", default="text", help="filename of text file")
# Compute parser
compute_parser = subparser.add_parser("compute", help="Compute embedding from scratch via cooccurrence matrix.")
compute_parser.add_argument("-d", "--dim", type=int, default=50,
help="dimension of embedding")
compute_parser.add_argument("--vocab", type=str, default="vocab.txt",
help="filename of vocabulary file")
compute_parser.add_argument("-c", "--cooccurrence", type=str, default="cooccurrence.bin",
help="filename of cooccurrence binary")
compute_parser.add_argument("--initial", type=str, default=None,
help="filename of initial embedding vectors")
compute_parser.add_argument("--initialbias", type=str, default=None,
help="filename of initial bias")
compute_parser.add_argument("-o", "--vectors", type=str, default="vectors.txt",
help="filename for embedding vectors output")
compute_parser.add_argument("--bias", type=str, default="bias.txt",
help="filename for bias output")
compute_parser.add_argument("--checkpoint", type=int, default=0,
help="frequency of saving intermediate computations (0 to turn off)")
compute_parser.add_argument("-p", "--preprocessing", type=str.lower, default="ppmi",
choices=["none", "log1p", "ppmi"],
help="Preprocessing of cooccurrence matrix before eigenvector computation")
compute_parser.add_argument("--negative", type=float, default=1.,
help="Number of negative samples (for shifted PMI)")
compute_parser.add_argument("--alpha", type=float, default=1.,
help="Context distribution smoothing parameter")
compute_parser.add_argument("-s", "--solver", type=str.lower, default="pi",
choices=["pi", "alecton", "vr", "sgd", "glove", "sparsesvd", "gemsim"],
help="Solver used to find top eigenvectors")
compute_parser.add_argument("-i", "--iterations", type=int, default=50,
help="Iterations used by solver")
compute_parser.add_argument("-e", "--eta", "--step", type=float, default=1e-3,
help="Learning rate used by solver")
compute_parser.add_argument("-m", "--momentum", "--beta", type=float, default=0.,
help="Momentum used by solver")
compute_parser.add_argument("-f", "--normfreq", type=int, default=1,
help="Normalization frequency used by solver")
compute_parser.add_argument("-j", "--innerloop", type=int, default=10,
help="Inner loop iterations used by solver")
compute_parser.add_argument("-b", "--batch", type=int, default=100000,
help="Batch size used by solver")
compute_parser.add_argument("--scheme", type=str.lower, default="element",
choices=["element", "column", "row"],
help="Sampling scheme")
compute_parser.add_argument("--sequential", type=bool, default=True,
help="Whether or not to sample in order")
compute_parser.add_argument("--scale", type=float, default=0.5,
help="Scale on eigenvector is $\lambda_i ^ s$")
compute_parser.add_argument("-n", "--normalize", type=util.str2bool, default=False,
help="Toggle to normalize embeddings")
compute_parser.add_argument("-g", "--gpu", type=util.str2bool, default=True,
help="Toggle to use GPU for computations")
compute_parser.add_argument("--matgpu", type=util.str2bool, default=None,
help="Toggle to store cooccurrence matrix on GPU")
compute_parser.add_argument("--embedgpu", type=util.str2bool, default=None,
help="Toggle to store embeddings on GPU")
compute_parser.add_argument("--precision", type=str.lower, default="float",
choices=["float", "double"],
help="Precision of values")
# Evaluate parser
evaluate_parser = subparser.add_parser("evaluate", help="Evaluate performance of an embedding on standard tasks.")
evaluate_parser.add_argument('--vocab', type=str, default='vocab.txt',
help="filename of vocabulary file")
evaluate_parser.add_argument('--vectors', type=str, default='vectors.txt',
help="filename of embedding vectors file")
return parser
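# Example (illustrative) invocations accepted by this parser (e.g. via `python -m embedding`):
#   embedding cooccurrence corpus.txt
#   embedding compute -d 100 -s pi -p ppmi
#   embedding evaluate --vocab vocab.txt --vectors vectors.txt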
| embedding-master | embedding/parser.py |
from __future__ import print_function, absolute_import
import os
import argparse
import numpy as np
import scipy.stats
import logging
def evaluate(words, vectors):
# TODO: give option to just pass in vocab and vectors (not filename)
if type(words) == str:
with open(words, 'r') as f:
words = [x.rstrip().split(' ')[0] for x in f.readlines()]
if type(vectors) == str:
with open(vectors, 'r') as f:
vectors = {}
for line in f:
vals = line.rstrip().split(' ')
vectors[vals[0]] = [float(x) for x in vals[1:]]
vocab_size = len(words)
vocab = {w: idx for idx, w in enumerate(words)}
ivocab = {idx: w for idx, w in enumerate(words)}
vector_dim = len(vectors[ivocab[0]])
W = np.zeros((vocab_size, vector_dim))
for word, v in vectors.items():
if word == '<unk>':
continue
W[vocab[word], :] = v
    # normalize each word vector to unit length
W_norm = np.zeros(W.shape)
d = (np.sum(W ** 2, 1) ** (0.5))
W_norm = (W.T / d).T
score = {}
# evaluate_human_sim()
score["similarity"] = evaluate_vectors_sim(W, vocab, ivocab)
score["analogy-add"] = evaluate_vectors_analogy(W_norm, vocab, ivocab, "add")
score["analogy-mul"] = evaluate_vectors_analogy(W_norm, vocab, ivocab, "mul")
return score
def evaluate_vectors_analogy(W, vocab, ivocab, method="add"):
"""Evaluate the trained word vectors on a variety of tasks"""
logger = logging.getLogger(__name__)
logger.info("Analogy Task")
filenames = [
'capital-common-countries.txt', 'capital-world.txt', 'currency.txt',
'city-in-state.txt', 'family.txt', 'gram1-adjective-to-adverb.txt',
'gram2-opposite.txt', 'gram3-comparative.txt', 'gram4-superlative.txt',
'gram5-present-participle.txt', 'gram6-nationality-adjective.txt',
'gram7-past-tense.txt', 'gram8-plural.txt', 'gram9-plural-verbs.txt',
]
prefix = os.path.join(os.path.dirname(__file__), "data", "eval", "question-data")
# to avoid memory overflow, could be increased/decreased
# depending on system and vocab size
split_size = 100
correct_sem = 0 # count correct semantic questions
correct_syn = 0 # count correct syntactic questions
correct_tot = 0 # count correct questions
count_sem = 0 # count all semantic questions
count_syn = 0 # count all syntactic questions
count_tot = 0 # count all questions
full_count = 0 # count all questions, including those with unknown words
for i in range(len(filenames)):
with open('%s/%s' % (prefix, filenames[i]), 'r') as f:
full_data = [line.rstrip().split(' ') for line in f]
full_count += len(full_data)
data = [x for x in full_data if all(word in vocab for word in x)]
indices = np.array([[vocab[word] for word in row] for row in data])
ind1, ind2, ind3, ind4 = indices.T
predictions = np.zeros((len(indices),))
num_iter = int(np.ceil(len(indices) / float(split_size)))
for j in range(num_iter):
subset = np.arange(j * split_size, min((j + 1) * split_size, len(ind1)))
if method == "add":
pred_vec = (W[ind2[subset], :] - W[ind1[subset], :] + W[ind3[subset], :])
# cosine similarity if input W has been normalized
dist = np.dot(W, pred_vec.T)
elif method == "mul":
# This is 3CosMul from
# Linguistic Regularities in Sparse and Explicit Word Representations
epsilon = 0.001
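                    # 3CosMul score: cos(d, a*) * cos(d, b) / (cos(d, a) + eps), with each cosine shifted to [0, 1],
                    # for analogies a : a* :: b : d (here ind1 = a, ind2 = a*, ind3 = b).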
# cosine similarity if input W has been normalized
cos_a = (np.dot(W, W[ind1[subset], :].T) + 1) / 2
cos_as = (np.dot(W, W[ind2[subset], :].T) + 1) / 2
cos_b = (np.dot(W, W[ind3[subset], :].T) + 1) / 2
dist = cos_as * cos_b / (cos_a + epsilon)
else:
raise NotImplementedError("Method \"" + method + "\" for analogy task not recognized.")
for k in range(len(subset)):
dist[ind1[subset[k]], k] = -np.Inf
dist[ind2[subset[k]], k] = -np.Inf
dist[ind3[subset[k]], k] = -np.Inf
# predicted word index
predictions[subset] = np.argmax(dist, 0).flatten()
val = (ind4 == predictions) # correct predictions
count_tot = count_tot + len(ind1)
correct_tot = correct_tot + sum(val)
if i < 5:
count_sem = count_sem + len(ind1)
correct_sem = correct_sem + sum(val)
else:
count_syn = count_syn + len(ind1)
correct_syn = correct_syn + sum(val)
logger.info(" %s:" % filenames[i][:-4])
logger.info(' ACCURACY TOP1: %.2f%% (%d/%d)' %
(np.mean(val) * 100, np.sum(val), len(val)))
logger.info(' Questions seen/total: %.2f%% (%d/%d)' %
(100 * count_tot / float(full_count), count_tot, full_count))
logger.info(' Semantic accuracy: %.2f%% (%i/%i)' %
(100 * correct_sem / float(count_sem), correct_sem, count_sem))
logger.info(' Syntactic accuracy: %.2f%% (%i/%i)' %
(100 * correct_syn / float(count_syn), correct_syn, count_syn))
logger.info('Total accuracy: %.2f%% (%i/%i)\n' % (100 * correct_tot / float(count_tot), correct_tot, count_tot))
return correct_tot / float(count_tot)
def evaluate_vectors_sim(W, vocab, ivocab):
"""Evaluate the trained word vectors on the WordSimilarity-353 task."""
filename = 'combined.csv'
# filename = 'set1.csv'
filename = os.path.join(os.path.dirname(__file__), "data", "eval", "wordsim353", filename)
with open(filename, 'r') as f:
data = [line.rstrip().split(',') for line in f][1:]
# TODO: include cases where words are missing
data = [row for row in data if (row[0] in vocab and row[1] in vocab)]
words = np.array([[vocab[row[0]], vocab[row[1]]] for row in data])
score = np.array([float(row[2]) for row in data])
pred = np.sum(np.multiply(W[words[:, 0], :], W[words[:, 1], :]), 1)
rho_dot, p = scipy.stats.spearmanr(score, pred)
logger = logging.getLogger(__name__)
logger.info("WordSimilarity-353 Spearman Correlation (dot): %.3f\n" % rho_dot)
    pred = np.sum(np.multiply(W[words[:, 0], :], W[words[:, 1], :]), 1) / np.sqrt(np.sum(np.multiply(W[words[:, 0], :], W[words[:, 0], :]), 1)) / np.sqrt(np.sum(np.multiply(W[words[:, 1], :], W[words[:, 1], :]), 1))
rho_cos, p = scipy.stats.spearmanr(score, pred)
logger = logging.getLogger(__name__)
logger.info("WordSimilarity-353 Spearman Correlation (cos): %.3f\n" % rho_cos)
return rho_dot, rho_cos
def evaluate_human_sim():
"""Evaluate the trained word vectors on the WordSimilarity-353 task."""
filename = 'set1.csv'
filename = os.path.join(os.path.dirname(__file__), "data", "eval", "wordsim353", filename)
with open(filename, 'r') as f:
data = [line.rstrip().split(',') for line in f][1:]
# TODO: include cases where words are missing
mean = np.array([float(row[2]) for row in data])
score = np.array([[float(row[i]) for i in range(3, len(row))] for row in data])
n, m = score.shape
trials = 100
total = 0.
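    # Split-half reliability: repeatedly split the human raters into two random halves and
    # correlate the mean ratings of the two halves.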
for i in range(trials):
        group = np.zeros(m, dtype=bool)
        group[np.random.choice(m, m // 2, False)] = True
score1 = np.mean(score[:, group], 1)
score2 = np.mean(score[:, np.invert(group)], 1)
rho, p = scipy.stats.spearmanr(score1, score2)
total += rho
logger = logging.getLogger(__name__)
logger.info("Human WordSimilarity-353 Spearman Correlation: %.3f\n" % (total / trials))
return total / trials
| embedding-master | embedding/evaluate.py |
from __future__ import print_function, absolute_import
import torch
import numpy as np
import time
import os
import struct
import argparse
import sys
import subprocess
import math
import logging
import pandas
import collections
import scipy
import embedding.solver as solver
import embedding.util as util
import embedding.evaluate as evaluate
import embedding.tensor_type as tensor_type
import embedding.parser as parser
import embedding.logging_config as logging_config
def main(argv=None):
# Parse command line arguments
args = parser.get_parser().parse_args(argv)
# Set up logging for package
logging_config.init_logging(args.logging)
logger = logging.getLogger(__name__)
logger.debug(args)
if args.task == "cooccurrence":
subprocess.call([os.path.join(os.path.dirname(__file__), "..", "cooccurrence.sh"), args.text])
elif args.task == "compute":
if args.gpu and not torch.cuda.is_available():
logger.warn("GPU use requested, but GPU not available. "
"Toggling off GPU use.")
args.gpu = False
args.matgpu = False
args.embedgpu = False
if args.gpu and args.solver == "sparsesvd":
logger.warn("SparseSVD is not implemented for GPU. "
"Toggling off GPU use.")
args.gpu = False
args.matgpu = False
args.embedgpu = False
if args.solver == "glove" and args.preprocessing != "none":
logger.warn("GloVe only behaves properly with no preprocessing. "
"Turning off preprocessing.")
args.preprocessing = "none"
CpuTensor = torch.FloatTensor
if args.precision == "float":
CpuTensor = torch.FloatTensor
elif args.precision == "double":
CpuTensor = torch.DoubleTensor
else:
logger.warn("Precision \"" + args.precision + "\" is not recognized. "
"Defaulting to \"float\".")
embedding = Embedding(args.dim, args.gpu, args.matgpu, args.embedgpu, CpuTensor)
embedding.load_cooccurrence(args.vocab, args.cooccurrence, args.preprocessing, args.negative, args.alpha)
embedding.load_vectors(args.initial, args.initialbias)
embedding.solve(mode=args.solver, gpu=args.gpu, scale=args.scale, normalize=args.normalize, iterations=args.iterations, eta=args.eta, momentum=args.momentum, normfreq=args.normfreq, innerloop=args.innerloop, batch=args.batch, scheme=args.scheme, sequential=args.sequential, checkpoint_every=args.checkpoint, checkpoint_root=args.vectors)
embedding.save_to_text(args.vectors)
elif args.task == "evaluate":
evaluate.evaluate(args.vocab, args.vectors)
class Embedding(object):
def __init__(self, dim=50, gpu=True, matgpu=None, embedgpu=None, CpuTensor=torch.FloatTensor):
self.dim = dim
self.gpu = gpu
# TODO: add warning for storage on gpu when computation is on cpu
# TODO: swap off storage if too much memory
if matgpu is None:
matgpu = gpu
if embedgpu is None:
embedgpu = gpu
self.matgpu = matgpu
self.embedgpu = embedgpu
self.CpuTensor = CpuTensor
self.logger = logging.getLogger(__name__)
def load_cooccurrence(self, vocab_file="vocab.txt", cooccurrence_file="cooccurrence.bin", preprocessing="none", negative=1., alpha=1.):
begin = time.time()
if True: # TODO
# Load vocab (words and counts)
def parse_line(l):
l = l.split()
assert(len(l) == 2)
return l[0], int(l[1])
with open(vocab_file) as f:
lines = [parse_line(l) for l in f]
self.words = [l[0] for l in lines]
self.vocab = self.CpuTensor([l[1] for l in lines])
self.n = self.vocab.size()[0]
self.logger.info("Distinct Words: " + str(self.n))
# Load cooccurrence matrix
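            # Each binary record is two 1-based int32 word indices followed by a float64 count,
            # i.e. 16 bytes per non-zero entry.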
filesize = os.stat(cooccurrence_file).st_size
assert(filesize % 16 == 0)
nnz = filesize // 16
self.logger.info("Number of non-zeros: " + str(nnz))
dt = np.dtype([("ind", "2<i4"), ("val", "<d")])
data = np.fromfile(cooccurrence_file, dtype=dt)
ind = torch.IntTensor(data["ind"].transpose()).type(torch.LongTensor) - 1
val = self.CpuTensor(data["val"])
self.mat = tensor_type.to_sparse(self.CpuTensor)(ind, val, torch.Size([self.n, self.n]))
# TODO: coalescing is very slow, and the cooccurrence matrix is
# almost always coalesced, but this might not be safe
# self.cooccurrence = self.cooccurrence.coalesce()
self.logger.info("Loading cooccurrence matrix took " + str(time.time() - begin))
# Preprocess cooccurrence matrix
self.preprocessing(preprocessing, negative, alpha)
if not self.gpu:
begin = time.time()
self.mat = scipy.sparse.csr_matrix((self.mat._values().numpy(), (self.mat._indices()[0, :].numpy(), self.mat._indices()[1, :].numpy())), shape=(self.n, self.n))
self.logger.info("CSR conversion took " + str(time.time() - begin))
# TODO: dump to file
else:
pass # TODO: load from file
def load_vectors(self, initial_vectors=None, initial_bias=None):
# TODO: move into load
if initial_vectors is None:
begin = time.time()
# TODO: this initialization is really bad for sgd and glove
# Older versions of PyTorch do not support random_ on GPU
# self.embedding = tensor_type.to_gpu(self.CpuTensor)(self.n, self.dim)
# self.embedding.random_(2)
self.embedding = self.CpuTensor(self.n, self.dim)
self.embedding.random_(2)
if self.embedgpu:
try:
self.embedding = self.embedding.cuda()
except RuntimeError as e:
self.logger.warn("Embeddings do not fit on GPU. Storing on CPU instead.")
self.embedgpu = False
self.logger.info("Random initialization took " + str(time.time() - begin))
self.embedding, _ = util.normalize(self.embedding)
else:
# TODO: verify that the vectors have the right set of words
# verify that the vectors have a matching dim
begin = time.time()
# TODO: select proper precision
dtype = collections.defaultdict(lambda: self.CpuTensor().numpy().dtype)
dtype[0] = str
            self.embedding = pandas.read_csv(initial_vectors, sep=" ", header=None, dtype=dtype).iloc[:, 1:].to_numpy()
if self.embedgpu:
self.embedding = tensor_type.to_gpu(self.CpuTensor)(self.embedding)
else:
self.embedding = self.CpuTensor(self.embedding)
self.logger.info("Loading initial vectors took " + str(time.time() - begin))
if self.gpu and not self.embedgpu:
self.embedding = self.embedding.t().pin_memory().t()
if initial_bias is not None:
# TODO: merge this with init bias in glove
# TODO: verify that the biases have the right set of words
begin = time.time()
# TODO: select proper precision
dtype = collections.defaultdict(lambda: self.CpuTensor().numpy().dtype)
dtype[0] = str
            self.bias = pandas.read_csv(initial_bias, sep=" ", header=None, dtype=dtype).iloc[:, 1].to_numpy()
if self.embedgpu: # TODO: own flag?
self.bias = tensor_type.to_gpu(self.CpuTensor)(self.bias)
else:
self.bias = self.CpuTensor(self.bias)
self.logger.info("Loading initial biases took " + str(time.time() - begin))
else:
self.bias = None
def preprocessing(self, mode="ppmi", negative=1., alpha=1.):
begin = time.time()
if self.matgpu:
try:
self.mat = self.mat.cuda()
logging.debug("Copying coocurrence to GPU took " + str(time.time() - begin))
except RuntimeError as e:
self.logger.warn("Cooccurrence matrix does not fit on GPU. Storing on CPU instead.")
self.matgpu = False
if mode == "none":
pass
elif mode == "log1p":
self.mat._values().log1p_()
elif mode == "ppmi":
s = time.time()
wc = util.sum_rows(self.mat)
logging.debug("Summing rows took " + str(time.time() - s)); s = time.time()
D = torch.sum(wc.pow(alpha)) # total dictionary size
logging.debug("Computing D took " + str(time.time() - s)); s = time.time()
# TODO: pytorch doesn't seem to only allow indexing by 2D tensor
wc0 = wc[self.mat._indices()[0, :]].squeeze()
wc1 = wc[self.mat._indices()[1, :]].squeeze()
logging.debug("Getting word counts took " + str(time.time() - s)); s = time.time()
ind = self.mat._indices()
v = self.mat._values()
nnz = v.shape[0]
v = torch.log(v) + (math.log(D) - math.log(negative)) - torch.log(wc0) - alpha * torch.log(wc1)
logging.debug("Computing PMI took " + str(time.time() - s)); s = time.time()
v = v.clamp(min=0)
logging.debug("Clamping took " + str(time.time() - s)); s = time.time()
if self.mat.is_cuda:
# This code is able to run on CPU, but is very slow
# Currently is not worth the processing time
# TODO: speed this up on CPU
keep = v.nonzero().squeeze(1)
logging.debug("Finding non-zeros took " + str(time.time() - s)); s = time.time()
if keep.shape[0] != v.shape[0]:
ind = ind[:, keep]
v = v[keep]
self.logger.info("nnz after ppmi processing: " + str(keep.shape[0]))
self.mat = type(self.mat)(ind, v, torch.Size([self.n, self.n]))
logging.debug("Filtering non-zeros took " + str(time.time() - s)); s = time.time()
# self.mat = self.mat.coalesce()
if self.gpu and not self.matgpu:
s = time.time()
ind = self.mat._indices().t().pin_memory().t()
v = self.mat._values().pin_memory()
self.mat = tensor_type.to_sparse(self.CpuTensor)(ind, v, torch.Size([self.n, self.n]))
logging.debug("Pinning cooccurrence matrix took " + str(time.time() - s))
self.logger.info("Preprocessing took " + str(time.time() - begin))
def solve(self, mode="pi", gpu=True, scale=0.5, normalize=True, iterations=50, eta=1e-3, momentum=0., normfreq=1, innerloop=10, batch=100000, scheme="element", sequential=True, checkpoint_every=0, checkpoint_root=""):
if momentum == 0.:
prev = None
else:
if self.embedding.is_cuda:
prev = tensor_type.to_gpu(self.CpuTensor)(self.n, self.dim)
else:
prev = self.CpuTensor(self.n, self.dim)
prev.zero_()
if checkpoint_root[-4:] == ".txt":
checkpoint_root = checkpoint_root[:-4]
def checkpoint(x, i):
if checkpoint_every > 0 and (i + 1) % checkpoint_every == 0:
util.save_to_text(checkpoint_root + "." + str(i + 1) + ".txt", x, self.words)
if (mode == "alecton" or
mode == "vr" or
mode == "sgd"):
if (type(self.mat) == scipy.sparse.csr.csr_matrix or
type(self.mat) == scipy.sparse.coo.coo_matrix or
type(self.mat) == scipy.sparse.csc.csc_matrix):
self.mat = self.mat.tocoo()
ind = torch.from_numpy(np.array([self.mat.row, self.mat.col])).type(torch.LongTensor)
val = self.CpuTensor(self.mat.data)
self.mat = tensor_type.to_sparse(self.CpuTensor)(ind, val, torch.Size(self.mat.shape))
sample = util.get_sampler(self.mat, batch, scheme, sequential)
if mode == "pi":
self.embedding, _ = solver.power_iteration(self.mat, self.embedding, x0=prev, iterations=iterations, beta=momentum, norm_freq=normfreq, gpu=gpu, checkpoint=checkpoint)
elif mode == "alecton":
self.embedding = solver.alecton(self.mat, self.embedding, iterations=iterations, eta=eta, norm_freq=normfreq, sample=sample, gpu=gpu, checkpoint=checkpoint)
elif mode == "vr":
self.embedding, _ = solver.vr(self.mat, self.embedding, x0=prev, iterations=iterations, beta=momentum, norm_freq=normfreq, batch=batch, innerloop=innerloop)
elif mode == "sgd":
self.embedding = solver.sgd(self.mat, self.embedding, iterations=iterations, eta=eta, batch=batch)
elif mode == "glove":
# TODO: fix defaults
# scale = 0
self.embedding, bias = solver.glove(self.mat, self.embedding, bias=self.bias, iterations=iterations, eta=eta, batch=batch)
elif mode == "sparsesvd":
self.embedding = solver.sparseSVD(self.mat, self.dim)
self.scale(scale)
if normalize:
self.normalize_embeddings()
if self.embedding.is_cuda:
begin = time.time()
self.embedding = self.embedding.cpu()
self.logger.info("CPU Loading: " + str(time.time() - begin))
def scale(self, p=1.):
if p != 0:
# TODO: Assumes that matrix is normalized.
begin = time.time()
# TODO: faster estimation of eigenvalues?
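            # Assuming the columns of the embedding are (approximately) unit-norm eigenvector
            # estimates, ||A x_i|| estimates the i-th eigenvalue, so multiplying by norm^p
            # applies the lambda^s weighting selected by --scale.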
temp = util.mm(self.mat, self.embedding, self.gpu)
norm = torch.norm(temp, 2, 0, True).squeeze()
self.logger.info(" ".join(["{:10.2f}".format(n) for n in norm]))
norm = norm.pow(p)
self.embedding = self.embedding.mul(norm.expand_as(self.embedding))
self.logger.info("Final scaling: " + str(time.time() - begin))
def normalize_embeddings(self):
norm = torch.norm(self.embedding, 2, 1, True)
self.embedding = self.embedding.div(norm.expand_as(self.embedding))
def evaluate(self):
embedding = self.embedding
if embedding.is_cuda:
embedding = embedding.cpu()
embedding = embedding.numpy()
return evaluate.evaluate(self.words, {self.words[i]: embedding[i, :] for i in range(len(self.words))})
def save_to_text(self, filename):
util.save_to_text(filename, self.embedding, self.words)
if __name__ == "__main__":
    main(sys.argv[1:])
| embedding-master | embedding/main.py |
"""Script to allow code to run via command line."""
import sys
from .main import main
main(sys.argv[1:])
| embedding-master | embedding/__main__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
SRC_ROOT = Path(os.path.dirname(os.path.realpath(__file__)))
PRO_ROOT = SRC_ROOT.parent
if __name__ == '__main__':
pass
| anli-main | src/config.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
from torch.optim import Adam
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import XLNetTokenizer, XLNetForSequenceClassification
# from transformers import XLNetTokenizer
# from modeling.dummy_modeling_xlnet import XLNetForSequenceClassification
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import AlbertTokenizer, AlbertForSequenceClassification
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
from transformers import BartTokenizer, BartForSequenceClassification
from transformers import ElectraTokenizer, ElectraForSequenceClassification
from torch.utils.data import Dataset, DataLoader, DistributedSampler, RandomSampler, SequentialSampler
import config
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from flint.data_utils.batchbuilder import BaseBatchBuilder, move_to_device
from flint.data_utils.fields import RawFlintField, LabelFlintField, ArrayIndexFlintField
from modeling.res_encoder import ResEncoder, EmptyScheduler, BagOfWords
from utils import common, list_dict_data_tool, save_tool
import os
import torch.multiprocessing as mp
import torch.distributed as dist
import torch.nn as nn
import numpy as np
import random
import torch
from tqdm import tqdm
import math
import copy
import pprint
pp = pprint.PrettyPrinter(indent=2)
# from fairseq.data.data_utils import collate_tokens
MODEL_CLASSES = {
"lstm-resencoder": {
"model_name": "bert-large-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
},
"bag-of-words": {
"model_name": "bert-large-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
},
"bert-base": {
"model_name": "bert-base-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
},
"bert-large": {
"model_name": "bert-large-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
"internal_model_name": "bert",
'insight_supported': True,
},
"xlnet-base": {
"model_name": "xlnet-base-cased",
"tokenizer": XLNetTokenizer,
"sequence_classification": XLNetForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 4,
"padding_att_value": 0,
"left_pad": True,
"internal_model_name": ["transformer", "word_embedding"],
},
"xlnet-large": {
"model_name": "xlnet-large-cased",
"tokenizer": XLNetTokenizer,
"sequence_classification": XLNetForSequenceClassification,
"padding_segement_value": 4,
"padding_att_value": 0,
"left_pad": True,
"internal_model_name": ["transformer", "word_embedding"],
'insight_supported': True,
},
"roberta-base": {
"model_name": "roberta-base",
"tokenizer": RobertaTokenizer,
"sequence_classification": RobertaForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "roberta",
'insight_supported': True,
},
"roberta-large": {
"model_name": "roberta-large",
"tokenizer": RobertaTokenizer,
"sequence_classification": RobertaForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "roberta",
'insight_supported': True,
},
"albert-xxlarge": {
"model_name": "albert-xxlarge-v2",
"tokenizer": AlbertTokenizer,
"sequence_classification": AlbertForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
"internal_model_name": "albert",
'insight_supported': True,
},
"distilbert": {
"model_name": "distilbert-base-cased",
"tokenizer": DistilBertTokenizer,
"sequence_classification": DistilBertForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
},
"bart-large": {
"model_name": "facebook/bart-large",
"tokenizer": BartTokenizer,
"sequence_classification": BartForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": ["model", "encoder", "embed_tokens"],
'insight_supported': True,
},
"electra-base": {
"model_name": "google/electra-base-discriminator",
"tokenizer": ElectraTokenizer,
"sequence_classification": ElectraForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "electra",
'insight_supported': True,
},
"electra-large": {
"model_name": "google/electra-large-discriminator",
"tokenizer": ElectraTokenizer,
"sequence_classification": ElectraForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "electra",
'insight_supported': True,
}
}
registered_path = {
'snli_train': config.PRO_ROOT / "data/build/snli/train.jsonl",
'snli_dev': config.PRO_ROOT / "data/build/snli/dev.jsonl",
'snli_test': config.PRO_ROOT / "data/build/snli/test.jsonl",
'mnli_train': config.PRO_ROOT / "data/build/mnli/train.jsonl",
'mnli_m_dev': config.PRO_ROOT / "data/build/mnli/m_dev.jsonl",
'mnli_mm_dev': config.PRO_ROOT / "data/build/mnli/mm_dev.jsonl",
'fever_train': config.PRO_ROOT / "data/build/fever_nli/train.jsonl",
'fever_dev': config.PRO_ROOT / "data/build/fever_nli/dev.jsonl",
'fever_test': config.PRO_ROOT / "data/build/fever_nli/test.jsonl",
'anli_r1_train': config.PRO_ROOT / "data/build/anli/r1/train.jsonl",
'anli_r1_dev': config.PRO_ROOT / "data/build/anli/r1/dev.jsonl",
'anli_r1_test': config.PRO_ROOT / "data/build/anli/r1/test.jsonl",
'anli_r2_train': config.PRO_ROOT / "data/build/anli/r2/train.jsonl",
'anli_r2_dev': config.PRO_ROOT / "data/build/anli/r2/dev.jsonl",
'anli_r2_test': config.PRO_ROOT / "data/build/anli/r2/test.jsonl",
'anli_r3_train': config.PRO_ROOT / "data/build/anli/r3/train.jsonl",
'anli_r3_dev': config.PRO_ROOT / "data/build/anli/r3/dev.jsonl",
'anli_r3_test': config.PRO_ROOT / "data/build/anli/r3/test.jsonl",
}
nli_label2index = {
'e': 0,
'n': 1,
'c': 2,
'h': -1,
}
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
class NLIDataset(Dataset):
def __init__(self, data_list, transform) -> None:
super().__init__()
self.d_list = data_list
self.len = len(self.d_list)
self.transform = transform
def __getitem__(self, index: int):
return self.transform(self.d_list[index])
# you should write schema for each of the input elements
def __len__(self) -> int:
return self.len
class NLITransform(object):
def __init__(self, model_name, tokenizer, max_length=None):
self.model_name = model_name
self.tokenizer = tokenizer
self.max_length = max_length
def __call__(self, sample):
processed_sample = dict()
processed_sample['uid'] = sample['uid']
processed_sample['gold_label'] = sample['label']
processed_sample['y'] = nli_label2index[sample['label']]
# premise: str = sample['premise']
premise: str = sample['context'] if 'context' in sample else sample['premise']
hypothesis: str = sample['hypothesis']
if premise.strip() == '':
premise = 'empty'
if hypothesis.strip() == '':
hypothesis = 'empty'
tokenized_input_seq_pair = self.tokenizer.encode_plus(premise, hypothesis,
max_length=self.max_length,
return_token_type_ids=True, truncation=True)
processed_sample.update(tokenized_input_seq_pair)
return processed_sample
def build_eval_dataset_loader_and_sampler(d_list, data_transformer, batching_schema, batch_size_per_gpu_eval):
d_dataset = NLIDataset(d_list, data_transformer)
d_sampler = SequentialSampler(d_dataset)
d_dataloader = DataLoader(dataset=d_dataset,
batch_size=batch_size_per_gpu_eval,
shuffle=False, #
num_workers=0,
pin_memory=True,
sampler=d_sampler,
collate_fn=BaseBatchBuilder(batching_schema)) #
return d_dataset, d_sampler, d_dataloader
def sample_data_list(d_list, ratio):
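    """Sample a training list according to a real-valued weight: weights of at most 1 return
    the list unchanged, integer weights above 1 repeat it that many times, and fractional
    weights above 1 keep a shuffled prefix of about `ratio * len(d_list)` examples."""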
if ratio <= 0:
raise ValueError("Invalid training weight ratio. Please change --train_weights.")
upper_int = int(math.ceil(ratio))
if upper_int == 1:
return d_list # if ratio is 1 then we just return the data list
else:
sampled_d_list = []
for _ in range(upper_int):
sampled_d_list.extend(copy.deepcopy(d_list))
if np.isclose(ratio, upper_int):
return sampled_d_list
else:
sampled_length = int(ratio * len(d_list))
random.shuffle(sampled_d_list)
return sampled_d_list[:sampled_length]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--cpu", action="store_true", help="If set, we only use CPU.")
parser.add_argument("--single_gpu", action="store_true", help="If set, we only use single GPU.")
parser.add_argument("--fp16", action="store_true", help="If set, we will use fp16.")
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
# environment arguments
parser.add_argument('-s', '--seed', default=1, type=int, metavar='N',
help='manual random seed')
parser.add_argument('-n', '--num_nodes', default=1, type=int, metavar='N',
help='number of nodes')
parser.add_argument('-g', '--gpus_per_node', default=1, type=int,
help='number of gpus per node')
parser.add_argument('-nr', '--node_rank', default=0, type=int,
help='ranking within the nodes')
# experiments specific arguments
parser.add_argument('--debug_mode',
action='store_true',
dest='debug_mode',
help='weather this is debug mode or normal')
parser.add_argument(
"--model_class_name",
type=str,
help="Set the model class of the experiment.",
)
parser.add_argument(
"--experiment_name",
type=str,
help="Set the name of the experiment. [model_name]/[data]/[task]/[other]",
)
parser.add_argument(
"--save_prediction",
action='store_true',
dest='save_prediction',
help='Do we want to save prediction')
parser.add_argument('--epochs', default=2, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument(
"--per_gpu_train_batch_size", default=16, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=64, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument("--max_length", default=160, type=int, help="Max length of the sequences.")
parser.add_argument("--warmup_steps", default=-1, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--learning_rate", default=1e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument(
"--eval_frequency", default=1000, type=int, help="set the evaluation frequency, evaluate every X global step.",
)
parser.add_argument("--train_data",
type=str,
help="The training data used in the experiments.")
parser.add_argument("--train_weights",
type=str,
help="The training data weights used in the experiments.")
parser.add_argument("--eval_data",
type=str,
help="The training data used in the experiments.")
args = parser.parse_args()
if args.cpu:
args.world_size = 1
train(-1, args)
elif args.single_gpu:
args.world_size = 1
train(0, args)
else: # distributed multiGPU training
#########################################################
args.world_size = args.gpus_per_node * args.num_nodes #
# os.environ['MASTER_ADDR'] = '152.2.142.184' # This is the IP address for nlp5
# maybe we will automatically retrieve the IP later.
        os.environ['MASTER_PORT'] = '8888'  # must be a valid TCP port (<= 65535)
        mp.spawn(train, nprocs=args.gpus_per_node, args=(args,))  # spawn one process per GPU on this node
# remember train is called as train(i, args).
#########################################################
def train(local_rank, args):
# debug = False
# print("GPU:", gpu)
# world_size = args.world_size
args.global_rank = args.node_rank * args.gpus_per_node + local_rank
args.local_rank = local_rank
# args.warmup_steps = 20
debug_count = 1000
num_epoch = args.epochs
actual_train_batch_size = args.world_size * args.per_gpu_train_batch_size * args.gradient_accumulation_steps
args.actual_train_batch_size = actual_train_batch_size
set_seed(args.seed)
num_labels = 3 # we are doing NLI so we set num_labels = 3, for other task we can change this value.
max_length = args.max_length
model_class_item = MODEL_CLASSES[args.model_class_name]
model_class_name = args.model_class_name
model_name = model_class_item['model_name']
do_lower_case = model_class_item['do_lower_case'] if 'do_lower_case' in model_class_item else False
tokenizer = model_class_item['tokenizer'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
do_lower_case=do_lower_case)
if model_class_name in ['lstm-resencoder']:
hg_model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(
config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
embedding = hg_model.bert.embeddings.word_embeddings
model = ResEncoder(v_size=embedding.weight.size(0), embd_dim=embedding.weight.size(1))
model.Embd.weight = embedding.weight
elif model_class_name in ['bag-of-words']:
hg_model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(
config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
embedding = hg_model.bert.embeddings.word_embeddings
model = BagOfWords(v_size=embedding.weight.size(0), embd_dim=embedding.weight.size(1))
model.Embd.weight = embedding.weight
else:
model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
padding_token_value = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
padding_segement_value = model_class_item["padding_segement_value"]
padding_att_value = model_class_item["padding_att_value"]
left_pad = model_class_item['left_pad'] if 'left_pad' in model_class_item else False
batch_size_per_gpu_train = args.per_gpu_train_batch_size
batch_size_per_gpu_eval = args.per_gpu_eval_batch_size
if not args.cpu and not args.single_gpu:
dist.init_process_group(
backend='nccl',
init_method='env://',
world_size=args.world_size,
rank=args.global_rank
)
train_data_str = args.train_data
train_data_weights_str = args.train_weights
eval_data_str = args.eval_data
train_data_name = []
train_data_path = []
train_data_list = []
train_data_weights = []
eval_data_name = []
eval_data_path = []
eval_data_list = []
train_data_named_path = train_data_str.split(',')
weights_str = train_data_weights_str.split(',') if train_data_weights_str is not None else None
eval_data_named_path = eval_data_str.split(',')
for named_path in train_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
        path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
train_data_name.append(name)
train_data_path.append(path)
train_data_list.append(d_list)
if weights_str is not None:
for weights in weights_str:
train_data_weights.append(float(weights))
else:
for i in range(len(train_data_list)):
train_data_weights.append(1)
for named_path in eval_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
        path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
eval_data_name.append(name)
eval_data_path.append(path)
eval_data_list.append(d_list)
assert len(train_data_weights) == len(train_data_list)
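    # Padding values are model-specific (see MODEL_CLASSES); XLNet-style models pad on the left
    # and use segment id 4 for padding.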
batching_schema = {
'uid': RawFlintField(),
'y': LabelFlintField(),
'input_ids': ArrayIndexFlintField(pad_idx=padding_token_value, left_pad=left_pad),
'token_type_ids': ArrayIndexFlintField(pad_idx=padding_segement_value, left_pad=left_pad),
'attention_mask': ArrayIndexFlintField(pad_idx=padding_att_value, left_pad=left_pad),
}
data_transformer = NLITransform(model_name, tokenizer, max_length)
# data_transformer = NLITransform(model_name, tokenizer, max_length, with_element=True)
eval_data_loaders = []
for eval_d_list in eval_data_list:
d_dataset, d_sampler, d_dataloader = build_eval_dataset_loader_and_sampler(eval_d_list, data_transformer,
batching_schema,
batch_size_per_gpu_eval)
eval_data_loaders.append(d_dataloader)
# Estimate the training size:
training_list = []
for i in range(len(train_data_list)):
print("Build Training Data ...")
train_d_list = train_data_list[i]
train_d_name = train_data_name[i]
train_d_weight = train_data_weights[i]
cur_train_list = sample_data_list(train_d_list, train_d_weight) # change later # we can apply different sample strategy here.
print(f"Data Name:{train_d_name}; Weight: {train_d_weight}; "
f"Original Size: {len(train_d_list)}; Sampled Size: {len(cur_train_list)}")
training_list.extend(cur_train_list)
estimated_training_size = len(training_list)
print("Estimated training size:", estimated_training_size)
# Estimate the training size ends:
# t_total = estimated_training_size // args.gradient_accumulation_steps * num_epoch
t_total = estimated_training_size * num_epoch // args.actual_train_batch_size
    if args.warmup_steps <= 0:  # default the warmup steps to 0.1 * total steps when no positive value is given.
args.warmup_steps = int(t_total * 0.1)
if not args.cpu:
torch.cuda.set_device(args.local_rank)
model.cuda(args.local_rank)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
if model_class_name not in ['lstm-resencoder']:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
else:
optimizer = Adam(optimizer_grouped_parameters)
scheduler = EmptyScheduler()
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if not args.cpu and not args.single_gpu:
model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
output_device=local_rank, find_unused_parameters=True)
args_dict = dict(vars(args))
file_path_prefix = '.'
if args.global_rank in [-1, 0]:
print("Total Steps:", t_total)
args.total_step = t_total
print("Warmup Steps:", args.warmup_steps)
print("Actual Training Batch Size:", actual_train_batch_size)
print("Arguments", pp.pprint(args))
# Let build the logger and log everything before the start of the first training epoch.
if args.global_rank in [-1, 0]: # only do logging if we use cpu or global_rank=0
if not args.debug_mode:
file_path_prefix, date = save_tool.gen_file_prefix(f"{args.experiment_name}")
# # # Create Log File
# Save the source code.
script_name = os.path.basename(__file__)
with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
out_f.write(it.read())
out_f.flush()
# Save option file
common.save_json(args_dict, os.path.join(file_path_prefix, "args.json"))
checkpoints_path = Path(file_path_prefix) / "checkpoints"
if not checkpoints_path.exists():
checkpoints_path.mkdir()
prediction_path = Path(file_path_prefix) / "predictions"
if not prediction_path.exists():
prediction_path.mkdir()
global_step = 0
# print(f"Global Rank:{args.global_rank} ### ", 'Init!')
for epoch in tqdm(range(num_epoch), desc="Epoch", disable=args.global_rank not in [-1, 0]):
# Let's build up training dataset for this epoch
training_list = []
for i in range(len(train_data_list)):
print("Build Training Data ...")
train_d_list = train_data_list[i]
train_d_name = train_data_name[i]
train_d_weight = train_data_weights[i]
cur_train_list = sample_data_list(train_d_list, train_d_weight) # change later # we can apply different sample strategy here.
print(f"Data Name:{train_d_name}; Weight: {train_d_weight}; "
f"Original Size: {len(train_d_list)}; Sampled Size: {len(cur_train_list)}")
training_list.extend(cur_train_list)
random.shuffle(training_list)
train_dataset = NLIDataset(training_list, data_transformer)
train_sampler = SequentialSampler(train_dataset)
if not args.cpu and not args.single_gpu:
print("Use distributed sampler.")
train_sampler = DistributedSampler(train_dataset, args.world_size, args.global_rank,
shuffle=True)
train_dataloader = DataLoader(dataset=train_dataset,
batch_size=batch_size_per_gpu_train,
shuffle=False, #
num_workers=0,
pin_memory=True,
sampler=train_sampler,
collate_fn=BaseBatchBuilder(batching_schema)) #
# training build finished.
print(debug_node_info(args), "epoch: ", epoch)
if not args.cpu and not args.single_gpu:
train_sampler.set_epoch(epoch) # setup the epoch to ensure random sampling at each epoch
for forward_step, batch in enumerate(tqdm(train_dataloader, desc="Iteration",
disable=args.global_rank not in [-1, 0]), 0):
model.train()
batch = move_to_device(batch, local_rank)
# print(batch['input_ids'], batch['y'])
if args.model_class_name in ["distilbert", "bart-large", "lstm-resencoder", "bag-of-words"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['y'])
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=batch['y'])
loss, logits = outputs[:2]
# print(debug_node_info(args), loss, logits, batch['uid'])
# print(debug_node_info(args), loss, batch['uid'])
# Accumulated loss
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
# if this forward step need model updates
# handle fp16
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# Gradient clip: if max_grad_norm < 0
if (forward_step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.global_rank in [-1, 0] and args.eval_frequency > 0 and global_step % args.eval_frequency == 0:
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# saving checkpoints
current_checkpoint_filename = \
f'e({epoch})|i({global_step})'
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
current_checkpoint_filename += \
f'|{cur_eval_data_name}#({round(r_dict[cur_eval_data_name]["acc"], 4)})'
if not args.debug_mode:
# save model:
model_output_dir = checkpoints_path / current_checkpoint_filename
if not model_output_dir.exists():
model_output_dir.mkdir()
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), str(model_output_dir / "model.pt"))
torch.save(optimizer.state_dict(), str(model_output_dir / "optimizer.pt"))
torch.save(scheduler.state_dict(), str(model_output_dir / "scheduler.pt"))
# save prediction:
if not args.debug_mode and args.save_prediction:
cur_results_path = prediction_path / current_checkpoint_filename
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
# End of epoch evaluation.
if args.global_rank in [-1, 0]:
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# saving checkpoints
current_checkpoint_filename = \
f'e({epoch})|i({global_step})'
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
current_checkpoint_filename += \
f'|{cur_eval_data_name}#({round(r_dict[cur_eval_data_name]["acc"], 4)})'
if not args.debug_mode:
# save model:
model_output_dir = checkpoints_path / current_checkpoint_filename
if not model_output_dir.exists():
model_output_dir.mkdir()
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), str(model_output_dir / "model.pt"))
torch.save(optimizer.state_dict(), str(model_output_dir / "optimizer.pt"))
torch.save(scheduler.state_dict(), str(model_output_dir / "scheduler.pt"))
# save prediction:
if not args.debug_mode and args.save_prediction:
cur_results_path = prediction_path / current_checkpoint_filename
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
id2label = {
0: 'e',
1: 'n',
2: 'c',
-1: '-',
}
def count_acc(gt_list, pred_list):
assert len(gt_list) == len(pred_list)
gt_dict = list_dict_data_tool.list_to_dict(gt_list, 'uid')
    pred_dict = list_dict_data_tool.list_to_dict(pred_list, 'uid')
    total_count = 0
    hit = 0
    for key, value in pred_dict.items():
if gt_dict[key]['label'] == value['predicted_label']:
hit += 1
total_count += 1
return hit, total_count
def evaluation_dataset(args, eval_dataloader, eval_list, model, r_dict, eval_name):
# r_dict = dict()
pred_output_list = eval_model(model, eval_dataloader, args.global_rank, args)
predictions = pred_output_list
hit, total = count_acc(eval_list, pred_output_list)
print(debug_node_info(args), f"{eval_name} Acc:", hit, total, hit / total)
r_dict[f'{eval_name}'] = {
'acc': hit / total,
'correct_count': hit,
'total_count': total,
'predictions': predictions,
}
def eval_model(model, dev_dataloader, device_num, args):
model.eval()
uid_list = []
y_list = []
pred_list = []
logits_list = []
with torch.no_grad():
for i, batch in enumerate(dev_dataloader, 0):
batch = move_to_device(batch, device_num)
if args.model_class_name in ["distilbert", "bart-large", 'lstm-resencoder', "bag-of-words"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['y'])
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=batch['y'])
loss, logits = outputs[:2]
uid_list.extend(list(batch['uid']))
y_list.extend(batch['y'].tolist())
pred_list.extend(torch.max(logits, 1)[1].view(logits.size(0)).tolist())
logits_list.extend(logits.tolist())
    assert len(pred_list) == len(logits_list)
result_items_list = []
for i in range(len(uid_list)):
r_item = dict()
r_item['uid'] = uid_list[i]
r_item['logits'] = logits_list[i]
r_item['predicted_label'] = id2label[pred_list[i]]
result_items_list.append(r_item)
return result_items_list
def debug_node_info(args):
names = ['global_rank', 'local_rank', 'node_rank']
values = []
for name in names:
if name in args:
values.append(getattr(args, name))
else:
return "Pro:No node info "
return "Pro:" + '|'.join([f"{name}:{value}" for name, value in zip(names, values)]) + "||Print:"
if __name__ == '__main__':
main()
| anli-main | src/nli/training_extra.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
import logging
from captum.attr import LayerIntegratedGradients
logger = logging.getLogger(__name__)
def summarize_attributions(attributions):
"""
    Sums token attributions over the embedding dimension and normalizes them to unit norm
"""
attributions = attributions.sum(dim=-1).squeeze(0)
attributions = attributions / torch.norm(attributions)
return attributions
def get_model_prediction(input_ids, attention_mask, token_type_ids, model, model_class_item, with_gradient=False):
model.eval()
if not with_gradient:
with torch.no_grad():
if model_class_item['model_class_name'] in ["distilbert", "bart-large"]:
outputs = model(input_ids,
attention_mask=attention_mask,
labels=None)
else:
outputs = model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=None)
else:
if model_class_item['model_class_name'] in ["distilbert", "bart-large"]:
outputs = model(input_ids,
attention_mask=attention_mask,
labels=None)
else:
outputs = model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=None)
return outputs[0]
def get_lig_object(model, model_class_item):
insight_supported = model_class_item['insight_supported'] if 'insight_supported' in model_class_item else False
internal_model_name = model_class_item['internal_model_name']
lig = None # default is None.
if not insight_supported:
logger.warning(f"Inspection for model '{model_class_item['model_class_name']}' is not supported.")
return lig
if isinstance(internal_model_name, list):
current_layer = model
for layer_n in internal_model_name:
current_layer = current_layer.__getattr__(layer_n)
# print(current_layer)
lig = LayerIntegratedGradients(get_model_prediction, current_layer)
else:
lig = LayerIntegratedGradients(get_model_prediction,
model.__getattr__(internal_model_name).embeddings.word_embeddings)
return lig
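# Illustrative sketch of how the returned `lig` object is typically consumed with Captum;
# `input_ids`, `baseline_ids`, `attention_mask`, `token_type_ids` and
# `predicted_label_index` are hypothetical tensors/values prepared by the caller:
#
#   attributions = lig.attribute(inputs=input_ids,
#                                baselines=baseline_ids,
#                                additional_forward_args=(attention_mask, token_type_ids,
#                                                         model, model_class_item, True),
#                                target=predicted_label_index)
#   token_importance = summarize_attributions(attributions)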
def get_tokenized_input_tokens(tokenizer, token_ids):
raw_words_list = tokenizer.convert_ids_to_tokens(token_ids)
string_tokens = [tokenizer.convert_tokens_to_string(word) for word in raw_words_list]
# still need some cleanup, remove space within tokens
output_tokens = []
for t in string_tokens:
output_tokens.append(t.replace(" ", ""))
return output_tokens
def cleanup_tokenization_special_tokens(tokens, importance, tokenizer):
filtered_tokens = []
filtered_importance = []
for t, i in zip(tokens, importance):
if t in tokenizer.all_special_tokens:
continue
else:
filtered_tokens.append(t)
filtered_importance.append(i)
return filtered_tokens, filtered_importance
| anli-main | src/nli/inspection_tools.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
import config
from flint.data_utils.fields import RawFlintField, LabelFlintField, ArrayIndexFlintField
from utils import common, list_dict_data_tool, save_tool
from nli.training import MODEL_CLASSES, registered_path, build_eval_dataset_loader_and_sampler, NLITransform, \
NLIDataset, count_acc, evaluation_dataset, eval_model
import torch
import pprint
pp = pprint.PrettyPrinter(indent=2)
def evaluation():
parser = argparse.ArgumentParser()
parser.add_argument("--cpu", action="store_true", help="If set, we only use CPU.")
parser.add_argument(
"--model_class_name",
type=str,
help="Set the model class of the experiment.",
required=True
)
parser.add_argument(
"--model_checkpoint_path",
type=str,
        help='Set the path to the model checkpoint.', required=True)
parser.add_argument(
"--output_prediction_path",
type=str,
default=None,
help='Set the path to save the prediction.')
parser.add_argument(
"--per_gpu_eval_batch_size", default=16, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument("--max_length", default=156, type=int, help="Max length of the sequences.")
parser.add_argument("--eval_data",
type=str,
help="The training data used in the experiments.")
args = parser.parse_args()
if args.cpu:
args.global_rank = -1
else:
args.global_rank = 0
model_checkpoint_path = args.model_checkpoint_path
num_labels = 3
# we are doing NLI so we set num_labels = 3, for other task we can change this value.
max_length = args.max_length
model_class_item = MODEL_CLASSES[args.model_class_name]
model_name = model_class_item['model_name']
do_lower_case = model_class_item['do_lower_case'] if 'do_lower_case' in model_class_item else False
tokenizer = model_class_item['tokenizer'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
do_lower_case=do_lower_case)
model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
model.load_state_dict(torch.load(model_checkpoint_path))
padding_token_value = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
padding_segement_value = model_class_item["padding_segement_value"]
padding_att_value = model_class_item["padding_att_value"]
left_pad = model_class_item['left_pad'] if 'left_pad' in model_class_item else False
batch_size_per_gpu_eval = args.per_gpu_eval_batch_size
eval_data_str = args.eval_data
eval_data_name = []
eval_data_path = []
eval_data_list = []
eval_data_named_path = eval_data_str.split(',')
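    # --eval_data is a comma-separated list of "name:path" pairs; names found in
    # registered_path resolve to the bundled files, otherwise the given path is loaded as jsonl.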
for named_path in eval_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
        path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
eval_data_name.append(name)
eval_data_path.append(path)
eval_data_list.append(d_list)
batching_schema = {
'uid': RawFlintField(),
'y': LabelFlintField(),
'input_ids': ArrayIndexFlintField(pad_idx=padding_token_value, left_pad=left_pad),
'token_type_ids': ArrayIndexFlintField(pad_idx=padding_segement_value, left_pad=left_pad),
'attention_mask': ArrayIndexFlintField(pad_idx=padding_att_value, left_pad=left_pad),
}
data_transformer = NLITransform(model_name, tokenizer, max_length)
eval_data_loaders = []
for eval_d_list in eval_data_list:
d_dataset, d_sampler, d_dataloader = build_eval_dataset_loader_and_sampler(eval_d_list, data_transformer,
batching_schema,
batch_size_per_gpu_eval)
eval_data_loaders.append(d_dataloader)
if not args.cpu:
torch.cuda.set_device(0)
model.cuda(0)
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# save prediction:
if args.output_prediction_path is not None:
cur_results_path = Path(args.output_prediction_path)
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
if __name__ == '__main__':
evaluation()
| anli-main | src/nli/evaluation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree. | anli-main | src/nli/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
import uuid
import numpy as np
import config
from flint.data_utils.batchbuilder import move_to_device
from flint.data_utils.fields import RawFlintField, LabelFlintField, ArrayIndexFlintField
from utils import common, list_dict_data_tool, save_tool
from nli.training import MODEL_CLASSES, registered_path, build_eval_dataset_loader_and_sampler, NLITransform, \
NLIDataset, count_acc, evaluation_dataset, eval_model
import torch
import pprint
pp = pprint.PrettyPrinter(indent=2)
id2label = {
0: 'e',
1: 'n',
2: 'c',
-1: '-',
}
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(np.asarray(x) - np.max(x))
return e_x / e_x.sum()
def eval_model(model, dev_dataloader, device_num, args):
model.eval()
uid_list = []
y_list = []
pred_list = []
logits_list = []
with torch.no_grad():
for i, batch in enumerate(dev_dataloader, 0):
batch = move_to_device(batch, device_num)
if args.model_class_name in ["distilbert", "bart-large"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=None)
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=None)
# print(outputs)
logits = outputs[0]
uid_list.extend(list(batch['uid']))
y_list.extend(batch['y'].tolist())
pred_list.extend(torch.max(logits, 1)[1].view(logits.size(0)).tolist())
logits_list.extend(logits.tolist())
    assert len(pred_list) == len(logits_list)
result_items_list = []
for i in range(len(uid_list)):
r_item = dict()
r_item['uid'] = uid_list[i]
r_item['logits'] = logits_list[i]
r_item['probability'] = softmax(r_item['logits'])
r_item['predicted_label'] = id2label[pred_list[i]]
result_items_list.append(r_item)
return result_items_list
def inference(model_class_name, model_checkpoint_path, max_length, premise, hypothesis, cpu=True):
parser = argparse.ArgumentParser()
args = parser.parse_args()
# CPU for now
if cpu:
args.global_rank = -1
else:
args.global_rank = 0
model_checkpoint_path = model_checkpoint_path
args.model_class_name = model_class_name
num_labels = 3
# we are doing NLI so we set num_labels = 3, for other task we can change this value.
max_length = max_length
model_class_item = MODEL_CLASSES[model_class_name]
model_name = model_class_item['model_name']
do_lower_case = model_class_item['do_lower_case'] if 'do_lower_case' in model_class_item else False
tokenizer = model_class_item['tokenizer'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
do_lower_case=do_lower_case)
model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
model.load_state_dict(torch.load(model_checkpoint_path))
padding_token_value = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
padding_segement_value = model_class_item["padding_segement_value"]
padding_att_value = model_class_item["padding_att_value"]
left_pad = model_class_item['left_pad'] if 'left_pad' in model_class_item else False
batch_size_per_gpu_eval = 16
eval_data_list = [{
'uid': str(uuid.uuid4()),
'premise': premise,
'hypothesis': hypothesis,
'label': 'h' # hidden
}]
batching_schema = {
'uid': RawFlintField(),
'y': LabelFlintField(),
'input_ids': ArrayIndexFlintField(pad_idx=padding_token_value, left_pad=left_pad),
'token_type_ids': ArrayIndexFlintField(pad_idx=padding_segement_value, left_pad=left_pad),
'attention_mask': ArrayIndexFlintField(pad_idx=padding_att_value, left_pad=left_pad),
}
data_transformer = NLITransform(model_name, tokenizer, max_length)
d_dataset, d_sampler, d_dataloader = build_eval_dataset_loader_and_sampler(eval_data_list, data_transformer,
batching_schema,
batch_size_per_gpu_eval)
if not cpu:
torch.cuda.set_device(0)
model.cuda(0)
pred_output_list = eval_model(model, d_dataloader, args.global_rank, args)
# r_dict = dict()
# Eval loop:
# print(pred_output_list)
return pred_output_list[0]
if __name__ == '__main__':
# model_class_name = "roberta-large"
# model_checkpoint_path = config.PRO_ROOT / "saved_models/06-29-22:16:24_roberta-large|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(0)|i(24000)|snli_dev#(0.9252)|mnli_m_dev#(0.899)|mnli_mm_dev#(0.9002)|anli_r1_dev#(0.74)|anli_r1_test#(0.742)|anli_r2_dev#(0.506)|anli_r2_test#(0.498)|anli_r3_dev#(0.4667)|anli_r3_test#(0.455)/model.pt"
# model_class_name = "xlnet-large"
# model_checkpoint_path = config.PRO_ROOT / "saved_models/06-29-23:04:33_xlnet-large|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(1)|i(30000)|snli_dev#(0.9274)|mnli_m_dev#(0.8981)|mnli_mm_dev#(0.8947)|anli_r1_dev#(0.735)|anli_r1_test#(0.701)|anli_r2_dev#(0.521)|anli_r2_test#(0.514)|anli_r3_dev#(0.5075)|anli_r3_test#(0.4975)/model.pt"
model_class_name = "albert-xxlarge"
model_checkpoint_path = config.PRO_ROOT / "saved_models/06-29-23:09:03_albert-xxlarge|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(0)|i(16000)|snli_dev#(0.9246)|mnli_m_dev#(0.8948)|mnli_mm_dev#(0.8932)|anli_r1_dev#(0.733)|anli_r1_test#(0.711)|anli_r2_dev#(0.571)|anli_r2_test#(0.57)|anli_r3_dev#(0.5817)|anli_r3_test#(0.5375)/model.pt"
#
# model_class_name = "bart-large"
# model_checkpoint_path = config.PRO_ROOT / "saved_models/06-30-08:23:44_bart-large|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(1)|i(40000)|snli_dev#(0.9298)|mnli_m_dev#(0.8941)|mnli_mm_dev#(0.8973)|anli_r1_dev#(0.736)|anli_r1_test#(0.72)|anli_r2_dev#(0.533)|anli_r2_test#(0.514)|anli_r3_dev#(0.5058)|anli_r3_test#(0.5042)/model.pt"
#
# model_class_name = "electra-large"
# model_checkpoint_path = config.PRO_ROOT / "saved_models/08-02-08:58:05_electra-large|snli+mnli+fnli+r1*10+r2*20+r3*10|nli/checkpoints/e(0)|i(12000)|snli_dev#(0.9168)|mnli_m_dev#(0.8597)|mnli_mm_dev#(0.8661)|anli_r1_dev#(0.672)|anli_r1_test#(0.678)|anli_r2_dev#(0.536)|anli_r2_test#(0.522)|anli_r3_dev#(0.55)|anli_r3_test#(0.5217)/model.pt"
max_length = 184
premise = "Two women are embracing while holding to go packages."
hypothesis = "The men are fighting outside a deli."
pred_output = inference(model_class_name, model_checkpoint_path, max_length, premise, hypothesis, cpu=True)
print(pred_output)
| anli-main | src/nli/inference_debug.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import argparse
from pathlib import Path
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import XLNetTokenizer, XLNetForSequenceClassification
# from transformers import XLNetTokenizer
# from modeling.dummy_modeling_xlnet import XLNetForSequenceClassification
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import AlbertTokenizer, AlbertForSequenceClassification
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
from transformers import BartTokenizer, BartForSequenceClassification
from transformers import ElectraTokenizer, ElectraForSequenceClassification
from torch.utils.data import Dataset, DataLoader, DistributedSampler, RandomSampler, SequentialSampler
import config
from transformers import AdamW
from transformers import get_linear_schedule_with_warmup
from flint.data_utils.batchbuilder import BaseBatchBuilder, move_to_device
from flint.data_utils.fields import RawFlintField, LabelFlintField, ArrayIndexFlintField
from utils import common, list_dict_data_tool, save_tool
import os
import torch.multiprocessing as mp
import torch.distributed as dist
import torch.nn as nn
import numpy as np
import random
import torch
from tqdm import tqdm
import math
import copy
import pprint
pp = pprint.PrettyPrinter(indent=2)
# from fairseq.data.data_utils import collate_tokens
MODEL_CLASSES = {
"bert-base": {
"model_name": "bert-base-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
},
"bert-large": {
"model_name": "bert-large-uncased",
"tokenizer": BertTokenizer,
"sequence_classification": BertForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
"internal_model_name": "bert",
'insight_supported': True,
},
"xlnet-base": {
"model_name": "xlnet-base-cased",
"tokenizer": XLNetTokenizer,
"sequence_classification": XLNetForSequenceClassification,
# "padding_token_value": 0,
"padding_segement_value": 4,
"padding_att_value": 0,
"left_pad": True,
"internal_model_name": ["transformer", "word_embedding"],
},
"xlnet-large": {
"model_name": "xlnet-large-cased",
"tokenizer": XLNetTokenizer,
"sequence_classification": XLNetForSequenceClassification,
"padding_segement_value": 4,
"padding_att_value": 0,
"left_pad": True,
"internal_model_name": ["transformer", "word_embedding"],
'insight_supported': True,
},
"roberta-base": {
"model_name": "roberta-base",
"tokenizer": RobertaTokenizer,
"sequence_classification": RobertaForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "roberta",
'insight_supported': True,
},
"roberta-large": {
"model_name": "roberta-large",
"tokenizer": RobertaTokenizer,
"sequence_classification": RobertaForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "roberta",
'insight_supported': True,
},
"albert-xxlarge": {
"model_name": "albert-xxlarge-v2",
"tokenizer": AlbertTokenizer,
"sequence_classification": AlbertForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"do_lower_case": True,
"internal_model_name": "albert",
'insight_supported': True,
},
"distilbert": {
"model_name": "distilbert-base-cased",
"tokenizer": DistilBertTokenizer,
"sequence_classification": DistilBertForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
},
"bart-large": {
"model_name": "facebook/bart-large",
"tokenizer": BartTokenizer,
"sequence_classification": BartForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": ["model", "encoder", "embed_tokens"],
'insight_supported': True,
},
"electra-base": {
"model_name": "google/electra-base-discriminator",
"tokenizer": ElectraTokenizer,
"sequence_classification": ElectraForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "electra",
'insight_supported': True,
},
"electra-large": {
"model_name": "google/electra-large-discriminator",
"tokenizer": ElectraTokenizer,
"sequence_classification": ElectraForSequenceClassification,
"padding_segement_value": 0,
"padding_att_value": 0,
"internal_model_name": "electra",
'insight_supported': True,
}
}
registered_path = {
'snli_train': config.PRO_ROOT / "data/build/snli/train.jsonl",
'snli_dev': config.PRO_ROOT / "data/build/snli/dev.jsonl",
'snli_test': config.PRO_ROOT / "data/build/snli/test.jsonl",
'mnli_train': config.PRO_ROOT / "data/build/mnli/train.jsonl",
'mnli_m_dev': config.PRO_ROOT / "data/build/mnli/m_dev.jsonl",
'mnli_mm_dev': config.PRO_ROOT / "data/build/mnli/mm_dev.jsonl",
'fever_train': config.PRO_ROOT / "data/build/fever_nli/train.jsonl",
'fever_dev': config.PRO_ROOT / "data/build/fever_nli/dev.jsonl",
'fever_test': config.PRO_ROOT / "data/build/fever_nli/test.jsonl",
'anli_r1_train': config.PRO_ROOT / "data/build/anli/r1/train.jsonl",
'anli_r1_dev': config.PRO_ROOT / "data/build/anli/r1/dev.jsonl",
'anli_r1_test': config.PRO_ROOT / "data/build/anli/r1/test.jsonl",
'anli_r2_train': config.PRO_ROOT / "data/build/anli/r2/train.jsonl",
'anli_r2_dev': config.PRO_ROOT / "data/build/anli/r2/dev.jsonl",
'anli_r2_test': config.PRO_ROOT / "data/build/anli/r2/test.jsonl",
'anli_r3_train': config.PRO_ROOT / "data/build/anli/r3/train.jsonl",
'anli_r3_dev': config.PRO_ROOT / "data/build/anli/r3/dev.jsonl",
'anli_r3_test': config.PRO_ROOT / "data/build/anli/r3/test.jsonl",
}
nli_label2index = {
'e': 0,
'n': 1,
'c': 2,
'h': -1,
}
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
class NLIDataset(Dataset):
def __init__(self, data_list, transform) -> None:
super().__init__()
self.d_list = data_list
self.len = len(self.d_list)
self.transform = transform
def __getitem__(self, index: int):
return self.transform(self.d_list[index])
# you should write schema for each of the input elements
def __len__(self) -> int:
return self.len
class NLITransform(object):
def __init__(self, model_name, tokenizer, max_length=None):
self.model_name = model_name
self.tokenizer = tokenizer
self.max_length = max_length
def __call__(self, sample):
processed_sample = dict()
processed_sample['uid'] = sample['uid']
processed_sample['gold_label'] = sample['label']
processed_sample['y'] = nli_label2index[sample['label']]
# premise: str = sample['premise']
premise: str = sample['context'] if 'context' in sample else sample['premise']
hypothesis: str = sample['hypothesis']
if premise.strip() == '':
premise = 'empty'
if hypothesis.strip() == '':
hypothesis = 'empty'
tokenized_input_seq_pair = self.tokenizer.encode_plus(premise, hypothesis,
max_length=self.max_length,
return_token_type_ids=True, truncation=True)
processed_sample.update(tokenized_input_seq_pair)
return processed_sample
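# Each processed sample produced by NLITransform contains 'uid', 'gold_label', 'y', plus the
# tokenizer outputs 'input_ids', 'token_type_ids' and 'attention_mask'; these keys line up
# with the batching_schema entries used for padding further below.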
def build_eval_dataset_loader_and_sampler(d_list, data_transformer, batching_schema, batch_size_per_gpu_eval):
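    # Wrap an evaluation list in an NLIDataset and a sequential (non-shuffled) DataLoader,
    # so prediction order matches the input order.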
d_dataset = NLIDataset(d_list, data_transformer)
d_sampler = SequentialSampler(d_dataset)
d_dataloader = DataLoader(dataset=d_dataset,
batch_size=batch_size_per_gpu_eval,
shuffle=False, #
num_workers=0,
pin_memory=True,
sampler=d_sampler,
collate_fn=BaseBatchBuilder(batching_schema)) #
return d_dataset, d_sampler, d_dataloader
def sample_data_list(d_list, ratio):
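    # Over-sample d_list by a float ratio: e.g. ratio=2.5 shuffles three concatenated copies
    # and keeps int(2.5 * len(d_list)) items; integer ratios keep exact copies, and any ratio
    # in (0, 1] returns the list unchanged.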
if ratio <= 0:
raise ValueError("Invalid training weight ratio. Please change --train_weights.")
upper_int = int(math.ceil(ratio))
if upper_int == 1:
return d_list # if ratio is 1 then we just return the data list
else:
sampled_d_list = []
for _ in range(upper_int):
sampled_d_list.extend(copy.deepcopy(d_list))
if np.isclose(ratio, upper_int):
return sampled_d_list
else:
sampled_length = int(ratio * len(d_list))
random.shuffle(sampled_d_list)
return sampled_d_list[:sampled_length]
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--cpu", action="store_true", help="If set, we only use CPU.")
parser.add_argument("--single_gpu", action="store_true", help="If set, we only use single GPU.")
parser.add_argument("--fp16", action="store_true", help="If set, we will use fp16.")
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
# environment arguments
parser.add_argument('-s', '--seed', default=1, type=int, metavar='N',
help='manual random seed')
parser.add_argument('-n', '--num_nodes', default=1, type=int, metavar='N',
help='number of nodes')
parser.add_argument('-g', '--gpus_per_node', default=1, type=int,
help='number of gpus per node')
parser.add_argument('-nr', '--node_rank', default=0, type=int,
help='ranking within the nodes')
# experiments specific arguments
parser.add_argument('--debug_mode',
action='store_true',
dest='debug_mode',
                        help='whether this is debug mode or normal')
parser.add_argument(
"--model_class_name",
type=str,
help="Set the model class of the experiment.",
)
parser.add_argument(
"--experiment_name",
type=str,
help="Set the name of the experiment. [model_name]/[data]/[task]/[other]",
)
parser.add_argument(
"--save_prediction",
action='store_true',
dest='save_prediction',
help='Do we want to save prediction')
parser.add_argument(
"--resume_path",
type=str,
default=None,
help="If we want to resume model training, we need to set the resume path to restore state dicts.",
)
parser.add_argument(
"--global_iteration",
type=int,
default=0,
help="This argument is only used if we resume model training.",
)
parser.add_argument('--epochs', default=2, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--total_step', default=-1, type=int, metavar='N',
                        help='number of optimizer steps to run; by default it is calculated from the total data size. '
                             'If set, the epoch count is ignored and training runs until this step count is reached.')
parser.add_argument('--sampler_seed', default=-1, type=int, metavar='N',
help='The seed the controls the data sampling order.')
parser.add_argument(
"--per_gpu_train_batch_size", default=16, type=int, help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--per_gpu_eval_batch_size", default=64, type=int, help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument("--max_length", default=160, type=int, help="Max length of the sequences.")
parser.add_argument("--warmup_steps", default=-1, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument("--learning_rate", default=1e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument(
"--eval_frequency", default=1000, type=int, help="set the evaluation frequency, evaluate every X global step.",
)
parser.add_argument("--train_data",
type=str,
help="The training data used in the experiments.")
parser.add_argument("--train_weights",
type=str,
help="The training data weights used in the experiments.")
parser.add_argument("--eval_data",
type=str,
help="The training data used in the experiments.")
args = parser.parse_args()
if args.cpu:
args.world_size = 1
train(-1, args)
elif args.single_gpu:
args.world_size = 1
train(0, args)
else: # distributed multiGPU training
#########################################################
args.world_size = args.gpus_per_node * args.num_nodes #
# os.environ['MASTER_ADDR'] = '152.2.142.184' # This is the IP address for nlp5
# maybe we will automatically retrieve the IP later.
        os.environ['MASTER_PORT'] = '8888'  # NOTE: must be a valid TCP port (<= 65535)
mp.spawn(train, nprocs=args.gpus_per_node, args=(args,)) # spawn how many process in this node
# remember train is called as train(i, args).
#########################################################
def train(local_rank, args):
# debug = False
# print("GPU:", gpu)
# world_size = args.world_size
args.global_rank = args.node_rank * args.gpus_per_node + local_rank
args.local_rank = local_rank
# args.warmup_steps = 20
debug_count = 1000
if args.total_step > 0:
num_epoch = 10000 # if we set total step, num_epoch will be forever.
else:
num_epoch = args.epochs
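    # Effective batch size per optimizer step = (#processes across all nodes) * per-GPU batch
    # size * gradient accumulation steps; t_total below is computed from this value.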
actual_train_batch_size = args.world_size * args.per_gpu_train_batch_size * args.gradient_accumulation_steps
args.actual_train_batch_size = actual_train_batch_size
set_seed(args.seed)
num_labels = 3 # we are doing NLI so we set num_labels = 3, for other task we can change this value.
max_length = args.max_length
model_class_item = MODEL_CLASSES[args.model_class_name]
model_name = model_class_item['model_name']
do_lower_case = model_class_item['do_lower_case'] if 'do_lower_case' in model_class_item else False
tokenizer = model_class_item['tokenizer'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
do_lower_case=do_lower_case)
model = model_class_item['sequence_classification'].from_pretrained(model_name,
cache_dir=str(config.PRO_ROOT / "trans_cache"),
num_labels=num_labels)
padding_token_value = tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0]
padding_segement_value = model_class_item["padding_segement_value"]
padding_att_value = model_class_item["padding_att_value"]
left_pad = model_class_item['left_pad'] if 'left_pad' in model_class_item else False
batch_size_per_gpu_train = args.per_gpu_train_batch_size
batch_size_per_gpu_eval = args.per_gpu_eval_batch_size
if not args.cpu and not args.single_gpu:
dist.init_process_group(
backend='nccl',
init_method='env://',
world_size=args.world_size,
rank=args.global_rank
)
train_data_str = args.train_data
train_data_weights_str = args.train_weights
eval_data_str = args.eval_data
train_data_name = []
train_data_path = []
train_data_list = []
train_data_weights = []
eval_data_name = []
eval_data_path = []
eval_data_list = []
train_data_named_path = train_data_str.split(',')
weights_str = train_data_weights_str.split(',') if train_data_weights_str is not None else None
eval_data_named_path = eval_data_str.split(',')
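    # Both --train_data and --eval_data are comma-separated "name:path" pairs; registered
    # names resolve through registered_path, otherwise the raw path is loaded as jsonl.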
for named_path in train_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
train_data_name.append(name)
train_data_path.append(path)
train_data_list.append(d_list)
if weights_str is not None:
for weights in weights_str:
train_data_weights.append(float(weights))
else:
for i in range(len(train_data_list)):
train_data_weights.append(1)
for named_path in eval_data_named_path:
ind = named_path.find(':')
name = named_path[:ind]
path = named_path[ind + 1:]
if name in registered_path:
d_list = common.load_jsonl(registered_path[name])
else:
d_list = common.load_jsonl(path)
eval_data_name.append(name)
eval_data_path.append(path)
eval_data_list.append(d_list)
assert len(train_data_weights) == len(train_data_list)
batching_schema = {
'uid': RawFlintField(),
'y': LabelFlintField(),
'input_ids': ArrayIndexFlintField(pad_idx=padding_token_value, left_pad=left_pad),
'token_type_ids': ArrayIndexFlintField(pad_idx=padding_segement_value, left_pad=left_pad),
'attention_mask': ArrayIndexFlintField(pad_idx=padding_att_value, left_pad=left_pad),
}
data_transformer = NLITransform(model_name, tokenizer, max_length)
# data_transformer = NLITransform(model_name, tokenizer, max_length, with_element=True)
eval_data_loaders = []
for eval_d_list in eval_data_list:
d_dataset, d_sampler, d_dataloader = build_eval_dataset_loader_and_sampler(eval_d_list, data_transformer,
batching_schema,
batch_size_per_gpu_eval)
eval_data_loaders.append(d_dataloader)
# Estimate the training size:
training_list = []
for i in range(len(train_data_list)):
print("Build Training Data ...")
train_d_list = train_data_list[i]
train_d_name = train_data_name[i]
train_d_weight = train_data_weights[i]
cur_train_list = sample_data_list(train_d_list, train_d_weight) # change later # we can apply different sample strategy here.
print(f"Data Name:{train_d_name}; Weight: {train_d_weight}; "
f"Original Size: {len(train_d_list)}; Sampled Size: {len(cur_train_list)}")
training_list.extend(cur_train_list)
estimated_training_size = len(training_list)
print("Estimated training size:", estimated_training_size)
# Estimate the training size ends:
# t_total = estimated_training_size // args.gradient_accumulation_steps * num_epoch
# t_total = estimated_training_size * num_epoch // args.actual_train_batch_size
if args.total_step <= 0:
t_total = estimated_training_size * num_epoch // args.actual_train_batch_size
else:
t_total = args.total_step
    if args.warmup_steps <= 0:  # default the warmup steps to 0.1 * total steps when no explicit value is given.
args.warmup_steps = int(t_total * 0.1)
if not args.cpu:
torch.cuda.set_device(args.local_rank)
model.cuda(args.local_rank)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
global_step = 0
if args.resume_path:
print("Resume Training")
global_step = args.global_iteration
print("Resume Global Step: ", global_step)
model.load_state_dict(torch.load(str(Path(args.resume_path) / "model.pt"), map_location=torch.device('cpu')))
optimizer.load_state_dict(torch.load(str(Path(args.resume_path) / "optimizer.pt"), map_location=torch.device('cpu')))
scheduler.load_state_dict(torch.load(str(Path(args.resume_path) / "scheduler.pt"), map_location=torch.device('cpu')))
print("State Resumed")
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if not args.cpu and not args.single_gpu:
model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank],
output_device=local_rank, find_unused_parameters=True)
args_dict = dict(vars(args))
file_path_prefix = '.'
if args.global_rank in [-1, 0]:
print("Total Steps:", t_total)
args.total_step = t_total
print("Warmup Steps:", args.warmup_steps)
print("Actual Training Batch Size:", actual_train_batch_size)
print("Arguments", pp.pprint(args))
is_finished = False
# Let build the logger and log everything before the start of the first training epoch.
if args.global_rank in [-1, 0]: # only do logging if we use cpu or global_rank=0
resume_prefix = ""
# if args.resume_path:
# resume_prefix = "resumed_"
if not args.debug_mode:
file_path_prefix, date = save_tool.gen_file_prefix(f"{args.experiment_name}")
# # # Create Log File
# Save the source code.
script_name = os.path.basename(__file__)
with open(os.path.join(file_path_prefix, script_name), 'w') as out_f, open(__file__, 'r') as it:
out_f.write(it.read())
out_f.flush()
# Save option file
common.save_json(args_dict, os.path.join(file_path_prefix, "args.json"))
checkpoints_path = Path(file_path_prefix) / "checkpoints"
if not checkpoints_path.exists():
checkpoints_path.mkdir()
prediction_path = Path(file_path_prefix) / "predictions"
if not prediction_path.exists():
prediction_path.mkdir()
            # if this is a resumed run, save the path we resumed from.
if args.resume_path:
with open(os.path.join(file_path_prefix, "resume_log.txt"), 'w') as out_f:
out_f.write(str(args.resume_path))
out_f.flush()
# print(f"Global Rank:{args.global_rank} ### ", 'Init!')
for epoch in tqdm(range(num_epoch), desc="Epoch", disable=args.global_rank not in [-1, 0]):
# Let's build up training dataset for this epoch
training_list = []
for i in range(len(train_data_list)):
print("Build Training Data ...")
train_d_list = train_data_list[i]
train_d_name = train_data_name[i]
train_d_weight = train_data_weights[i]
cur_train_list = sample_data_list(train_d_list, train_d_weight) # change later # we can apply different sample strategy here.
print(f"Data Name:{train_d_name}; Weight: {train_d_weight}; "
f"Original Size: {len(train_d_list)}; Sampled Size: {len(cur_train_list)}")
training_list.extend(cur_train_list)
random.shuffle(training_list)
train_dataset = NLIDataset(training_list, data_transformer)
train_sampler = SequentialSampler(train_dataset)
if not args.cpu and not args.single_gpu:
print("Use distributed sampler.")
train_sampler = DistributedSampler(train_dataset, args.world_size, args.global_rank,
shuffle=True)
train_dataloader = DataLoader(dataset=train_dataset,
batch_size=batch_size_per_gpu_train,
shuffle=False, #
num_workers=0,
pin_memory=True,
sampler=train_sampler,
collate_fn=BaseBatchBuilder(batching_schema)) #
# training build finished.
print(debug_node_info(args), "epoch: ", epoch)
if not args.cpu and not args.single_gpu:
if args.sampler_seed == -1:
train_sampler.set_epoch(epoch) # setup the epoch to ensure random sampling at each epoch
else:
train_sampler.set_epoch(epoch + args.sampler_seed)
for forward_step, batch in enumerate(tqdm(train_dataloader, desc="Iteration",
disable=args.global_rank not in [-1, 0]), 0):
model.train()
batch = move_to_device(batch, local_rank)
# print(batch['input_ids'], batch['y'])
if args.model_class_name in ["distilbert", "bart-large"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['y'])
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=batch['y'])
loss, logits = outputs[:2]
# print(debug_node_info(args), loss, logits, batch['uid'])
# print(debug_node_info(args), loss, batch['uid'])
# Accumulated loss
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
# if this forward step need model updates
# handle fp16
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
                # Apply gradient clipping (only when max_grad_norm > 0) and update on accumulation boundaries.
if (forward_step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.global_rank in [-1, 0] and args.eval_frequency > 0 and global_step % args.eval_frequency == 0:
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# saving checkpoints
current_checkpoint_filename = \
f'e({epoch})|i({global_step})'
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
current_checkpoint_filename += \
f'|{cur_eval_data_name}#({round(r_dict[cur_eval_data_name]["acc"], 4)})'
if not args.debug_mode:
# save model:
model_output_dir = checkpoints_path / current_checkpoint_filename
if not model_output_dir.exists():
model_output_dir.mkdir()
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), str(model_output_dir / "model.pt"))
torch.save(optimizer.state_dict(), str(model_output_dir / "optimizer.pt"))
torch.save(scheduler.state_dict(), str(model_output_dir / "scheduler.pt"))
# save prediction:
if not args.debug_mode and args.save_prediction:
cur_results_path = prediction_path / current_checkpoint_filename
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
if args.total_step > 0 and global_step == t_total:
                    # stop when total_step is set and global_step has reached t_total.
is_finished = True
break
# End of epoch evaluation.
if args.global_rank in [-1, 0] and args.total_step <= 0:
r_dict = dict()
# Eval loop:
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
cur_eval_data_list = eval_data_list[i]
cur_eval_dataloader = eval_data_loaders[i]
# cur_eval_raw_data_list = eval_raw_data_list[i]
evaluation_dataset(args, cur_eval_dataloader, cur_eval_data_list, model, r_dict,
eval_name=cur_eval_data_name)
# saving checkpoints
current_checkpoint_filename = \
f'e({epoch})|i({global_step})'
for i in range(len(eval_data_name)):
cur_eval_data_name = eval_data_name[i]
current_checkpoint_filename += \
f'|{cur_eval_data_name}#({round(r_dict[cur_eval_data_name]["acc"], 4)})'
if not args.debug_mode:
# save model:
model_output_dir = checkpoints_path / current_checkpoint_filename
if not model_output_dir.exists():
model_output_dir.mkdir()
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
torch.save(model_to_save.state_dict(), str(model_output_dir / "model.pt"))
torch.save(optimizer.state_dict(), str(model_output_dir / "optimizer.pt"))
torch.save(scheduler.state_dict(), str(model_output_dir / "scheduler.pt"))
# save prediction:
if not args.debug_mode and args.save_prediction:
cur_results_path = prediction_path / current_checkpoint_filename
if not cur_results_path.exists():
cur_results_path.mkdir(parents=True)
for key, item in r_dict.items():
common.save_jsonl(item['predictions'], cur_results_path / f"{key}.jsonl")
# avoid saving too many things
for key, item in r_dict.items():
del r_dict[key]['predictions']
common.save_json(r_dict, cur_results_path / "results_dict.json", indent=2)
if is_finished:
break
id2label = {
0: 'e',
1: 'n',
2: 'c',
-1: '-',
}
def count_acc(gt_list, pred_list):
assert len(gt_list) == len(pred_list)
    gt_dict = list_dict_data_tool.list_to_dict(gt_list, 'uid')
    pred_dict = list_dict_data_tool.list_to_dict(pred_list, 'uid')
    total_count = 0
    hit = 0
    for key, value in pred_dict.items():
        if gt_dict[key]['label'] == value['predicted_label']:
            hit += 1
        total_count += 1
return hit, total_count
def evaluation_dataset(args, eval_dataloader, eval_list, model, r_dict, eval_name):
# r_dict = dict()
pred_output_list = eval_model(model, eval_dataloader, args.global_rank, args)
predictions = pred_output_list
hit, total = count_acc(eval_list, pred_output_list)
print(debug_node_info(args), f"{eval_name} Acc:", hit, total, hit / total)
r_dict[f'{eval_name}'] = {
'acc': hit / total,
'correct_count': hit,
'total_count': total,
'predictions': predictions,
}
def eval_model(model, dev_dataloader, device_num, args):
model.eval()
uid_list = []
y_list = []
pred_list = []
logits_list = []
with torch.no_grad():
for i, batch in enumerate(dev_dataloader, 0):
batch = move_to_device(batch, device_num)
if args.model_class_name in ["distilbert", "bart-large"]:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['y'])
else:
outputs = model(batch['input_ids'],
attention_mask=batch['attention_mask'],
token_type_ids=batch['token_type_ids'],
labels=batch['y'])
loss, logits = outputs[:2]
uid_list.extend(list(batch['uid']))
y_list.extend(batch['y'].tolist())
pred_list.extend(torch.max(logits, 1)[1].view(logits.size(0)).tolist())
logits_list.extend(logits.tolist())
    assert len(pred_list) == len(logits_list)
result_items_list = []
for i in range(len(uid_list)):
r_item = dict()
r_item['uid'] = uid_list[i]
r_item['logits'] = logits_list[i]
r_item['predicted_label'] = id2label[pred_list[i]]
result_items_list.append(r_item)
return result_items_list
def debug_node_info(args):
names = ['global_rank', 'local_rank', 'node_rank']
values = []
for name in names:
if name in args:
values.append(getattr(args, name))
else:
return "Pro:No node info "
return "Pro:" + '|'.join([f"{name}:{value}" for name, value in zip(names, values)]) + "||Print:"
if __name__ == '__main__':
main() | anli-main | src/nli/training.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import os
from pathlib import Path
import config
from datetime import datetime
from utils import common
class ScoreLogger(object):
def __init__(self, init_tracking_dict) -> None:
super().__init__()
self.logging_item_list = []
self.score_tracker = dict()
self.score_tracker.update(init_tracking_dict)
def incorporate_results(self, score_dict, save_key, item=None) -> bool:
assert len(score_dict.keys()) == len(self.score_tracker.keys())
for fieldname in score_dict.keys():
assert fieldname in self.score_tracker
valid_improvement = False
for fieldname, value in score_dict.items():
if score_dict[fieldname] >= self.score_tracker[fieldname]:
self.score_tracker[fieldname] = score_dict[fieldname]
valid_improvement = True
self.logging_item_list.append({'k': save_key, 'v': item})
return valid_improvement
def logging_to_file(self, filename):
if Path(filename).is_file():
old_logging_list = common.load_json(filename)
current_saved_key = set()
for item in self.logging_item_list:
current_saved_key.add(item['k'])
for item in old_logging_list:
if item['k'] not in current_saved_key:
raise ValueError("Previous logged item can not be found!")
common.save_json(self.logging_item_list, filename, indent=2, sort_keys=True)
def gen_file_prefix(model_name, directory_name='saved_models', date=None):
date_now = datetime.now().strftime("%m-%d-%H:%M:%S") if not date else date
file_path = os.path.join(config.PRO_ROOT / directory_name / '_'.join((date_now, model_name)))
if not os.path.exists(file_path):
os.makedirs(file_path)
return file_path, date_now
def get_cur_time_str():
date_now = datetime.now().strftime("%m-%d[%H:%M:%S]")
return date_now
if __name__ == "__main__":
# print(gen_file_prefix("this_is_my_model."))
# print(get_cur_time_str())
score_logger = ScoreLogger({'a_score': -1, 'b_score': -1})
print(score_logger.incorporate_results({'a_score': 2, 'b_score': -1}, 'key-1', {'a_score': 2, 'b_score': -1}))
print(score_logger.incorporate_results({'a_score': 2, 'b_score': 3}, 'key-2', {'a_score': 2, 'b_score': 3}))
print(score_logger.incorporate_results({'a_score': 2, 'b_score': 4}, 'key-2', {'a_score': 2, 'b_score': 4}))
print(score_logger.incorporate_results({'a_score': 1, 'b_score': 2}, 'key-2', {'a_score': 1, 'b_score': 2}))
print(score_logger.score_tracker)
score_logger.logging_to_file('for_testing.json') | anli-main | src/utils/save_tool.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree. | anli-main | src/utils/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import uuid
def list_to_dict(d_list, key_fields): # '_id' or 'pid'
d_dict = dict()
for item in d_list:
assert key_fields in item
d_dict[item[key_fields]] = item
return d_dict
def dict_to_list(d_dict):
d_list = []
for key, value in d_dict.items():
d_list.append(value)
return d_list
def append_item_from_dict_to_list(d_list, d_dict, key_fieldname, append_fieldnames):
if not isinstance(append_fieldnames, list):
append_fieldnames = [append_fieldnames]
for item in d_list:
key = item[key_fieldname]
if key in d_dict:
for append_fieldname in append_fieldnames:
item[append_fieldname] = d_dict[key][append_fieldname]
else:
print(f"Potential Error: {key} not in scored_dict. Maybe bc all forward items are empty.")
for append_fieldname in append_fieldnames:
item[append_fieldname] = []
return d_list
def append_item_from_dict_to_list_hotpot_style(d_list, d_dict, key_fieldname, append_fieldnames):
if not isinstance(append_fieldnames, list):
append_fieldnames = [append_fieldnames]
for item in d_list:
key = item[key_fieldname]
for append_fieldname in append_fieldnames:
if key in d_dict[append_fieldname]:
item[append_fieldname] = d_dict[append_fieldname][key]
else:
print(f"Potential Error: {key} not in scored_dict. Maybe bc all forward items are empty.")
# for append_fieldname in append_fieldnames:
item[append_fieldname] = []
return d_list
def append_subfield_from_list_to_dict(subf_list, d_dict, o_key_field_name, subfield_key_name,
subfield_name='merged_field', check=False):
    # Oftentimes we need to split one data point into multiple items to be fed into neural networks,
    # and after we obtain the results we need to map them back to the original data point using some keys.
    # This method is used for that purpose.
    # The method can be invoked multiple times (in practice, usually once per batch).
"""
:param subf_list: The forward list.
    :param d_dict: The dict that contains keys mapping to the original data points.
    :param o_key_field_name: The field name of the original data point key, e.g. 'pid'.
    :param subfield_key_name: The field name of the sub item, e.g. 'fid'.
    :param subfield_name: The name of the merged field, e.g. 'merged_field'.
:param check:
:return:
"""
for key in d_dict.keys():
d_dict[key][subfield_name] = dict()
for item in subf_list:
assert o_key_field_name in item
assert subfield_key_name in item
map_id = item[o_key_field_name]
sub_filed_id = item[subfield_key_name]
assert map_id in d_dict
# if subfield_name not in d_dict[map_id]:
# d_dict[map_id][subfield_name] = dict()
if sub_filed_id not in d_dict[map_id][subfield_name]:
if check:
assert item[o_key_field_name] == map_id
d_dict[map_id][subfield_name][sub_filed_id] = item
else:
print("Duplicate forward item with key:", sub_filed_id)
return d_dict
if __name__ == '__main__':
oitems = []
for i in range(3):
oitems.append({'_id': i})
fitems = []
for item in oitems:
oid = item['_id']
for i in range(int(oid) + 1):
fid = str(uuid.uuid4())
fitems.append({
'oid': oid,
'fid': fid,
})
o_dict = list_to_dict(oitems, '_id')
append_subfield_from_list_to_dict(fitems, o_dict, 'oid', 'fid', check=True)
print(fitems)
print(o_dict)
| anli-main | src/utils/list_dict_data_tool.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import json
from json import JSONEncoder
from tqdm import tqdm
import config
registered_jsonabl_classes = {}
# Some Jsonable classes, for easy json serialization.
def register_class(cls):
global registered_jsonabl_classes
if cls not in registered_jsonabl_classes:
registered_jsonabl_classes.update({cls.__name__: cls})
class JsonableObj(object):
pass
class JsonableObjectEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, JsonableObj):
d = {'_jcls_': type(o).__name__}
d.update(vars(o))
return d
else:
return super().default(o)
def unserialize_JsonableObject(d):
global registered_jsonabl_classes
classname = d.pop('_jcls_', None)
if classname:
cls = registered_jsonabl_classes[classname]
obj = cls.__new__(cls) # Make instance without calling __init__
for key, value in d.items():
setattr(obj, key, value)
return obj
else:
return d
def json_dumps(item):
return json.dumps(item, cls=JsonableObjectEncoder)
def json_loads(item_str):
return json.loads(item_str, object_hook=unserialize_JsonableObject)
# Json Serializable object finished.
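# Illustrative round-trip with a hypothetical `Point` class (not defined in this file):
#
#   class Point(JsonableObj):
#       def __init__(self, x, y):
#           self.x, self.y = x, y
#   register_class(Point)
#   restored = json_loads(json_dumps(Point(1, 2)))  # `restored` is a Point with x=1, y=2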
def save_jsonl(d_list, filename):
print("Save to Jsonl:", filename)
with open(filename, encoding='utf-8', mode='w') as out_f:
for item in d_list:
out_f.write(json.dumps(item, cls=JsonableObjectEncoder) + '\n')
def load_jsonl(filename, debug_num=None):
d_list = []
with open(filename, encoding='utf-8', mode='r') as in_f:
print("Load Jsonl:", filename)
for line in tqdm(in_f):
item = json.loads(line.strip(), object_hook=unserialize_JsonableObject)
d_list.append(item)
if debug_num is not None and 0 < debug_num == len(d_list):
break
return d_list
def load_json(filename, **kwargs):
with open(filename, encoding='utf-8', mode='r') as in_f:
return json.load(in_f, object_hook=unserialize_JsonableObject, **kwargs)
def save_json(obj, filename, **kwargs):
with open(filename, encoding='utf-8', mode='w') as out_f:
json.dump(obj, out_f, cls=JsonableObjectEncoder, **kwargs)
out_f.close() | anli-main | src/utils/common.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree. | anli-main | src/modeling/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
""" PyTorch Dummy XLNet model.
"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from torch.nn import functional as F
from transformers.activations import gelu_new, swish
from transformers.configuration_xlnet import XLNetConfig
from transformers.file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_callable
from transformers.modeling_utils import PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary
logger = logging.getLogger(__name__)
_TOKENIZER_FOR_DOC = "XLNetTokenizer"
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
"xlnet-base-cased",
"xlnet-large-cased",
# See all XLNet models at https://huggingface.co/models?filter=xlnet
]
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None):
""" A map of modules from TF to PyTorch.
I use a map to keep the PyTorch model as
identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
if hasattr(model, "lm_loss"):
# We will load also the output bias
tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias
if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights:
# We will load also the sequence summary
tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight
tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias
if (
hasattr(model, "logits_proj")
and config.finetuning_task is not None
and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights
):
tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight
tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias
# Now load the rest of the transformer
model = model.transformer
# Embeddings and output
tf_to_pt_map.update(
{
"model/transformer/word_embedding/lookup_table": model.word_embedding.weight,
"model/transformer/mask_emb/mask_emb": model.mask_emb,
}
)
# Transformer blocks
for i, b in enumerate(model.layer):
layer_str = "model/transformer/layer_%d/" % i
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.rel_attn.o,
layer_str + "rel_attn/q/kernel": b.rel_attn.q,
layer_str + "rel_attn/k/kernel": b.rel_attn.k,
layer_str + "rel_attn/r/kernel": b.rel_attn.r,
layer_str + "rel_attn/v/kernel": b.rel_attn.v,
layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight,
layer_str + "ff/layer_1/bias": b.ff.layer_1.bias,
layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight,
layer_str + "ff/layer_2/bias": b.ff.layer_2.bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
r_s_list = []
seg_embed_list = []
for b in model.layer:
r_r_list.append(b.rel_attn.r_r_bias)
r_w_list.append(b.rel_attn.r_w_bias)
r_s_list.append(b.rel_attn.r_s_bias)
seg_embed_list.append(b.rel_attn.seg_embed)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
r_s_list = [model.r_s_bias]
seg_embed_list = [model.seg_embed]
tf_to_pt_map.update(
{
"model/transformer/r_r_bias": r_r_list,
"model/transformer/r_w_bias": r_w_list,
"model/transformer/r_s_bias": r_s_list,
"model/transformer/seg_embed": seg_embed_list,
}
)
return tf_to_pt_map
def load_tf_weights_in_xlnet(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights)
for name, pointer in tf_to_pt_map.items():
logger.info("Importing {}".format(name))
if name not in tf_weights:
logger.info("{} not in tf pre-trained weights, skipping".format(name))
continue
array = tf_weights[name]
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name):
logger.info("Transposing")
array = np.transpose(array)
if isinstance(pointer, list):
# Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + "/Adam", None)
tf_weights.pop(name + "/Adam_1", None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
return model
ACT2FN = {"gelu": gelu_new, "relu": torch.nn.functional.relu, "swish": swish}
XLNetLayerNorm = nn.LayerNorm
class XLNetRelativeAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.d_model % config.n_head != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.d_model, config.n_head)
)
self.n_head = config.n_head
self.d_head = config.d_head
self.d_model = config.d_model
self.scale = 1 / (config.d_head ** 0.5)
self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head))
self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head))
self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head))
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.dropout)
def prune_heads(self, heads):
raise NotImplementedError
@staticmethod
def rel_shift(x, klen=-1):
"""perform relative shift to form the relative attention score."""
x_size = x.shape
x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3])
x = x[1:, ...]
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3])
# x = x[:, 0:klen, :, :]
x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long))
return x
@staticmethod
def rel_shift_bnij(x, klen=-1):
x_size = x.shape
x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2])
x = x[:, :, 1:, :]
x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1)
# Note: the tensor-slice form was faster in my testing than torch.index_select
# However, tracing doesn't like the nature of the slice, and if klen changes
# during the run then it'll fail, whereas index_select will be fine.
x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long))
# x = x[:, :, :, :klen]
return x
def rel_attn_core(
self,
q_head,
k_head_h,
v_head_h,
k_head_r,
seg_mat=None,
attn_mask=None,
head_mask=None,
output_attentions=False,
):
"""Core relative positional attention operations."""
# content based attention score
ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h)
# position based attention score
bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r)
bd = self.rel_shift_bnij(bd, klen=ac.shape[3])
# segment based attention score
if seg_mat is None:
ef = 0
else:
ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed)
ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef)
# merge attention scores and perform masking
attn_score = (ac + bd + ef) * self.scale
if attn_mask is not None:
# attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask
if attn_mask.dtype == torch.float16:
attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask)
else:
attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask)
# attention probability
attn_prob = F.softmax(attn_score, dim=3)
attn_prob = self.dropout(attn_prob)
# Mask heads if we want to
if head_mask is not None:
attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask)
# attention output
attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h)
if output_attentions:
return attn_vec, torch.einsum("bnij->ijbn", attn_prob)
return attn_vec
def post_attention(self, h, attn_vec, residual=True):
"""Post-attention processing."""
# post-attention projection (back to `d_model`)
attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o)
attn_out = self.dropout(attn_out)
if residual:
attn_out = attn_out + h
output = self.layer_norm(attn_out)
return output
def forward(
self,
h,
g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems=None,
target_mapping=None,
head_mask=None,
output_attentions=False,
):
if g is not None:
# Two-stream attention with relative positional encoding.
# content based attention score
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content-based key head
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
# content-based value head
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# position-based key head
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# h-stream
# content-stream query head
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
# core attention ops
attn_vec_h = self.rel_attn_core(
q_head_h,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_h,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec_h, attn_prob_h = attn_vec_h
# post processing
output_h = self.post_attention(h, attn_vec_h)
# g-stream
# query-stream query head
q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q)
# core attention ops
if target_mapping is not None:
q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping)
attn_vec_g = self.rel_attn_core(
q_head_g,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_g,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping)
else:
attn_vec_g = self.rel_attn_core(
q_head_g,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_g,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec_g, attn_prob_g = attn_vec_g
# post processing
output_g = self.post_attention(g, attn_vec_g)
if output_attentions:
attn_prob = attn_prob_h, attn_prob_g
else:
# Multi-head attention with relative positional encoding
if mems is not None and mems.dim() > 1:
cat = torch.cat([mems, h], dim=0)
else:
cat = h
# content heads
q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q)
k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k)
v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v)
# positional heads
k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r)
# core attention ops
attn_vec = self.rel_attn_core(
q_head_h,
k_head_h,
v_head_h,
k_head_r,
seg_mat=seg_mat,
attn_mask=attn_mask_h,
head_mask=head_mask,
output_attentions=output_attentions,
)
if output_attentions:
attn_vec, attn_prob = attn_vec
# post processing
output_h = self.post_attention(h, attn_vec)
output_g = None
outputs = (output_h, output_g)
if output_attentions:
outputs = outputs + (attn_prob,)
return outputs
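# --- Added illustrative helper (not part of the original model code) ---
# A tiny, hedged demonstration of the relative-shift trick implemented in
# XLNetRelativeAttention.rel_shift_bnij above: it runs the staticmethod on a small score
# tensor so the reshape/slice behaviour can be inspected in isolation. The concrete sizes
# are arbitrary assumptions chosen only to keep the example small.
def _demo_rel_shift_bnij():
    # attention scores laid out as [bsz, n_head, qlen, klen] before the shift
    scores = torch.arange(1 * 1 * 3 * 4, dtype=torch.float).view(1, 1, 3, 4)
    shifted = XLNetRelativeAttention.rel_shift_bnij(scores, klen=3)
    return shifted  # shape: (1, 1, 3, 3)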
class XLNetFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.layer_norm = XLNetLayerNorm(config.d_model, eps=config.layer_norm_eps)
self.layer_1 = nn.Linear(config.d_model, config.d_inner)
self.layer_2 = nn.Linear(config.d_inner, config.d_model)
self.dropout = nn.Dropout(config.dropout)
if isinstance(config.ff_activation, str):
self.activation_function = ACT2FN[config.ff_activation]
else:
self.activation_function = config.ff_activation
def forward(self, inp):
output = inp
output = self.layer_1(output)
output = self.activation_function(output)
output = self.dropout(output)
output = self.layer_2(output)
output = self.dropout(output)
output = self.layer_norm(output + inp)
return output
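# --- Added illustrative helper (not part of the original model code) ---
# A hedged sketch showing the tensor layout expected by the position-wise feed-forward block
# above. The config values are assumptions chosen only to keep the example tiny; the file's
# convention of a [seq_len, bsz, d_model] layout is preserved.
def _demo_feed_forward():
    cfg = XLNetConfig(d_model=32, d_inner=64, n_head=4)  # illustrative, not a real checkpoint config
    ff = XLNetFeedForward(cfg)
    x = torch.zeros(5, 2, cfg.d_model)  # [seq_len, bsz, d_model]
    return ff(x).shape  # torch.Size([5, 2, 32])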
class XLNetLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.rel_attn = XLNetRelativeAttention(config)
self.ff = XLNetFeedForward(config)
self.dropout = nn.Dropout(config.dropout)
def forward(
self,
output_h,
output_g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems=None,
target_mapping=None,
head_mask=None,
output_attentions=False,
):
outputs = self.rel_attn(
output_h,
output_g,
attn_mask_h,
attn_mask_g,
r,
seg_mat,
mems=mems,
target_mapping=target_mapping,
head_mask=head_mask,
output_attentions=output_attentions,
)
output_h, output_g = outputs[:2]
if output_g is not None:
output_g = self.ff(output_g)
output_h = self.ff(output_h)
        outputs = (output_h, output_g) + outputs[2:]  # Add the attentions again if they are there
return outputs
class XLNetPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = XLNetConfig
load_tf_weights = load_tf_weights_in_xlnet
base_model_prefix = "transformer"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, XLNetLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, XLNetRelativeAttention):
for param in [
module.q,
module.k,
module.v,
module.o,
module.r,
module.r_r_bias,
module.r_s_bias,
module.r_w_bias,
module.seg_embed,
]:
param.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, XLNetModel):
module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
XLNET_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
XLNET_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.BertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.__call__` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model
(see `mems` output below). Can be used to speed up sequential decoding. The token ids which have their mems
given to this model should not be passed as input ids as they have already been computed.
`use_cache` has to be set to `True` to make use of `mems`.
perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``:
            If ``perm_mask[k, i, j] = 0``, i attends to j in batch k;
            if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k.
If None, each token attends to all the others (full bidirectional attention).
Only used during pretraining (to define factorization order) or for sequential decoding (generation).
target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to indicate the output tokens to use.
            If ``target_mapping[k, i, j] = 1``, the i-th prediction in batch k is on the j-th token.
Only used during pretraining for partial prediction or for sequential decoding (generation).
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token. The classifier token should be represented by a ``2``.
`What are token type IDs? <../glossary.html#token-type-ids>`_
input_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Negative of `attention_mask`, i.e. with 0 for real tokens and 1 for padding.
Kept for compatibility with the original code base.
            You can only use one of `input_mask` and `attention_mask`.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are MASKED, ``0`` for tokens that are NOT MASKED.
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
use_cache (:obj:`bool`):
If `use_cache` is True, `mems` are returned and can be used to speed up decoding (see `mems`). Defaults to `True`.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
"""
@add_start_docstrings(
"The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.",
XLNET_START_DOCSTRING,
)
class XLNetModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.mem_len = config.mem_len
self.reuse_len = config.reuse_len
self.d_model = config.d_model
self.same_length = config.same_length
self.attn_type = config.attn_type
self.bi_data = config.bi_data
self.clamp_len = config.clamp_len
self.n_layer = config.n_layer
self.word_embedding = nn.Embedding(config.vocab_size, config.d_model)
self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model))
self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)])
self.dropout = nn.Dropout(config.dropout)
self.init_weights()
def get_input_embeddings(self):
return self.word_embedding
def set_input_embeddings(self, new_embeddings):
self.word_embedding = new_embeddings
def _prune_heads(self, heads_to_prune):
raise NotImplementedError
def create_mask(self, qlen, mlen):
"""
Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked.
Args:
qlen: Sequence length
mlen: Mask length
::
same_length=False: same_length=True:
<mlen > < qlen > <mlen > < qlen >
^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1]
[0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1]
qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1]
[0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1]
v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0]
"""
attn_mask = torch.ones([qlen, qlen])
mask_up = torch.triu(attn_mask, diagonal=1)
attn_mask_pad = torch.zeros([qlen, mlen])
ret = torch.cat([attn_mask_pad, mask_up], dim=1)
if self.same_length:
mask_lo = torch.tril(attn_mask, diagonal=-1)
ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1)
ret = ret.to(self.device)
return ret
def cache_mem(self, curr_out, prev_mem):
# cache hidden states into memory.
if self.reuse_len is not None and self.reuse_len > 0:
curr_out = curr_out[: self.reuse_len]
if prev_mem is None:
new_mem = curr_out[-self.mem_len :]
else:
new_mem = torch.cat([prev_mem, curr_out], dim=0)[-self.mem_len :]
return new_mem.detach()
@staticmethod
def positional_embedding(pos_seq, inv_freq, bsz=None):
sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq)
pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1)
pos_emb = pos_emb[:, None, :]
if bsz is not None:
pos_emb = pos_emb.expand(-1, bsz, -1)
return pos_emb
def relative_positional_encoding(self, qlen, klen, bsz=None):
# create relative positional encoding.
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float)
inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model))
if self.attn_type == "bi":
# beg, end = klen - 1, -qlen
beg, end = klen, -qlen
elif self.attn_type == "uni":
# beg, end = klen - 1, -1
beg, end = klen, -1
else:
raise ValueError("Unknown `attn_type` {}.".format(self.attn_type))
if self.bi_data:
fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float)
bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
if bsz is not None:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2)
else:
fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq)
bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq)
pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1)
else:
fwd_pos_seq = torch.arange(beg, end, -1.0)
if self.clamp_len > 0:
fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len)
pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz)
pos_emb = pos_emb.to(self.device)
return pos_emb
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
output_attentions=None,
output_hidden_states=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `mems` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end
# but we want a unified interface in the library with the batch size on the first dimension
        # so here we move the first dimension (batch) to the end
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_ids = input_ids.transpose(0, 1).contiguous()
qlen, bsz = input_ids.shape[0], input_ids.shape[1]
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None
input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None
attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None
perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None
target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None
mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0
klen = mlen + qlen
dtype_float = self.dtype
device = self.device
# Attention mask
# causal attention mask
if self.attn_type == "uni":
attn_mask = self.create_mask(qlen, mlen)
attn_mask = attn_mask[:, :, None, None]
elif self.attn_type == "bi":
attn_mask = None
else:
raise ValueError("Unsupported attention type: {}".format(self.attn_type))
# data mask: input mask & perm mask
        assert input_mask is None or attention_mask is None, (
            "You can only use one of input_mask (uses 1 for padding) "
            "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one."
        )
if input_mask is None and attention_mask is not None:
input_mask = 1.0 - attention_mask
if input_mask is not None and perm_mask is not None:
data_mask = input_mask[None] + perm_mask
elif input_mask is not None and perm_mask is None:
data_mask = input_mask[None]
elif input_mask is None and perm_mask is not None:
data_mask = perm_mask
else:
data_mask = None
if data_mask is not None:
# all mems can be attended to
if mlen > 0:
mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask)
data_mask = torch.cat([mems_mask, data_mask], dim=1)
if attn_mask is None:
attn_mask = data_mask[:, :, :, None]
else:
attn_mask += data_mask[:, :, :, None]
if attn_mask is not None:
attn_mask = (attn_mask > 0).to(dtype_float)
if attn_mask is not None:
non_tgt_mask = -torch.eye(qlen).to(attn_mask)
if mlen > 0:
non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1)
non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask)
else:
non_tgt_mask = None
# Word embeddings and prepare h & g hidden states
if inputs_embeds is not None:
word_emb_k = inputs_embeds
else:
            # Important: `input_ids` was transposed to [seq_len, bsz] at the top of forward(),
            # so transpose it back to [bsz, seq_len] for the embedding lookup ...
            input_ids = input_ids.transpose(0, 1)
            word_emb_k = self.word_embedding(input_ids)
            # ... and then restore the [seq_len, bsz, d_model] layout used by the rest of the model
            word_emb_k = word_emb_k.transpose(0, 1)
output_h = self.dropout(word_emb_k)
if target_mapping is not None:
word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1)
# else: # We removed the inp_q input which was same as target mapping
# inp_q_ext = inp_q[:, :, None]
# word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k
output_g = self.dropout(word_emb_q)
else:
output_g = None
# Segment embedding
if token_type_ids is not None:
# Convert `token_type_ids` to one-hot `seg_mat`
if mlen > 0:
mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device)
cat_ids = torch.cat([mem_pad, token_type_ids], dim=0)
else:
cat_ids = token_type_ids
# `1` indicates not in the same segment [qlen x klen x bsz]
seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long()
seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float)
else:
seg_mat = None
# Positional encoding
pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz)
pos_emb = self.dropout(pos_emb)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer)
# and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.n_layer
new_mems = ()
if mems is None:
mems = [None] * len(self.layer)
attentions = []
hidden_states = []
for i, layer_module in enumerate(self.layer):
if self.mem_len is not None and self.mem_len > 0 and use_cache is True:
# cache new mems
new_mems = new_mems + (self.cache_mem(output_h, mems[i]),)
if output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
outputs = layer_module(
output_h,
output_g,
attn_mask_h=non_tgt_mask,
attn_mask_g=attn_mask,
r=pos_emb,
seg_mat=seg_mat,
mems=mems[i],
target_mapping=target_mapping,
head_mask=head_mask[i],
output_attentions=output_attentions,
)
output_h, output_g = outputs[:2]
if output_attentions:
attentions.append(outputs[2])
# Add last hidden state
if output_hidden_states:
hidden_states.append((output_h, output_g) if output_g is not None else output_h)
output = self.dropout(output_g if output_g is not None else output_h)
# Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method)
outputs = (output.permute(1, 0, 2).contiguous(),)
if self.mem_len is not None and self.mem_len > 0 and use_cache is True:
outputs = outputs + (new_mems,)
if output_hidden_states:
if output_g is not None:
hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs)
else:
hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states)
outputs = outputs + (hidden_states,)
if output_attentions:
if target_mapping is not None:
# when target_mapping is provided, there are 2-tuple of attentions
attentions = tuple(
tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions
)
else:
attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
outputs = outputs + (attentions,)
return outputs # outputs, (new_mems), (hidden_states), (attentions)
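# --- Added illustrative note (not part of the original code) ---
# A minimal hedged sketch of running the bare encoder above; the tokenizer class and the
# checkpoint name come from the `transformers` library and are assumptions about the runtime
# environment, so the example is kept as comments.
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     model = XLNetModel.from_pretrained("xlnet-base-cased")
#     inputs = tokenizer("XLNet uses two-stream attention.", return_tensors="pt")
#     last_hidden_state = model(**inputs)[0]   # (batch_size, seq_len, d_model)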
@add_start_docstrings(
"""XLNet Model with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
XLNET_START_DOCSTRING,
)
class XLNetLMHeadModel(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.attn_type = config.attn_type
self.same_length = config.same_length
self.transformer = XLNetModel(config)
self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True)
self.init_weights()
def get_output_embeddings(self):
return self.lm_loss
def prepare_inputs_for_generation(self, input_ids, past, **kwargs):
# Add dummy token at the end (no attention on this one)
effective_batch_size = input_ids.shape[0]
dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
# Build permutation mask so that previous tokens don't see last token
sequence_length = input_ids.shape[1]
perm_mask = torch.zeros(
(effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device
)
perm_mask[:, :, -1] = 1.0
# We'll only predict the last token
target_mapping = torch.zeros(
(effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device
)
target_mapping[0, 0, -1] = 1.0
inputs = {
"input_ids": input_ids,
"perm_mask": perm_mask,
"target_mapping": target_mapping,
"use_cache": kwargs["use_cache"],
}
# if past is defined in model kwargs then use it for faster decoding
if past:
inputs["mems"] = past
return inputs
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_predict)`, `optional`, defaults to :obj:`None`):
Labels for masked language modeling.
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
            The labels should correspond to the masked input words that should be predicted and depend on `target_mapping`. Note that in order to perform standard auto-regressive language modeling a `<mask>` token has to be added to the `input_ids` (see the `prepare_inputs_for_generation` function and examples below).
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored, the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
`num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import XLNetTokenizer, XLNetLMHeadModel
import torch
tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased')
model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased')
# We show how to setup inputs to predict a next token using a bi-directional context.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping)
next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
        # In the same way, XLNetLMHeadModel can be trained with standard auto-regressive language modeling.
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token
labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0)
assert labels.shape[0] == 1, 'only one word will be predicted'
perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float)
perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training
target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token
target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token)
outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels)
loss, next_token_logits = outputs[:2] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
logits = self.lm_loss(transformer_outputs[0])
        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it
if labels is not None:
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForSequenceClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`)
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
        outputs = (logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
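# --- Added illustrative note (not part of the original code) ---
# Hedged usage sketch for the sequence-classification head above; the checkpoint name,
# `num_labels`, and the label value are assumptions chosen for illustration only.
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     model = XLNetForSequenceClassification.from_pretrained("xlnet-base-cased", num_labels=2)
#     inputs = tokenizer("A premise and a hypothesis.", return_tensors="pt")
#     loss, logits = model(**inputs, labels=torch.tensor([1]))[:2]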
@add_start_docstrings(
"""XLNet Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForTokenClassification(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Labels for computing the token classification loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``.
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
        outputs = (logits,) + outputs[1:]  # Keep mems, hidden states, attentions if they are in it
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)
active_labels = torch.where(
active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
)
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
@add_start_docstrings(
"""XLNet Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RACE/SWAG tasks. """,
XLNET_START_DOCSTRING,
)
class XLNetForMultipleChoice(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = XLNetModel(config)
self.sequence_summary = SequenceSummary(config)
self.logits_proj = nn.Linear(config.d_model, 1)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, num_choices, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
token_type_ids=None,
input_mask=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for computing the multiple choice classification loss.
            Indices should be in ``[0, ..., num_choices - 1]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Classification loss.
classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
`num_choices` is the second dimension of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
transformer_outputs = self.transformer(
flat_input_ids,
token_type_ids=flat_token_type_ids,
input_mask=flat_input_mask,
attention_mask=flat_attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
head_mask=head_mask,
inputs_embeds=flat_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
output = transformer_outputs[0]
output = self.sequence_summary(output)
logits = self.logits_proj(output)
reshaped_logits = logits.view(-1, num_choices)
        outputs = (reshaped_logits,) + transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels.view(-1))
outputs = (loss,) + outputs
return outputs # return (loss), logits, (mems), (hidden states), (attentions)
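# --- Added illustrative note (not part of the original code) ---
# Hedged sketch of the (batch_size, num_choices, seq_len) input layout expected by the
# multiple-choice head above; the prompt, choices, and checkpoint name are assumptions.
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     model = XLNetForMultipleChoice.from_pretrained("xlnet-base-cased")
#     prompt, choices = "The cat sat on the", ["mat.", "moon."]
#     enc = tokenizer([(prompt, c) for c in choices], return_tensors="pt", padding=True)
#     inputs = {k: v.unsqueeze(0) for k, v in enc.items()}   # add the num_choices dimension
#     logits = model(**inputs)[0]                            # shape: (1, 2)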
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = XLNetModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
@add_code_sample_docstrings(tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="xlnet-base-cased")
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-start scores (before SoftMax).
end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`):
Span-end scores (before SoftMax).
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (mems), (hidden_states), (attentions)
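# --- Added illustrative note (not part of the original code) ---
# Hedged sketch for the simple span-extraction head above; the question/context strings and
# the checkpoint name are illustrative assumptions, so the example is kept as comments.
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     model = XLNetForQuestionAnsweringSimple.from_pretrained("xlnet-base-cased")
#     inputs = tokenizer("Who proposed XLNet?", "XLNet was proposed by Yang et al.", return_tensors="pt")
#     start_logits, end_logits = model(**inputs)[:2]
#     answer_span = (start_logits.argmax(-1).item(), end_logits.argmax(-1).item())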
@add_start_docstrings(
"""XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
XLNET_START_DOCSTRING,
)
class XLNetForQuestionAnswering(XLNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.start_n_top = config.start_n_top
self.end_n_top = config.end_n_top
self.transformer = XLNetModel(config)
self.start_logits = PoolerStartLogits(config)
self.end_logits = PoolerEndLogits(config)
self.answer_class = PoolerAnswerClass(config)
self.init_weights()
@add_start_docstrings_to_callable(XLNET_INPUTS_DOCSTRING.format("(batch_size, sequence_length)"))
def forward(
self,
input_ids=None,
attention_mask=None,
mems=None,
perm_mask=None,
target_mapping=None,
token_type_ids=None,
input_mask=None,
head_mask=None,
inputs_embeds=None,
use_cache=True,
start_positions=None,
end_positions=None,
is_impossible=None,
cls_index=None,
p_mask=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`, defaults to :obj:`None`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels whether a question has an answer or no answer (SQuAD 2.0)
cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`, defaults to :obj:`None`):
Labels for position (index) of the classification token to use as input for computing plausibility of the answer.
p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`, defaults to :obj:`None`):
Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...).
            1.0 means the token should be masked, 0.0 means the token is not masked.
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.XLNetConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top config.start_n_top start token possibilities (beam-search).
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top config.start_n_top start token possibilities (beam-search).
end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
Log probabilities for the ``is_impossible`` label of the answers.
mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Example::
>>> from transformers import XLNetTokenizer, XLNetForQuestionAnswering
>>> import torch
>>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')
>>> model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased')
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
>>> start_positions = torch.tensor([1])
>>> end_positions = torch.tensor([3])
>>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
>>> loss = outputs[0]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
mems=mems,
perm_mask=perm_mask,
target_mapping=target_mapping,
token_type_ids=token_type_ids,
input_mask=input_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
hidden_states = transformer_outputs[0]
start_logits = self.start_logits(hidden_states, p_mask=p_mask)
        outputs = transformer_outputs[1:]  # Keep mems, hidden states, attentions if they are in it
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, let's remove the dimension added by batch splitting
for x in (start_positions, end_positions, cls_index, is_impossible):
if x is not None and x.dim() > 1:
x.squeeze_(-1)
# during training, compute the end logits based on the ground truth of the start position
end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)
loss_fct = CrossEntropyLoss()
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if cls_index is not None and is_impossible is not None:
# Predict answerability from the representation of CLS and START
cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
loss_fct_cls = nn.BCEWithLogitsLoss()
cls_loss = loss_fct_cls(cls_logits, is_impossible)
# note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
total_loss += cls_loss * 0.5
outputs = (total_loss,) + outputs
else:
# during inference, compute the end logits based on beam search
bsz, slen, hsz = hidden_states.size()
start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen)
start_top_log_probs, start_top_index = torch.topk(
start_log_probs, self.start_n_top, dim=-1
) # shape (bsz, start_n_top)
start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz)
start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz)
start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz)
hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
start_states
) # shape (bsz, slen, start_n_top, hsz)
p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top)
end_top_log_probs, end_top_index = torch.topk(
end_log_probs, self.end_n_top, dim=1
) # shape (bsz, end_n_top, start_n_top)
end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)
start_states = torch.einsum(
"blh,bl->bh", hidden_states, start_log_probs
) # get the representation of START as weighted sum of hidden states
cls_logits = self.answer_class(
hidden_states, start_states=start_states, cls_index=cls_index
) # Shape (batch size,): one single `cls_logits` for each sample
outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs
# return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
# or (if labels are provided) (total_loss,)
return outputs | anli-main | src/modeling/dummy_modeling_xlnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch import optim
from torch.autograd import Variable
from torch.nn import MSELoss, CrossEntropyLoss
import flint.torch_util as torch_util
from tqdm import tqdm
import os
from datetime import datetime
class EmptyScheduler(object):
def __init__(self):
self._state_dict = dict()
def step(self):
pass
def state_dict(self):
return self._state_dict
class ResEncoder(nn.Module):
def __init__(self, h_size=[1024, 1024, 1024], v_size=10, embd_dim=300, mlp_d=1024,
dropout_r=0.1, k=3, n_layers=1, num_labels=3):
super(ResEncoder, self).__init__()
self.Embd = nn.Embedding(v_size, embd_dim)
self.num_labels = num_labels
self.lstm = nn.LSTM(input_size=embd_dim, hidden_size=h_size[0],
num_layers=1, bidirectional=True)
self.lstm_1 = nn.LSTM(input_size=(embd_dim + h_size[0] * 2), hidden_size=h_size[1],
num_layers=1, bidirectional=True)
self.lstm_2 = nn.LSTM(input_size=(embd_dim + h_size[0] * 2), hidden_size=h_size[2],
num_layers=1, bidirectional=True)
self.h_size = h_size
self.k = k
# self.mlp_1 = nn.Linear(h_size[2] * 2 * 4, mlp_d)
self.mlp_1 = nn.Linear(h_size[2] * 2, mlp_d)
self.mlp_2 = nn.Linear(mlp_d, mlp_d)
self.sm = nn.Linear(mlp_d, self.num_labels)
if n_layers == 1:
self.classifier = nn.Sequential(*[self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
self.sm])
elif n_layers == 2:
self.classifier = nn.Sequential(*[self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
self.mlp_2, nn.ReLU(), nn.Dropout(dropout_r),
self.sm])
else:
print("Error num layers")
def init_embedding(self, embedding):
self.Embd.weight = embedding.weight
def forward(self, input_ids, attention_mask, labels=None):
# if self.max_l:
# l1 = l1.clamp(max=self.max_l)
# l2 = l2.clamp(max=self.max_l)
# if s1.size(0) > self.max_l:
# s1 = s1[:self.max_l, :]
# if s2.size(0) > self.max_l:
# s2 = s2[:self.max_l, :]
batch_l_1 = torch.sum(attention_mask, dim=1)
# p_s1 = self.Embd(s1)
embedding_1 = self.Embd(input_ids)
s1_layer1_out = torch_util.auto_rnn(self.lstm, embedding_1, batch_l_1)
# s2_layer1_out = torch_util.auto_rnn_bilstm(self.lstm, p_s2, l2)
# Length truncate
# len1 = s1_layer1_out.size(0)
# len2 = s2_layer1_out.size(0)
# p_s1 = p_s1[:len1, :, :]
# p_s2 = p_s2[:len2, :, :]
# Using high way
s1_layer2_in = torch.cat([embedding_1, s1_layer1_out], dim=2)
# s2_layer2_in = torch.cat([p_s2, s2_layer1_out], dim=2)
s1_layer2_out = torch_util.auto_rnn(self.lstm_1, s1_layer2_in, batch_l_1)
# s2_layer2_out = torch_util.auto_rnn_bilstm(self.lstm_1, s2_layer2_in, l2)
s1_layer3_in = torch.cat([embedding_1, s1_layer1_out + s1_layer2_out], dim=2)
# s2_layer3_in = torch.cat([p_s2, s2_layer1_out + s2_layer2_out], dim=2)
s1_layer3_out = torch_util.auto_rnn(self.lstm_2, s1_layer3_in, batch_l_1)
# s2_layer3_out = torch_util.auto_rnn_bilstm(self.lstm_2, s2_layer3_in, l2)
s1_layer3_maxout = torch_util.max_along_time(s1_layer3_out, batch_l_1)
# s2_layer3_maxout = torch_util.max_along_time(s2_layer3_out, l2)
# Only use the last layer
# features = torch.cat([s1_layer3_maxout, s2_layer3_maxout,
# torch.abs(s1_layer3_maxout - s2_layer3_maxout),
# s1_layer3_maxout * s2_layer3_maxout],
# dim=1)
features = torch.cat([s1_layer3_maxout],
dim=1)
logits = self.classifier(features)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return (loss, logits)
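# Illustrative smoke-test sketch for ResEncoder (sizes are deliberately tiny and are not
# the defaults); assumes flint.torch_util is importable, as elsewhere in this repo.
def _demo_res_encoder():
    model = ResEncoder(h_size=[8, 8, 8], v_size=100, embd_dim=16, mlp_d=32, num_labels=3)
    input_ids = torch.randint(0, 100, (2, 7))             # [batch, seq_len]
    attention_mask = torch.ones(2, 7, dtype=torch.long)   # all positions are real tokens
    labels = torch.tensor([0, 2])
    loss, logits = model(input_ids, attention_mask, labels=labels)
    return loss, logits.shape                              # logits: [2, num_labels]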
class BagOfWords(nn.Module):
def __init__(self, v_size=10, embd_dim=300, mlp_d=1024,
dropout_r=0.1, n_layers=1, num_labels=3):
super(BagOfWords, self).__init__()
self.Embd = nn.Embedding(v_size, embd_dim)
self.num_labels = num_labels
# self.mlp_1 = nn.Linear(h_size[2] * 2 * 4, mlp_d)
self.mlp_1 = nn.Linear(embd_dim, mlp_d)
self.mlp_2 = nn.Linear(mlp_d, mlp_d)
self.sm = nn.Linear(mlp_d, self.num_labels)
if n_layers == 1:
self.classifier = nn.Sequential(*[self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
self.sm])
elif n_layers == 2:
self.classifier = nn.Sequential(*[self.mlp_1, nn.ReLU(), nn.Dropout(dropout_r),
self.mlp_2, nn.ReLU(), nn.Dropout(dropout_r),
self.sm])
else:
print("Error num layers")
def init_embedding(self, embedding):
self.Embd.weight = embedding.weight
def forward(self, input_ids, attention_mask, labels=None):
# if self.max_l:
# l1 = l1.clamp(max=self.max_l)
# l2 = l2.clamp(max=self.max_l)
# if s1.size(0) > self.max_l:
# s1 = s1[:self.max_l, :]
# if s2.size(0) > self.max_l:
# s2 = s2[:self.max_l, :]
batch_l_1 = torch.sum(attention_mask, dim=1)
# p_s1 = self.Embd(s1)
embedding_1 = self.Embd(input_ids)
s1_layer3_maxout = torch_util.avg_along_time(embedding_1, batch_l_1)
# s2_layer3_maxout = torch_util.max_along_time(s2_layer3_out, l2)
# Only use the last layer
# features = torch.cat([s1_layer3_maxout, s2_layer3_maxout,
# torch.abs(s1_layer3_maxout - s2_layer3_maxout),
# s1_layer3_maxout * s2_layer3_maxout],
# dim=1)
features = torch.cat([s1_layer3_maxout],
dim=1)
logits = self.classifier(features)
loss = None
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
return (loss, logits) | anli-main | src/modeling/res_encoder.py |
anli-main | src/hg_api/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
def evaluate(tokenizer, model, premise, hypothesis):
max_length = 256
tokenized_input_seq_pair = tokenizer.encode_plus(premise, hypothesis,
max_length=max_length,
return_token_type_ids=True, truncation=True)
input_ids = torch.Tensor(tokenized_input_seq_pair['input_ids']).long().unsqueeze(0)
# Remember that BART does not have 'token_type_ids'; remove the line below if you are using BART.
token_type_ids = torch.Tensor(tokenized_input_seq_pair['token_type_ids']).long().unsqueeze(0)
attention_mask = torch.Tensor(tokenized_input_seq_pair['attention_mask']).long().unsqueeze(0)
outputs = model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=None)
# Note:
# "id2label": {
# "0": "entailment",
# "1": "neutral",
# "2": "contradiction"
# },
predicted_probability = torch.softmax(outputs[0], dim=1)[0].tolist() # batch_size only one
#print("Premise:", premise)
#print("Hypothesis:", hypothesis)
print("Prediction:")
print("Entailment:", predicted_probability[0])
print("Neutral:", predicted_probability[1])
print("Contradiction:", predicted_probability[2])
print("="*20)
if __name__ == '__main__':
print("Loading model...")
# hg_model_hub_name = "ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/albert-xxlarge-v2-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/bart-large-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli"
hg_model_hub_name = "ynie/xlnet-large-cased-snli_mnli_fever_anli_R1_R2_R3-nli"
tokenizer = AutoTokenizer.from_pretrained(hg_model_hub_name)
model = AutoModelForSequenceClassification.from_pretrained(hg_model_hub_name)
print("Model loaded!")
while True:
premise = input("Premise> ")
hypothesis = input("Hypothesis> ")
evaluate(tokenizer, model, premise, hypothesis)
| anli-main | src/hg_api/interactive.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch
import json
def get_prediction(tokenizer, model, premise, hypothesis, max_length=256):
tokenized_input_seq_pair = tokenizer.encode_plus(premise, hypothesis,
max_length=max_length,
return_token_type_ids=True, truncation=True)
input_ids = torch.Tensor(tokenized_input_seq_pair['input_ids']).long().unsqueeze(0)
token_type_ids = torch.Tensor(tokenized_input_seq_pair['token_type_ids']).long().unsqueeze(0)
attention_mask = torch.Tensor(tokenized_input_seq_pair['attention_mask']).long().unsqueeze(0)
outputs = model(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
labels=None)
predicted_probability = torch.softmax(outputs[0], dim=1)[0] # batch_size only one
predicted_index = torch.argmax(predicted_probability)
predicted_probability = predicted_probability.tolist()
return predicted_probability, predicted_index
if __name__ == '__main__':
premise = "Two women are embracing while holding to go packages."
hypothesis = "The men are fighting outside a deli."
hg_model_hub_name = "ynie/roberta-large-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/albert-xxlarge-v2-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/bart-large-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/electra-large-discriminator-snli_mnli_fever_anli_R1_R2_R3-nli"
# hg_model_hub_name = "ynie/xlnet-large-cased-snli_mnli_fever_anli_R1_R2_R3-nli"
tokenizer = AutoTokenizer.from_pretrained(hg_model_hub_name)
model = AutoModelForSequenceClassification.from_pretrained(hg_model_hub_name)
snli_dev = []
SNLI_DEV_FILE_PATH = "../../data/snli_1.0/snli_1.0_dev.jsonl" # you can change this to another path.
with open(SNLI_DEV_FILE_PATH, mode='r', encoding='utf-8') as in_f:
for line in in_f:
if line:
cur_item = json.loads(line)
if cur_item['gold_label'] != '-':
snli_dev.append(cur_item)
total = 0
correct = 0
label_mapping = {
0: 'entailment',
1: 'neutral',
2: 'contradiction',
}
print("Start evaluating...") # this might take a while.
for item in snli_dev:
_, pred_index = get_prediction(tokenizer, model, item['sentence1'], item['sentence2'])
if label_mapping[int(pred_index)] == item['gold_label']:
correct += 1
total += 1
if total % 200 == 0 and total != 0:
print(f"{total} finished.")
print("Total / Correct / Accuracy:", f"{total} / {correct} / {correct / total}") | anli-main | src/hg_api/interactive_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree. | anli-main | src/dataset_tools/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
import config
from dataset_tools.format_convert import sm_nli2std_format, fever_nli2std_format, a_nli2std_format
from utils import common
# ANLI_VERSION = 1.0
def build_snli(path: Path):
snli_data_root_path = (path / "snli")
if not snli_data_root_path.exists():
snli_data_root_path.mkdir()
o_train = common.load_jsonl(config.PRO_ROOT / "data/snli_1.0/snli_1.0_train.jsonl")
o_dev = common.load_jsonl(config.PRO_ROOT / "data/snli_1.0/snli_1.0_dev.jsonl")
o_test = common.load_jsonl(config.PRO_ROOT / "data/snli_1.0/snli_1.0_test.jsonl")
d_train = sm_nli2std_format(o_train)
d_dev = sm_nli2std_format(o_dev)
d_test = sm_nli2std_format(o_test)
print("SNLI examples without gold label have been filtered.")
print("SNLI Train size:", len(d_train))
print("SNLI Dev size:", len(d_dev))
print("SNLI Test size:", len(d_test))
common.save_jsonl(d_train, snli_data_root_path / 'train.jsonl')
common.save_jsonl(d_dev, snli_data_root_path / 'dev.jsonl')
common.save_jsonl(d_test, snli_data_root_path / 'test.jsonl')
def build_mnli(path: Path):
data_root_path = (path / "mnli")
if not data_root_path.exists():
data_root_path.mkdir()
o_train = common.load_jsonl(config.PRO_ROOT / "data/multinli_1.0/multinli_1.0_train.jsonl")
o_mm_dev = common.load_jsonl(config.PRO_ROOT / "data/multinli_1.0/multinli_1.0_dev_mismatched.jsonl")
o_m_dev = common.load_jsonl(config.PRO_ROOT / "data/multinli_1.0/multinli_1.0_dev_matched.jsonl")
d_train = sm_nli2std_format(o_train)
d_mm_dev = sm_nli2std_format(o_mm_dev)
d_m_dev = sm_nli2std_format(o_m_dev)
print("MNLI examples without gold label have been filtered.")
print("MNLI Train size:", len(d_train))
print("MNLI Mismatched Dev size:", len(d_mm_dev))
print("MNLI Matched Dev size:", len(d_m_dev))
common.save_jsonl(d_train, data_root_path / 'train.jsonl')
common.save_jsonl(d_mm_dev, data_root_path / 'mm_dev.jsonl')
common.save_jsonl(d_m_dev, data_root_path / 'm_dev.jsonl')
def build_fever_nli(path: Path):
data_root_path = (path / "fever_nli")
if not data_root_path.exists():
data_root_path.mkdir()
o_train = common.load_jsonl(config.PRO_ROOT / "data/nli_fever/train_fitems.jsonl")
o_dev = common.load_jsonl(config.PRO_ROOT / "data/nli_fever/dev_fitems.jsonl")
o_test = common.load_jsonl(config.PRO_ROOT / "data/nli_fever/test_fitems.jsonl")
d_train = fever_nli2std_format(o_train)
d_dev = fever_nli2std_format(o_dev)
d_test = fever_nli2std_format(o_test)
print("FEVER-NLI Train size:", len(d_train))
print("FEVER-NLI Dev size:", len(d_dev))
print("FEVER-NLI Test size:", len(d_test))
common.save_jsonl(d_train, data_root_path / 'train.jsonl')
common.save_jsonl(d_dev, data_root_path / 'dev.jsonl')
common.save_jsonl(d_test, data_root_path / 'test.jsonl')
def build_anli(path: Path, round=1, version='1.0'):
data_root_path = (path / "anli")
if not data_root_path.exists():
data_root_path.mkdir()
round_tag = str(round)
o_train = common.load_jsonl(config.PRO_ROOT / f"data/anli_v{version}/R{round_tag}/train.jsonl")
o_dev = common.load_jsonl(config.PRO_ROOT / f"data/anli_v{version}/R{round_tag}/dev.jsonl")
o_test = common.load_jsonl(config.PRO_ROOT / f"data/anli_v{version}/R{round_tag}/test.jsonl")
d_train = a_nli2std_format(o_train)
d_dev = a_nli2std_format(o_dev)
d_test = a_nli2std_format(o_test)
print(f"ANLI (R{round_tag}) Train size:", len(d_train))
print(f"ANLI (R{round_tag}) Dev size:", len(d_dev))
print(f"ANLI (R{round_tag}) Test size:", len(d_test))
if not (data_root_path / f"r{round_tag}").exists():
(data_root_path / f"r{round_tag}").mkdir()
common.save_jsonl(d_train, data_root_path / f"r{round_tag}" / 'train.jsonl')
common.save_jsonl(d_dev, data_root_path / f"r{round_tag}" / 'dev.jsonl')
common.save_jsonl(d_test, data_root_path / f"r{round_tag}" / 'test.jsonl')
def build_data():
processed_data_root = config.PRO_ROOT / "data" / "build"
if not processed_data_root.exists():
processed_data_root.mkdir()
build_snli(processed_data_root)
build_mnli(processed_data_root)
build_fever_nli(processed_data_root)
for round in [1, 2, 3]:
build_anli(processed_data_root, round)
print("NLI data built!")
if __name__ == '__main__':
build_data() | anli-main | src/dataset_tools/build_data.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
from utils import common
from typing import List, Dict
from tqdm import tqdm
from collections import defaultdict
import config
from pathlib import Path
smnli_label2std_label = defaultdict(lambda: "o")  # "o" stands for any other label, which is invalid.
smnli_label2std_label.update({
"entailment": "e",
"neutral": "n",
"contradiction": "c",
"hidden": "h",
})
fever_label2std_label = defaultdict(lambda: "o")
fever_label2std_label.update({
'SUPPORTS': "e",
'NOT ENOUGH INFO': "n",
'REFUTES': "c",
'hidden': "h",
})
anli_label2std_label = defaultdict(lambda: "o")
anli_label2std_label.update({
'e': "e",
'n': "n",
'c': "c",
'hidden': "h",
})
# standard output format: {uid, premise, hypothesis, label, extra_dataset_related_field.}
def sm_nli2std_format(d_list, filter_invalid=True):
p_list: List[Dict] = []
for item in d_list:
formatted_item: Dict = dict()
formatted_item['uid']: str = item["pairID"]
formatted_item['premise']: str = item["sentence1"]
formatted_item['hypothesis']: str = item["sentence2"]
formatted_item['label']: str = smnli_label2std_label[item["gold_label"]]
if filter_invalid and formatted_item['label'] == 'o':
continue # Skip example with invalid label.
p_list.append(formatted_item)
return p_list
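# Illustrative example (not executed): an SNLI-style record such as
#   {"pairID": "p1", "sentence1": "A man plays guitar.",
#    "sentence2": "A man makes music.", "gold_label": "entailment"}
# becomes, after sm_nli2std_format([...]), a standard-format record
#   {"uid": "p1", "premise": "A man plays guitar.",
#    "hypothesis": "A man makes music.", "label": "e"}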
def fever_nli2std_format(d_list, filter_invalid=True):
p_list: List[Dict] = []
for item in d_list:
formatted_item: Dict = dict()
formatted_item['uid']: str = item["fid"]
formatted_item['premise']: str = item["context"]
formatted_item['hypothesis']: str = item["query"]
formatted_item['label']: str = fever_label2std_label[item["label"]]
if filter_invalid and formatted_item['label'] == 'o':
continue # Skip example with invalid label.
p_list.append(formatted_item)
return p_list
def a_nli2std_format(d_list, filter_invalid=True):
p_list: List[Dict] = []
for item in d_list:
formatted_item: Dict = dict()
formatted_item['uid']: str = item["uid"]
formatted_item['premise']: str = item["context"]
formatted_item['hypothesis']: str = item["hypothesis"]
formatted_item['label']: str = anli_label2std_label[item["label"]]
formatted_item['reason']: str = item["reason"]
if filter_invalid and formatted_item['label'] == 'o':
continue # Skip example with invalid label.
p_list.append(formatted_item)
return p_list
if __name__ == '__main__':
pass | anli-main | src/dataset_tools/format_convert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import functools
# def get_length_and_mask(seq):
# len_mask = (seq != 0).long()
# len_t = get_lengths_from_binary_sequence_mask(len_mask)
# return len_mask, len_t
def length_truncate(seq, max_l, is_elmo=False):
def _truncate(seq):
if seq.size(1) > max_l:
return seq[:, :max_l, ...]
else:
return seq
if not is_elmo:
return _truncate(seq)
else:
s1_elmo_embd = dict()
s1_elmo_embd['mask'] = _truncate(seq['mask'])
s1_elmo_embd['elmo_representations'] = []
for e_rep in seq['elmo_representations']:
s1_elmo_embd['elmo_representations'].append(_truncate(e_rep))
return s1_elmo_embd
def pad_1d(seq, pad_l):
"""
The seq is a sequence having shape [T, ..]. Note: The seq contains only one instance. This is not batched.
:param seq: Input sequence with shape [T, ...]
:param pad_l: The required pad_length.
:return: Output sequence will have shape [Pad_L, ...]
"""
l = seq.size(0)
if l >= pad_l:
return seq[:pad_l, ] # Truncate the length if the length is bigger than required padded_length.
else:
pad_seq = Variable(seq.data.new(pad_l - l, *seq.size()[1:]).zero_()) # Requires_grad is False
return torch.cat([seq, pad_seq], dim=0)
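# Illustrative sketch of pad_1d: pad (or truncate) a single un-batched sequence.
def _demo_pad_1d():
    seq = torch.randn(3, 5)        # [T=3, D=5]
    padded = pad_1d(seq, 6)        # -> [6, 5], the last three rows are zeros
    truncated = pad_1d(seq, 2)     # -> [2, 5]
    return padded.shape, truncated.shape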
def get_state_shape(rnn: nn.RNN, batch_size, bidirectional=False):
"""
Return the state shape of a given RNN. This is helpful when you want to create a init state for RNN.
Example:
c0 = h0 = Variable(src_seq_p.data.new(*get_state_shape([your rnn], 3, bidirectional)).zero_())
:param rnn: nn.LSTM, nn.GRU or subclass of nn.RNN
:param batch_size:
:param bidirectional:
:return:
"""
if bidirectional:
return rnn.num_layers * 2, batch_size, rnn.hidden_size
else:
return rnn.num_layers, batch_size, rnn.hidden_size
def pack_list_sequence(inputs, l, max_l=None, batch_first=True):
"""
Pack a batch of Tensor into one Tensor with max_length.
:param inputs:
:param l:
:param max_l: The max_length of the packed sequence.
:param batch_first:
:return:
"""
batch_list = []
max_l = max(list(l)) if not max_l else max_l
batch_size = len(inputs)
for b_i in range(batch_size):
batch_list.append(pad_1d(inputs[b_i], max_l))
pack_batch_list = torch.stack(batch_list, dim=1) if not batch_first \
else torch.stack(batch_list, dim=0)
return pack_batch_list
def pack_for_rnn_seq(inputs, lengths, batch_first=True, states=None):
"""
:param states: [rnn.num_layers, batch_size, rnn.hidden_size]
:param inputs: Shape of the input should be [B, T, D] if batch_first else [T, B, D].
:param lengths: [B]
:param batch_first:
:return:
"""
if not batch_first:
_, sorted_indices = lengths.sort()
'''
Reverse to decreasing order
'''
r_index = reversed(list(sorted_indices))
s_inputs_list = []
lengths_list = []
reverse_indices = np.zeros(lengths.size(0), dtype=np.int64)
for j, i in enumerate(r_index):
s_inputs_list.append(inputs[:, i, :].unsqueeze(1))
lengths_list.append(lengths[i])
reverse_indices[i] = j
reverse_indices = list(reverse_indices)
s_inputs = torch.cat(s_inputs_list, 1)
packed_seq = nn.utils.rnn.pack_padded_sequence(s_inputs, lengths_list)
return packed_seq, reverse_indices
else:
_, sorted_indices = lengths.sort()
'''
Reverse to decreasing order
'''
r_index = reversed(list(sorted_indices))
s_inputs_list = []
lengths_list = []
reverse_indices = np.zeros(lengths.size(0), dtype=np.int64)
if states is None:
states = ()
elif not isinstance(states, tuple):
states = (states,) # rnn.num_layers, batch_size, rnn.hidden_size
states_lists = tuple([] for _ in states)
for j, i in enumerate(r_index):
s_inputs_list.append(inputs[i, :, :])
lengths_list.append(lengths[i])
reverse_indices[i] = j
for state_list, state in zip(states_lists, states):
state_list.append(state[:, i, :].unsqueeze(1))
reverse_indices = list(reverse_indices)
s_inputs = torch.stack(s_inputs_list, dim=0)
packed_seq = nn.utils.rnn.pack_padded_sequence(s_inputs, lengths_list, batch_first=batch_first)
r_states = tuple(torch.cat(state_list, dim=1) for state_list in states_lists)
if len(r_states) == 1:
r_states = r_states[0]
return packed_seq, reverse_indices, r_states
def unpack_from_rnn_seq(packed_seq, reverse_indices, batch_first=True):
unpacked_seq, _ = nn.utils.rnn.pad_packed_sequence(packed_seq, batch_first=batch_first)
s_inputs_list = []
if not batch_first:
for i in reverse_indices:
s_inputs_list.append(unpacked_seq[:, i, :].unsqueeze(1))
return torch.cat(s_inputs_list, 1)
else:
for i in reverse_indices:
s_inputs_list.append(unpacked_seq[i, :, :].unsqueeze(0))
return torch.cat(s_inputs_list, 0)
def reverse_indice_for_state(states, reverse_indices):
"""
:param states: [rnn.num_layers, batch_size, rnn.hidden_size]
:param reverse_indices: [batch_size]
:return:
"""
if states is None:
states = ()
elif not isinstance(states, tuple):
states = (states,) # rnn.num_layers, batch_size, rnn.hidden_size
states_lists = tuple([] for _ in states)
for i in reverse_indices:
for state_list, state in zip(states_lists, states):
state_list.append(state[:, i, :].unsqueeze(1))
r_states = tuple(torch.cat(state_list, dim=1) for state_list in states_lists)
if len(r_states) == 1:
r_states = r_states[0]
return r_states
def auto_rnn(rnn: nn.RNN, seqs, lengths, batch_first=True, init_state=None, output_last_states=False):
batch_size = seqs.size(0) if batch_first else seqs.size(1)
state_shape = get_state_shape(rnn, batch_size, rnn.bidirectional)
# if init_state is None:
# h0 = c0 = Variable(seqs.data.new(*state_shape).zero_())
# else:
# h0 = init_state[0] # rnn.num_layers, batch_size, rnn.hidden_size
# c0 = init_state[1]
packed_pinputs, r_index, init_state = pack_for_rnn_seq(seqs, lengths, batch_first, init_state)
if len(init_state) == 0:
h0 = c0 = Variable(seqs.data.new(*state_shape).zero_())
init_state = (h0, c0)
output, last_state = rnn(packed_pinputs, init_state)
output = unpack_from_rnn_seq(output, r_index, batch_first)
if not output_last_states:
return output
else:
last_state = reverse_indice_for_state(last_state, r_index)
return output, last_state
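# Illustrative sketch of auto_rnn: run a bidirectional LSTM over a padded batch with
# per-example lengths, hiding the pack/unpack bookkeeping above.
def _demo_auto_rnn():
    rnn = nn.LSTM(input_size=4, hidden_size=3, num_layers=1, bidirectional=True)
    seqs = torch.randn(2, 5, 4)         # [B, T, D], already padded
    lengths = torch.tensor([5, 3])      # true length of each example
    out = auto_rnn(rnn, seqs, lengths)  # [B, T, 2 * hidden_size] = [2, 5, 6]
    return out.shape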
def pack_sequence_for_linear(inputs, lengths, batch_first=True):
"""
:param inputs: [B, T, D] if batch_first
:param lengths: [B]
:param batch_first:
:return:
"""
batch_list = []
if batch_first:
for i, l in enumerate(lengths):
# print(inputs[i, :l].size())
batch_list.append(inputs[i, :l])
packed_sequence = torch.cat(batch_list, 0)
# if chuck:
# return list(torch.chunk(packed_sequence, chuck, dim=0))
# else:
return packed_sequence
else:
raise NotImplementedError()
def chucked_forward(inputs, net, chuck=None):
if not chuck:
return net(inputs)
else:
output_list = [net(chuck) for chuck in torch.chunk(inputs, chuck, dim=0)]
return torch.cat(output_list, dim=0)
def unpack_sequence_for_linear(inputs, lengths, batch_first=True):
batch_list = []
max_l = max(lengths)
if not isinstance(inputs, list):
inputs = [inputs]
inputs = torch.cat(inputs)
if batch_first:
start = 0
for l in lengths:
end = start + l
batch_list.append(pad_1d(inputs[start:end], max_l))
start = end
return torch.stack(batch_list)
else:
raise NotImplementedError()
def seq2seq_cross_entropy(logits, label, l, chuck=None, sos_truncate=True):
"""
:param logits: [exB, V] : exB = sum(l)
:param label: [B] : a batch of Label
:param l: [B] : a batch of LongTensor indicating the lengths of each inputs
:param chuck: Number of chuck to process
:return: A loss value
"""
packed_label = pack_sequence_for_linear(label, l)
cross_entropy_loss = functools.partial(F.cross_entropy, reduction='sum')
total = sum(l)
assert total == logits.size(0) or packed_label.size(0) == logits.size(0), \
"logits length mismatch with label length."
if chuck:
logits_losses = 0
for x, y in zip(torch.chunk(logits, chuck, dim=0), torch.chunk(packed_label, chuck, dim=0)):
logits_losses += cross_entropy_loss(x, y)
return logits_losses * (1 / total)
else:
return cross_entropy_loss(logits, packed_label) * (1 / total)
def max_along_time(inputs, lengths, list_in=False):
"""
:param inputs: [B, T, D]
:param lengths: [B]
:return: [B * D] max_along_time
:param list_in:
"""
ls = list(lengths)
if not list_in:
b_seq_max_list = []
for i, l in enumerate(ls):
seq_i = inputs[i, :l, :]
seq_i_max, _ = seq_i.max(dim=0)
seq_i_max = seq_i_max.squeeze()
b_seq_max_list.append(seq_i_max)
return torch.stack(b_seq_max_list)
else:
b_seq_max_list = []
for i, l in enumerate(ls):
seq_i = inputs[i]
seq_i_max, _ = seq_i.max(dim=0)
seq_i_max = seq_i_max.squeeze()
b_seq_max_list.append(seq_i_max)
return torch.stack(b_seq_max_list)
def avg_along_time(inputs, lengths, list_in=False):
"""
:param inputs: [B, T, D]
:param lengths: [B]
:return: [B * D] max_along_time
:param list_in:
"""
ls = list(lengths)
if not list_in:
b_seq_avg_list = []
for i, l in enumerate(ls):
seq_i = inputs[i, :l, :]
seq_i_avg = seq_i.mean(dim=0)
seq_i_avg = seq_i_avg.squeeze()
b_seq_avg_list.append(seq_i_avg)
return torch.stack(b_seq_avg_list)
else:
b_seq_avg_list = []
for i, l in enumerate(ls):
seq_i = inputs[i]
seq_i_avg = seq_i.mean(dim=0)
seq_i_avg = seq_i_avg.squeeze()
b_seq_avg_list.append(seq_i_avg)
return torch.stack(b_seq_avg_list)
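# Illustrative sketch: masked max- / mean-pooling over time with per-example lengths.
def _demo_pool_along_time():
    inputs = torch.randn(2, 4, 3)                  # [B, T, D]
    lengths = torch.tensor([4, 2])                 # only the first `l` steps are valid
    pooled_max = max_along_time(inputs, lengths)   # [2, 3]
    pooled_avg = avg_along_time(inputs, lengths)   # [2, 3]
    return pooled_max.shape, pooled_avg.shape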
# def length_truncate(inputs, lengths, max_len):
# """
# :param inputs: [B, T]
# :param lengths: [B]
# :param max_len: int
# :return: [B, T]
# """
# max_l = max(1, max_len)
# max_s1_l = min(max(lengths), max_l)
# lengths = lengths.clamp(min=1, max=max_s1_l)
# if inputs.size(1) > max_s1_l:
# inputs = inputs[:, :max_s1_l]
#
# return inputs, lengths, max_s1_l
def get_reverse_indices(indices, lengths):
r_indices = indices.data.new(indices.size()).fill_(0)
batch_size = indices.size(0)
for i in range(int(batch_size)):
b_ind = indices[i]
b_l = lengths[i]
for k, ind in enumerate(b_ind):
if k >= b_l:
break
r_indices[i, int(ind)] = k
return r_indices
def index_ordering(inputs, lengths, indices, pad_value=0):
"""
:param inputs: [B, T, ~]
:param lengths: [B]
:param indices: [B, T]
:return:
"""
batch_size = inputs.size(0)
ordered_out_list = []
for i in range(int(batch_size)):
b_input = inputs[i]
b_l = lengths[i]
b_ind = indices[i]
b_out = b_input[b_ind]
if b_out.size(0) > b_l:
b_out[b_l:] = pad_value
ordered_out_list.append(b_out)
outs = torch.stack(ordered_out_list, dim=0)
return outs
def start_and_end_token_handling(inputs, lengths, sos_index=1, eos_index=2, pad_index=0,
op=None):
"""
:param inputs: [B, T]
:param lengths: [B]
:param sos_index:
:param eos_index:
:param pad_index:
:return:
"""
batch_size = inputs.size(0)
if not op:
return inputs, lengths
elif op == 'rm_start':
inputs = torch.cat([inputs[:, 1:], Variable(inputs.data.new(batch_size, 1).zero_())], dim=1)
return inputs, lengths - 1
elif op == 'rm_end':
for i in range(batch_size):
pass
# Potential problems!?
# inputs[i, lengths[i] - 1] = pad_index
return inputs, lengths - 1
elif op == 'rm_both':
for i in range(batch_size):
pass
# Potential problems!?
# inputs[i, lengths[i] - 1] = pad_index
inputs = torch.cat([inputs[:, 1:], Variable(inputs.data.new(batch_size, 1).zero_())], dim=1)
return inputs, lengths - 2
def seq2seq_att(mems, lengths, state, att_net=None):
"""
:param mems: [B, T, D_mem] This are the memories.
I call memory for this variable because I think attention is just like read something and then
make alignments with your memories.
This memory here is usually the input hidden state of the encoder.
:param lengths: [B]
:param state: [B, D_state]
I call state for this variable because it's the state I percepts at this time step.
:param att_net: This is the attention network that will be used to calculate the alignment score between
state and memories.
input of the att_net is mems and state with shape:
mems: [exB, D_mem]
state: [exB, D_state]
return of the att_net is [exB, 1]
So any function that map a vector to a scalar could work.
:return: [B, D_result]
"""
d_state = state.size(1)
if not att_net:
return state
else:
batch_list_mems = []
batch_list_state = []
for i, l in enumerate(lengths):
b_mems = mems[i, :l] # [T, D_mem]
batch_list_mems.append(b_mems)
b_state = state[i].expand(b_mems.size(0), d_state) # [T, D_state]
batch_list_state.append(b_state)
packed_sequence_mems = torch.cat(batch_list_mems, 0) # [sum(l), D_mem]
packed_sequence_state = torch.cat(batch_list_state, 0) # [sum(l), D_state]
align_score = att_net(packed_sequence_mems, packed_sequence_state) # [sum(l), 1]
# The score grouped as [(a1, a2, a3), (a1, a2), (a1, a2, a3, a4)].
# aligned_seq = packed_sequence_mems * align_score
start = 0
result_list = []
for i, l in enumerate(lengths):
end = start + l
b_mems = packed_sequence_mems[start:end, :] # [l, D_mems]
b_score = align_score[start:end, :] # [l, 1]
softed_b_score = F.softmax(b_score.transpose(0, 1), dim=1).transpose(0, 1) # [l, 1]
weighted_sum = torch.sum(b_mems * softed_b_score, dim=0, keepdim=False) # [D_mems]
result_list.append(weighted_sum)
start = end
result = torch.stack(result_list, dim=0)
return result
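# Illustrative sketch of seq2seq_att with a trivial dot-product alignment network;
# any callable mapping (mems, state) rows to one score per row would work here.
def _demo_seq2seq_att():
    mems = torch.randn(2, 5, 4)       # [B, T, D_mem]
    lengths = torch.tensor([5, 3])
    state = torch.randn(2, 4)         # [B, D_state]; D_state == D_mem for the dot product
    att_net = lambda m, s: (m * s).sum(dim=1, keepdim=True)   # [sum(l), 1]
    attended = seq2seq_att(mems, lengths, state, att_net)     # [B, D_mem] = [2, 4]
    return attended.shape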
# Test something | anli-main | src/flint/torch_util.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree. | anli-main | src/flint/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
class FlintField(object):
@classmethod
def batching(cls, batched_data):
raise NotImplementedError()
class RawFlintField(FlintField):
@classmethod
def batching(cls, batched_data):
return batched_data
class LabelFlintField(FlintField):
def batching(self, batched_data):
return torch.tensor(batched_data)
class ArrayIndexFlintField(FlintField):
def __init__(self, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False) -> None:
super().__init__()
self.pad_idx = pad_idx
self.eos_idx = eos_idx
self.left_pad = left_pad
self.move_eos_to_beginning = move_eos_to_beginning
def collate_tokens(self, values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False):
"""
Convert a list of 1d tensors into a padded 2d tensor.
"""
if not torch.is_tensor(values[0]):
values = [torch.tensor(v) for v in values]
size = max(v.size(0) for v in values)
res = values[0].new(len(values), size).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
assert src[-1] == eos_idx
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
for i, v in enumerate(values):
copy_tensor(v, res[i][size - len(v):] if left_pad else res[i][:len(v)])
return res
def batching(self, batched_data):
return self.collate_tokens(batched_data,
self.pad_idx,
self.eos_idx,
self.left_pad,
self.move_eos_to_beginning)
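# Illustrative sketch: variable-length token id lists are collated into one padded 2D tensor.
def _demo_array_index_field():
    field = ArrayIndexFlintField(pad_idx=0)
    padded = field.batching([[5, 6, 7], [8, 9]])   # tensor([[5, 6, 7], [8, 9, 0]])
    return padded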
| anli-main | src/flint/data_utils/fields.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree.
import torch
from typing import Dict, Type
from flint.data_utils.fields import FlintField, RawFlintField
class BaseBatchBuilder(object):
def __init__(self, batching_schema: Dict[str, FlintField]) -> None:
super().__init__()
self.batching_schema: Dict[str, FlintField] = batching_schema
def __call__(self, batch):
field_names = batch[0].keys()
batched_data = dict()
for field_name in field_names:
if field_name not in self.batching_schema:
# default is RawFlintField
batched_data[field_name] = RawFlintField.batching([item[field_name] for item in batch])
else:
batched_data[field_name] = self.batching_schema[field_name].batching([item[field_name] for item in batch])
return batched_data
def has_tensor(obj) -> bool:
"""
Given a possibly complex data structure,
check if it has any torch.Tensors in it.
"""
if isinstance(obj, torch.Tensor):
return True
elif isinstance(obj, dict):
return any(has_tensor(value) for value in obj.values())
elif isinstance(obj, (list, tuple)):
return any(has_tensor(item) for item in obj)
else:
return False
def move_to_device(obj, cuda_device: int):
"""
Given a structure (possibly) containing Tensors on the CPU,
move all the Tensors to the specified GPU (or do nothing, if they should be on the CPU).
"""
if cuda_device < 0 or not has_tensor(obj):
return obj
elif isinstance(obj, torch.Tensor):
return obj.cuda(cuda_device)
elif isinstance(obj, dict):
return {key: move_to_device(value, cuda_device) for key, value in obj.items()}
elif isinstance(obj, list):
return [move_to_device(item, cuda_device) for item in obj]
elif isinstance(obj, tuple) and hasattr(obj, "_fields"):
# This is the best way to detect a NamedTuple, it turns out.
return obj.__class__(*(move_to_device(item, cuda_device) for item in obj))
elif isinstance(obj, tuple):
return tuple(move_to_device(item, cuda_device) for item in obj)
else:
return obj
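# Illustrative sketch: fields listed in the schema are collated by their FlintField,
# everything else falls back to RawFlintField (kept as a plain python list).
def _demo_batch_builder():
    from flint.data_utils.fields import ArrayIndexFlintField, LabelFlintField
    schema = {'input_ids': ArrayIndexFlintField(pad_idx=0), 'label': LabelFlintField()}
    builder = BaseBatchBuilder(schema)
    batch = [
        {'uid': 'a', 'input_ids': [1, 2, 3], 'label': 0},
        {'uid': 'b', 'input_ids': [4, 5], 'label': 2},
    ]
    return builder(batch)  # {'uid': ['a', 'b'], 'input_ids': padded tensor, 'label': tensor([0, 2])}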
if __name__ == '__main__':
print(RawFlintField.batching) | anli-main | src/flint/data_utils/batchbuilder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under Creative Commons-Non Commercial 4.0 found in the
# LICENSE file in the root directory of this source tree. | anli-main | src/flint/data_utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import dreamerv2.api as dv2
from dreamerv2.train import run
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def main(args):
## get defaults
config = dv2.defaults
if args.task:
if 'crafter' in args.task:
config = config.update(dv2.configs['crafter'])
elif 'minigrid' in args.task:
config = config.update(dv2.configs['minigrid'])
elif 'atari' in args.task:
config = config.update(dv2.configs['atari'])
elif 'dmc' in args.task:
config = config.update(dv2.configs['dmc_vision'])
params = vars(args)
config = config.update(params)
config = config.update({
'expl_behavior': 'Plan2Explore',
'pred_discount': False,
'grad_heads': ['decoder'], # this means we dont learn the reward head
'expl_intr_scale': 1.0,
'expl_extr_scale': 0.0,
'discount': 0.99,
})
run(config)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='RL')
# DreamerV2
parser.add_argument('--xpid', type=str, default=None, help='experiment id')
parser.add_argument('--steps', type=int, default=1e6, help='number of environment steps to train')
parser.add_argument('--train_every', type=int, default=1e5, help='how often (in environment steps) to train the world model')
parser.add_argument('--offline_model_train_steps', type=int, default=25001, help='=250 * train_every (in thousands) + 1. Default assumes 100k.')
parser.add_argument('--task', type=str, default='crafter_noreward', help='environment to train on')
parser.add_argument('--logdir', default='~/wm_logs/', help='directory to save agent logs')
parser.add_argument('--num_agents', type=int, default=1, help='exploration population size.')
parser.add_argument('--seed', type=int, default=100, help='seed for init NNs.')
parser.add_argument('--envs', type=int, default=1, help='number of training envs.')
parser.add_argument('--envs_parallel', type=str, default="none", help='how to parallelize.')
parser.add_argument('--eval_envs', type=int, default=1, help='number of parallel eval envs.')
parser.add_argument('--eval_eps', type=int, default=100, help='number of eval eps.')
parser.add_argument('--eval_type', type=str, default='coincidental', help='how to evaluate the model.')
parser.add_argument('--expl_behavior', type=str, default='Plan2Explore', help='algorithm for exploration: Plan2Explore or Random.')
parser.add_argument('--load_pretrained', type=str, default='none', help='name of pretrained model')
parser.add_argument('--offline_dir', type=str, default='none', help='directory to load offline dataset')
# CASCADE
parser.add_argument('--cascade_alpha', type=float, default=0, help='Cascade weight.')
parser.add_argument('--cascade_feat', type=str, default="deter", help='Cascade features if state based.')
parser.add_argument('--cascade_k', type=int, default=5, help='number of nearest neighbors to use in the mean dist.')
parser.add_argument('--cascade_sample', type=int, default=100, help='max number of cascade states')
args = parser.parse_args()
main(args)
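# Example invocation (illustrative), using only the flags defined above:
#   python main.py --task crafter_noreward --num_agents 5 --cascade_alpha 0.5 \
#       --cascade_k 5 --steps 1000000 --logdir ~/wm_logs/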
| cascade-main | main.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import tensorflow as tf
from tensorflow_probability import distributions as tfd
import agent
import common
class Random(common.Module):
def __init__(self, config, act_space, wm, tfstep, reward):
self.config = config
self.act_space = act_space
discrete = hasattr(act_space, 'n')
if self.config.actor.dist == 'auto':
self.config = self.config.update({
'actor.dist': 'onehot' if discrete else 'trunc_normal'})
def actor(self, feat):
shape = feat.shape[:-1] + self.act_space.shape
if self.config.actor.dist == 'onehot':
return common.OneHotDist(tf.zeros(shape))
else:
dist = tfd.Uniform(-tf.ones(shape), tf.ones(shape))
return tfd.Independent(dist, 1)
def train(self, start, context, data):
return None, {}
class Plan2Explore(common.Module):
def __init__(self, config, act_space, wm, tfstep, reward):
self.config = config
self.act_space = act_space
self.tfstep = tfstep
self.reward = reward
self.wm = wm
self._init_actors()
stoch_size = config.rssm.stoch
if config.rssm.discrete:
stoch_size *= config.rssm.discrete
size = {
'embed': 32 * config.encoder.cnn_depth,
'stoch': stoch_size,
'deter': config.rssm.deter,
'feat': config.rssm.stoch + config.rssm.deter,
}[self.config.disag_target]
self._networks = [
common.MLP(size, **config.expl_head)
for _ in range(config.disag_models)]
self.opt = common.Optimizer('expl', **config.expl_opt)
self.extr_rewnorm = common.StreamNorm(**self.config.expl_reward_norm)
def _init_actors(self):
self.intr_rewnorm = common.StreamNorm(**self.config.expl_reward_norm)
self.ac = [agent.ActorCritic(self.config, self.act_space, self.tfstep) for _ in range(self.config.num_agents)]
if self.config.cascade_alpha > 0:
self.intr_rewnorm_cascade = [common.StreamNorm(**self.config.expl_reward_norm) for _ in range(self.config.num_agents)]
self.actor = [ac.actor for ac in self.ac]
def train(self, start, context, data):
metrics = {}
stoch = start['stoch']
if self.config.rssm.discrete:
stoch = tf.reshape(
stoch, stoch.shape[:-2] + (stoch.shape[-2] * stoch.shape[-1]))
target = {
'embed': context['embed'],
'stoch': stoch,
'deter': start['deter'],
'feat': context['feat'],
}[self.config.disag_target]
inputs = context['feat']
if self.config.disag_action_cond:
action = tf.cast(data['action'], inputs.dtype)
inputs = tf.concat([inputs, action], -1)
metrics.update(self._train_ensemble(inputs, target))
gpu = tf.config.list_physical_devices('GPU')
if gpu:
tf.config.experimental.set_memory_growth(gpu[0], True)
print(f"Before: {tf.config.experimental.get_memory_usage('GPU:0')}", flush=True)
self.cascade = []
reward_func = self._intr_reward_incr
print("training explorers", flush=True)
[metrics.update(ac.train(self.wm, start, data['is_terminal'], reward_func)) for ac in self.ac]
self.cascade = []
print("finished training explorers", flush=True)
return None, metrics
def _intr_reward(self, seq, rtn_meta=True):
inputs = seq['feat']
if self.config.disag_action_cond:
action = tf.cast(seq['action'], inputs.dtype)
inputs = tf.concat([inputs, action], -1)
preds = [head(inputs).mode() for head in self._networks]
disag = tf.cast(tf.tensor(preds).std(0).mean(-1), tf.float16)
if self.config.disag_log:
disag = tf.math.log(disag)
reward = self.config.expl_intr_scale * self.intr_rewnorm(disag)[0]
if self.config.expl_extr_scale:
reward += self.config.expl_extr_scale * self.extr_rewnorm(
self.reward(seq))[0]
if rtn_meta:
return reward, {'Disagreement': [disag.mean()]}
else:
return reward
@tf.function
def get_dists(self, obs, cascade):
### zzz way to do this
out = []
for idx in range(obs.shape[1]):
cascade = tf.reshape(cascade, [-1, cascade.shape[-1]])
ob = tf.reshape(obs[:, idx, :], [obs.shape[0], 1, obs.shape[-1]])
dists = tf.math.sqrt(tf.einsum('ijk, ijk->ij', cascade - ob, cascade - ob))
topk_mean = tf.negative(tf.math.top_k(tf.negative(dists), k=self.config.cascade_k)[0])
out += [tf.reshape(tf.math.reduce_mean(topk_mean, axis=-1), (1, -1))]
return tf.concat(out, axis=1)
def get_cascade_entropy(self):
cascade = tf.concat(self.cascade, axis=0)
cascade = tf.reshape(cascade, [-1, cascade.shape[-1]])
entropy = tf.math.reduce_variance(cascade, axis=-1).mean()
self.entropy = entropy
return entropy
def _intr_reward_incr(self, seq):
agent_idx = len(self.cascade)
## disagreement
reward, met = self._intr_reward(seq)
# CASCADE
if self.config.cascade_alpha > 0:
## reward = (1 - \alpha) * disagreement + \alpha * diversity
if len(self.cascade) == 0:
idxs = tf.range(tf.shape(seq[self.config.cascade_feat])[1])
size = min(seq[self.config.cascade_feat].shape[1], self.config.cascade_sample)
self.ridxs = tf.random.shuffle(idxs)[:size]
self.dist = None
self.entropy = 0
self.cascade.append(tf.gather(seq[self.config.cascade_feat][-1], self.ridxs, axis=1))
cascade_reward = self.get_cascade_entropy()
cascade_reward = tf.concat([tf.cast(tf.zeros([seq[self.config.cascade_feat].shape[0] - 1, seq[self.config.cascade_feat].shape[1]]), tf.float16), tf.cast(tf.broadcast_to(cascade_reward, shape=(1, seq[self.config.cascade_feat].shape[1])), tf.float16)], axis=0)
cascade_reward = self.intr_rewnorm_cascade[agent_idx](cascade_reward)[0]
met.update({'Diversity': [cascade_reward.mean()]})
reward = reward * (1 - self.config.cascade_alpha) + self.config.cascade_alpha * cascade_reward
return reward, met
def _train_ensemble(self, inputs, targets):
if self.config.disag_offset:
targets = targets[:, self.config.disag_offset:]
inputs = inputs[:, :-self.config.disag_offset]
targets = tf.stop_gradient(targets)
inputs = tf.stop_gradient(inputs)
with tf.GradientTape() as tape:
preds = [head(inputs) for head in self._networks]
loss = -sum([pred.log_prob(targets).mean() for pred in preds])
metrics = self.opt(tape, loss, self._networks)
return metrics
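# Illustrative sketch of the CASCADE diversity term used in _intr_reward_incr: a candidate
# state is rewarded by its mean distance to the k nearest states already collected from
# earlier population members (plain, eagerly-executed TensorFlow; not the class code path).
def _cascade_knn_diversity_sketch(candidate_states, visited_states, k=5):
    # candidate_states: [N, D], visited_states: [M, D]
    diffs = candidate_states[:, None, :] - visited_states[None, :, :]       # [N, M, D]
    dists = tf.sqrt(tf.reduce_sum(diffs * diffs, axis=-1) + 1e-8)           # [N, M]
    neg_topk, _ = tf.math.top_k(-dists, k=min(k, int(visited_states.shape[0])))
    return tf.reduce_mean(-neg_topk, axis=-1)                               # [N]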
class ModelLoss(common.Module):
def __init__(self, config, act_space, wm, tfstep, reward):
self.config = config
self.reward = reward
self.wm = wm
self.ac = agent.ActorCritic(config, act_space, tfstep)
self.actor = self.ac.actor
self.head = common.MLP([], **self.config.expl_head)
self.opt = common.Optimizer('expl', **self.config.expl_opt)
def train(self, start, context, data):
metrics = {}
target = tf.cast(context[self.config.expl_model_loss], tf.float16)
with tf.GradientTape() as tape:
loss = -self.head(context['feat']).log_prob(target).mean()
metrics.update(self.opt(tape, loss, self.head))
metrics.update(self.ac.train(
self.wm, start, data['is_terminal'], self._intr_reward))
return None, metrics
def _intr_reward(self, seq):
reward = self.config.expl_intr_scale * self.head(seq['feat']).mode()
if self.config.expl_extr_scale:
reward += self.config.expl_extr_scale * self.reward(seq)
return reward
| cascade-main | dreamerv2/expl.py |
import logging
import os
import pathlib
import sys
import warnings
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger().setLevel('ERROR')
warnings.filterwarnings('ignore', '.*box bound precision lowered.*')
sys.path.append(str(pathlib.Path(__file__).parent))
sys.path.append(str(pathlib.Path(__file__).parent.parent))
import ruamel.yaml as yaml
import common
configs = yaml.safe_load(
(pathlib.Path(__file__).parent / 'configs.yaml').read_text())
defaults = common.Config(configs.pop('defaults'))
| cascade-main | dreamerv2/api.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras import mixed_precision as prec
from dreamerv2 import common
from dreamerv2 import expl
tfd = tfp.distributions
class Agent(common.Module):
def __init__(self, config, obs_space, act_space, step):
self.config = config
self.obs_space = obs_space
self.act_space = act_space['action']
self.step = step
self.tfstep = tf.Variable(int(self.step), tf.int64)
self.wm = WorldModel(config, obs_space, self.tfstep)
if config.task in common.DMC_TASK_IDS:
self._task_behavior = {
key: ActorCritic(config, self.act_space, self.tfstep)
for key in common.DMC_TASK_IDS[config.task]
}
else:
self._task_behavior = ActorCritic(config, self.act_space, self.tfstep)
if config.expl_behavior == 'greedy':
self._expl_behavior = self._task_behavior
else:
self._expl_behavior = getattr(expl, config.expl_behavior)(
self.config, self.act_space, self.wm, self.tfstep,
lambda seq: self.wm.heads['reward'](seq['feat']).mode())
@tf.function
def policy(self, obs, state=None, policy_idx=0, mode='train', goal=''):
obs = tf.nest.map_structure(tf.tensor, obs)
tf.py_function(lambda: self.tfstep.assign(
int(self.step), read_value=False), [], [])
if state is None:
latent = self.wm.rssm.initial(obs['reward'].shape[0])
action = tf.zeros((obs['reward'].shape[0],) + self.act_space.shape)
state = latent, action
latent, action = state
embed = self.wm.encoder(self.wm.preprocess(obs))
sample = (mode == 'train') or not self.config.eval_state_mean
latent, _ = self.wm.rssm.obs_step(
latent, action, embed, obs['is_first'], sample)
feat = self.wm.rssm.get_feat(latent)
if mode == 'eval':
if goal == '':
actor = self._task_behavior.actor(feat)
else:
actor = self._task_behavior[goal].actor(feat)
action = actor.mode()
noise = self.config.eval_noise
elif mode == 'explore':
try:
actor = self._expl_behavior.actor[policy_idx](feat)
except:
print("Population version not implemented")
actor = self._expl_behavior.actor(feat)
action = actor.sample()
noise = self.config.expl_noise
elif mode == 'train':
actor = self._task_behavior.actor(feat)
action = actor.sample()
noise = self.config.expl_noise
action = common.action_noise(action, noise, self.act_space)
outputs = {'action': action}
state = (latent, action)
return outputs, state
@tf.function
def train(self, data, state=None):
## deprecated
metrics = {}
state, outputs, mets = self.wm.train(data, state)
metrics.update(mets)
start = outputs['post']
if self.config.eval_type == "labels" or 'dmc' in self.config.task:
if isinstance(self._task_behavior, dict):
for key in self._task_behavior.keys():
reward = lambda seq: (self.wm.heads['reward_' + key](seq['feat']).mode(), {})
mets = self._task_behavior[key].train(
self.wm, start, data['is_terminal'], reward)
metrics.update(**{k+'_'+key: v for k, v in mets.items()})
else:
reward = lambda seq: (self.wm.heads['reward'](seq['feat']).mode(), {})
metrics.update(self._task_behavior.train(
self.wm, start, data['is_terminal'], reward))
if self.config.expl_behavior != 'greedy':
mets = self._expl_behavior.train(start, outputs, data)[-1]
metrics.update({'expl_' + key: value for key, value in mets.items()})
return state, metrics
@tf.function
def report(self, data):
report = {}
data = self.wm.preprocess(data)
for key in self.wm.heads['decoder'].cnn_keys:
name = key.replace('/', '_')
report[f'openl_{name}'] = self.wm.video_pred(data, key)
return report
class WorldModel(common.Module):
def __init__(self, config, obs_space, tfstep):
shapes = {k: tuple(v.shape) for k, v in obs_space.items()}
self.config = config
self.tfstep = tfstep
self.rssm = common.EnsembleRSSM(**config.rssm)
self.encoder = common.Encoder(shapes, **config.encoder)
self.heads = {
'decoder': common.Decoder(shapes, **config.decoder),
}
if config.task in common.DMC_TASK_IDS:
self.heads.update({f'reward_{common.DMC_TASK_IDS[config.task][idx]}': common.MLP([], **config.reward_head)
for idx in range(len(common.DMC_TASK_IDS[config.task]))})
else:
self.heads['reward'] = common.MLP([], **config.reward_head)
if config.pred_discount:
self.heads['discount'] = common.MLP([], **config.discount_head)
for name in config.grad_heads:
assert name in self.heads, name
self.model_opt = common.Optimizer('model', **config.model_opt)
def train(self, data, state=None):
with tf.GradientTape() as model_tape:
model_loss, state, outputs, metrics = self.loss(data, state)
modules = [self.encoder, self.rssm, *self.heads.values()]
metrics.update(self.model_opt(model_tape, model_loss, modules))
return state, outputs, metrics
def loss(self, data, state=None):
data = self.preprocess(data)
embed = self.encoder(data)
post, prior = self.rssm.observe(
embed, data['action'], data['is_first'], state)
kl_loss, kl_value = self.rssm.kl_loss(post, prior, **self.config.kl)
assert len(kl_loss.shape) == 0
likes = {}
losses = {'kl': kl_loss}
feat = self.rssm.get_feat(post)
for name, head in self.heads.items():
grad_head = (name in self.config.grad_heads)
inp = feat if grad_head else tf.stop_gradient(feat)
out = head(inp)
dists = out if isinstance(out, dict) else {name: out}
for key, dist in dists.items():
if 'reward_' in key:
_, rew_key = key.split('_')
print(f"\n\nStart Training Reward Head {rew_key}...", flush=True)
rew_idx = common.DMC_TASK_IDS[self.config.task].index(rew_key)
like = tf.cast(dist.log_prob(data['reward'][:, :, rew_idx]), tf.float32)
else:
like = tf.cast(dist.log_prob(data[key]), tf.float32)
likes[key] = like
losses[key] = -like.mean()
model_loss = sum(
self.config.loss_scales.get(k, 1.0) * v for k, v in losses.items())
outs = dict(
embed=embed, feat=feat, post=post,
prior=prior, likes=likes, kl=kl_value)
metrics = {f'{name}_loss': value for name, value in losses.items()}
metrics['model_kl'] = kl_value.mean()
metrics['prior_ent'] = self.rssm.get_dist(prior).entropy().mean()
metrics['post_ent'] = self.rssm.get_dist(post).entropy().mean()
last_state = {k: v[:, -1] for k, v in post.items()}
return model_loss, last_state, outs, metrics
def imagine(self, policy, start, is_terminal, horizon, idx=None):
flatten = lambda x: x.reshape([-1] + list(x.shape[2:]))
start = {k: flatten(v) for k, v in start.items()}
start['feat'] = self.rssm.get_feat(start)
if idx:
start['action'] = tf.zeros_like(policy(start['feat'], idx=idx).mode())
else:
start['action'] = tf.zeros_like(policy(start['feat']).mode())
seq = {k: [v] for k, v in start.items()}
for _ in range(horizon):
if idx:
action = policy(tf.stop_gradient(seq['feat'][-1]), idx=idx).sample()
else:
action = policy(tf.stop_gradient(seq['feat'][-1])).sample()
state = self.rssm.img_step({k: v[-1] for k, v in seq.items()}, action)
feat = self.rssm.get_feat(state)
for key, value in {**state, 'action': action, 'feat': feat}.items():
seq[key].append(value)
seq = {k: tf.stack(v, 0) for k, v in seq.items()}
if 'discount' in self.heads:
disc = self.heads['discount'](seq['feat']).mean()
if is_terminal is not None:
# Override discount prediction for the first step with the true
# discount factor from the replay buffer.
true_first = 1.0 - flatten(is_terminal).astype(disc.dtype)
true_first *= self.config.discount
disc = tf.concat([true_first[None], disc[1:]], 0)
else:
disc = self.config.discount * tf.ones(seq['feat'].shape[:-1])
seq['discount'] = disc
# Shift discount factors because they imply whether the following state
# will be valid, not whether the current state is valid.
seq['weight'] = tf.math.cumprod(
tf.concat([tf.ones_like(disc[:1]), disc[:-1]], 0), 0)
return seq
@tf.function
def preprocess(self, obs):
dtype = prec.global_policy().compute_dtype
obs = obs.copy()
for key, value in obs.items():
if key.startswith('log_'):
continue
if value.dtype == tf.int32:
value = value.astype(dtype)
if value.dtype == tf.uint8:
value = value.astype(dtype) / 255.0 - 0.5
obs[key] = value
obs['reward'] = {
'identity': tf.identity,
'sign': tf.sign,
'tanh': tf.tanh,
}[self.config.clip_rewards](obs['reward'])
if 'discount' not in obs:
obs['discount'] = 1.0 - obs['is_terminal'].astype(dtype)
obs['discount'] *= self.config.discount
return obs
@tf.function
def video_pred(self, data, key):
decoder = self.heads['decoder']
truth = data[key][:6] + 0.5
embed = self.encoder(data)
states, _ = self.rssm.observe(
embed[:6, :5], data['action'][:6, :5], data['is_first'][:6, :5])
recon = decoder(self.rssm.get_feat(states))[key].mode()[:6]
init = {k: v[:, -1] for k, v in states.items()}
prior = self.rssm.imagine(data['action'][:6, 5:], init)
openl = decoder(self.rssm.get_feat(prior))[key].mode()
model = tf.concat([recon[:, :5] + 0.5, openl + 0.5], 1)
error = (model - truth + 1) / 2
video = tf.concat([truth, model, error], 2)
B, T, H, W, C = video.shape
return video.transpose((1, 2, 0, 3, 4)).reshape((T, H, B * W, C))
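# Illustrative sketch of the weight computation at the end of `imagine` above: the weight of
# step t is the product of the predicted discounts of all *previous* steps, so a trajectory
# is down-weighted only after a predicted termination.
def _demo_sequence_weights():
    disc = tf.constant([[0.99], [0.99], [0.0], [0.99]], tf.float32)  # [T, B]
    weight = tf.math.cumprod(tf.concat([tf.ones_like(disc[:1]), disc[:-1]], 0), 0)
    return weight  # [[1.0], [0.99], [0.9801], [0.0]]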
class ActorCritic(common.Module):
def __init__(self, config, act_space, tfstep):
self.config = config
self.act_space = act_space
self.tfstep = tfstep
discrete = hasattr(act_space, 'n')
if self.config.actor.dist == 'auto':
self.config = self.config.update({
'actor.dist': 'onehot' if discrete else 'trunc_normal'})
if self.config.actor_grad == 'auto':
self.config = self.config.update({
'actor_grad': 'reinforce' if discrete else 'dynamics'})
self.actor = common.MLP(act_space.shape[0], **self.config.actor)
self.critic = common.MLP([], **self.config.critic)
if self.config.slow_target:
self._target_critic = common.MLP([], **self.config.critic)
self._updates = tf.Variable(0, tf.int64)
else:
self._target_critic = self.critic
self.actor_opt = common.Optimizer('actor', **self.config.actor_opt)
self.critic_opt = common.Optimizer('critic', **self.config.critic_opt)
self.rewnorm = common.StreamNorm(**self.config.reward_norm)
def train(self, world_model, start, is_terminal, reward_fn):
metrics = {}
hor = self.config.imag_horizon
# The weights are is_terminal flags for the imagination start states.
# Technically, they should multiply the losses from the second trajectory
# step onwards, which is the first imagined step. However, we are not
# training the action that led into the first step anyway, so we can use
# them to scale the whole sequence.
with tf.GradientTape() as actor_tape:
seq = world_model.imagine(self.actor, start, is_terminal, hor)
reward, mets0 = reward_fn(seq)
seq['reward'], mets1 = self.rewnorm(reward)
mets1 = {f'reward_{k}': v for k, v in mets1.items()}
target, mets2 = self.target(seq)
actor_loss, mets3 = self.actor_loss(seq, target)
with tf.GradientTape() as critic_tape:
critic_loss, mets4 = self.critic_loss(seq, target)
metrics.update(self.actor_opt(actor_tape, actor_loss, self.actor))
metrics.update(self.critic_opt(critic_tape, critic_loss, self.critic))
metrics.update(**mets0, **mets1, **mets2, **mets3, **mets4)
self.update_slow_target() # Variables exist after first forward pass.
return metrics
def actor_loss(self, seq, target):
# Actions: 0 [a1] [a2] a3
# ^ | ^ | ^ |
# / v / v / v
# States: [z0]->[z1]-> z2 -> z3
# Targets: t0 [t1] [t2]
# Baselines: [v0] [v1] v2 v3
# Entropies: [e1] [e2]
# Weights: [ 1] [w1] w2 w3
# Loss: l1 l2
metrics = {}
    # Two states are lost at the end of the trajectory, one for the bootstrap
# value prediction and one because the corresponding action does not lead
# anywhere anymore. One target is lost at the start of the trajectory
# because the initial state comes from the replay buffer.
policy = self.actor(tf.stop_gradient(seq['feat'][:-2]))
if self.config.actor_grad == 'dynamics':
objective = target[1:]
elif self.config.actor_grad == 'reinforce':
baseline = self._target_critic(seq['feat'][:-2]).mode()
advantage = tf.stop_gradient(target[1:] - baseline)
objective = policy.log_prob(seq['action'][1:-1]) * advantage
elif self.config.actor_grad == 'both':
baseline = self._target_critic(seq['feat'][:-2]).mode()
advantage = tf.stop_gradient(target[1:] - baseline)
objective = policy.log_prob(seq['action'][1:-1]) * advantage
mix = common.schedule(self.config.actor_grad_mix, self.tfstep)
objective = mix * target[1:] + (1 - mix) * objective
metrics['actor_grad_mix'] = mix
else:
raise NotImplementedError(self.config.actor_grad)
ent = policy.entropy()
ent_scale = common.schedule(self.config.actor_ent, self.tfstep)
objective += ent_scale * ent
weight = tf.stop_gradient(seq['weight'])
actor_loss = -(weight[:-2] * objective).mean()
metrics['actor_ent'] = ent.mean()
metrics['actor_ent_scale'] = ent_scale
return actor_loss, metrics
def critic_loss(self, seq, target):
# States: [z0] [z1] [z2] z3
# Rewards: [r0] [r1] [r2] r3
# Values: [v0] [v1] [v2] v3
# Weights: [ 1] [w1] [w2] w3
# Targets: [t0] [t1] [t2]
# Loss: l0 l1 l2
dist = self.critic(seq['feat'][:-1])
target = tf.stop_gradient(target)
weight = tf.stop_gradient(seq['weight'])
critic_loss = -(dist.log_prob(target) * weight[:-1]).mean()
metrics = {'critic': dist.mode().mean()}
return critic_loss, metrics
def target(self, seq):
# States: [z0] [z1] [z2] [z3]
# Rewards: [r0] [r1] [r2] r3
# Values: [v0] [v1] [v2] [v3]
# Discount: [d0] [d1] [d2] d3
# Targets: t0 t1 t2
reward = tf.cast(seq['reward'], tf.float32)
disc = tf.cast(seq['discount'], tf.float32)
value = self._target_critic(seq['feat']).mode()
# Skipping last time step because it is used for bootstrapping.
target = common.lambda_return(
reward[:-1], value[:-1], disc[:-1],
bootstrap=value[-1],
lambda_=self.config.discount_lambda,
axis=0)
metrics = {'critic_slow': value.mean(), 'critic_target': target.mean()}
return target, metrics
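  # Copy critic weights into the target critic every `slow_target_update` steps,
  # mixing by `slow_target_fraction` (hard copy on the very first update).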
def update_slow_target(self):
if self.config.slow_target:
if self._updates % self.config.slow_target_update == 0:
mix = 1.0 if self._updates == 0 else float(
self.config.slow_target_fraction)
for s, d in zip(self.critic.variables, self._target_critic.variables):
d.assign(mix * s + (1 - mix) * d)
self._updates.assign_add(1)
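# Population variant of ActorCritic: trains `num_agents` explorers sequentially,
# maintaining a cascade buffer of subsampled imagined states that is passed to
# the reward function when training each explorer.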
class PopulationActorCritic(ActorCritic):
def __init__(self, config, act_space, tfstep):
self.config = config
self.act_space = act_space
self.tfstep = tfstep
self.num_agents = config.num_agents
discrete = hasattr(act_space, 'n')
if self.config.actor.dist == 'auto':
self.config = self.config.update({'actor.dist': 'onehot' if discrete else 'trunc_normal'})
if self.config.actor_grad == 'auto':
self.config = self.config.update({'actor_grad': 'reinforce'})
self.actor = [common.MLP(act_space.shape[0], **self.config.actor) for _ in range(self.num_agents)]
self.critic = [common.MLP([], **self.config.critic) for _ in range(self.num_agents)]
if self.config.slow_target:
self._target_critic = [common.MLP([], **self.config.critic) for _ in range(self.num_agents)]
self._updates = tf.Variable(0, tf.int64)
else:
self._target_critic = self.critic
self.actor_opt = [common.Optimizer('actor', **self.config.actor_opt) for _ in range(self.num_agents)]
self.critic_opt = [common.Optimizer('critic', **self.config.critic_opt) for _ in range(self.num_agents)]
self.rewnorm = [common.StreamNorm(**self.config.reward_norm) for _ in range(self.num_agents)]
def train_indiv(self, world_model, start, is_terminal, reward_fn, cascade, idx, return_seq=False):
metrics = {}
hor = self.config.imag_horizon
with tf.GradientTape() as actor_tape:
seq = world_model.imagine(self.actor[idx], start, is_terminal, hor)
reward, rew_meta = reward_fn(seq, cascade, rtn_meta=True)
seq['reward'], mets1 = self.rewnorm[idx](reward)
mets1 = {f'reward_{k}': v for k, v in mets1.items()}
target, mets2 = self.target(seq, idx)
actor_loss, mets3 = self.actor_loss(seq, target, idx)
with tf.GradientTape() as critic_tape:
critic_loss, mets4 = self.critic_loss(seq, target, idx)
actor_mets = self.actor_opt[idx](actor_tape, actor_loss, self.actor[idx])
critic_mets = self.critic_opt[idx](critic_tape, critic_loss, self.critic[idx])
#metrics.update(**mets1, **mets2, **mets3, **mets4, **actor_mets, **critic_mets)
metrics.update(**rew_meta)
self.update_slow_target(idx=idx) # Variables exist after first forward pass.
if return_seq:
return metrics, seq
else:
return metrics
def train(self, world_model, start, is_terminal, reward_fn):
metrics = {}
cascade = []
for agent_idx in range(self.num_agents):
# update state based cascade
if self.config.cascade_metric == "euclidean":
mets, seq = self.train_indiv(world_model, start, is_terminal, reward_fn, cascade, agent_idx, return_seq=True)
if agent_idx == 0:
idxs = tf.range(tf.shape(seq[self.config.cascade_feat])[1])
ridxs = tf.random.shuffle(idxs)[:10]
cascade.append(tf.gather(seq[self.config.cascade_feat], ridxs, axis=1))
else:
mets = self.train_indiv(world_model, start, is_terminal, reward_fn, cascade, agent_idx, return_seq=False)
metrics.update(**{f'agent{agent_idx}' + key: val for key, val in mets.items()})
print(f"Trained explorer {agent_idx}", flush=True)
return metrics
def actor_loss(self, seq, target, idx):
# See description in ActorCritic for more info
metrics = {}
policy = self.actor[idx](tf.stop_gradient(seq['feat'][:-2]))
if self.config.actor_grad == 'dynamics':
objective = target[1:]
elif self.config.actor_grad == 'reinforce':
baseline = self._target_critic[idx](seq['feat'][:-2]).mode()
advantage = tf.stop_gradient(target[1:] - baseline)
objective = policy.log_prob(seq['action'][1:-1]) * advantage
elif self.config.actor_grad == 'both':
baseline = self._target_critic[idx](seq['feat'][:-2]).mode()
advantage = tf.stop_gradient(target[1:] - baseline)
objective = policy.log_prob(seq['action'][1:-1]) * advantage
mix = common.schedule(self.config.actor_grad_mix, self.tfstep)
objective = mix * target[1:] + (1 - mix) * objective
metrics['actor_grad_mix'] = mix
else:
raise NotImplementedError(self.config.actor_grad)
ent = policy.entropy()
ent_scale = common.schedule(self.config.actor_ent, self.tfstep)
objective += ent_scale * ent
weight = tf.stop_gradient(seq['weight'])
actor_loss = -(weight[:-2] * objective).mean()
metrics['actor_ent'] = ent.mean()
metrics['actor_ent_scale'] = ent_scale
return actor_loss, metrics
def critic_loss(self, seq, target, idx):
# States: [z0] [z1] [z2] z3
# Rewards: [r0] [r1] [r2] r3
# Values: [v0] [v1] [v2] v3
# Weights: [ 1] [w1] [w2] w3
# Targets: [t0] [t1] [t2]
# Loss: l0 l1 l2
dist = self.critic[idx](seq['feat'][:-1])
target = tf.stop_gradient(target)
weight = tf.stop_gradient(seq['weight'])
critic_loss = -(dist.log_prob(target) * weight[:-1]).mean()
metrics = {'critic': dist.mode().mean()}
return critic_loss, metrics
def target(self, seq, idx):
# States: [z0] [z1] [z2] [z3]
# Rewards: [r0] [r1] [r2] r3
# Values: [v0] [v1] [v2] [v3]
# Discount: [d0] [d1] [d2] d3
# Targets: t0 t1 t2
reward = tf.cast(seq['reward'], tf.float32)
disc = tf.cast(seq['discount'], tf.float32)
value = self._target_critic[idx](seq['feat']).mode()
# Skipping last time step because it is used for bootstrapping.
target = common.lambda_return(reward[:-1], value[:-1], disc[:-1], bootstrap=value[-1], lambda_=self.config.discount_lambda, axis=0)
metrics = {'critic_slow': value.mean(), 'critic_target': target.mean()}
return target, metrics
def update_slow_target(self, idx=0):
if self.config.slow_target:
if self._updates % self.config.slow_target_update == 0:
mix = 1.0 if self._updates == 0 else float(self.config.slow_target_fraction)
for s, d in zip(self.critic[idx].variables, self._target_critic[idx].variables):
d.assign(mix * s + (1 - mix) * d)
self._updates.assign_add(1)
| cascade-main | dreamerv2/agent.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import pathlib
import re
import sys
import warnings
import pickle
try:
import rich.traceback
rich.traceback.install()
except ImportError:
pass
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
logging.getLogger().setLevel('ERROR')
warnings.filterwarnings('ignore', '.*box bound precision lowered.*')
sys.path.append(str(pathlib.Path(__file__).parent))
sys.path.append(str(pathlib.Path(__file__).parent.parent))
import numpy as np
import agent
import common
def run(config):
logdir = pathlib.Path(config.logdir + config.xpid).expanduser()
logdir.mkdir(parents=True, exist_ok=True)
config.save(logdir / 'config.yaml')
print(config, '\n')
print('Logdir', logdir)
import tensorflow as tf
tf.config.experimental_run_functions_eagerly(not config.jit)
  message = 'No GPU found. Training will run on CPU, which may be slow.'
if len(tf.config.experimental.list_physical_devices('GPU')) == 0:
print(message)
else:
for gpu in tf.config.experimental.list_physical_devices('GPU'):
tf.config.experimental.set_memory_growth(gpu, True)
assert config.precision in (16, 32), config.precision
if config.precision == 16:
from tensorflow.keras.mixed_precision import experimental as prec
prec.set_policy(prec.Policy('mixed_float16'))
## Load the stats that we keep track of
if (logdir / 'stats.pkl').exists():
stats = pickle.load(open(f"{logdir}/stats.pkl", "rb"))
print("Loaded stats: ", stats)
else:
stats = {
'num_deployments': 0,
'num_trains': 0,
'num_evals': 0
}
pickle.dump(stats, open(f"{logdir}/stats.pkl", "wb"))
multi_reward = config.task in common.DMC_TASK_IDS
replay_dir = logdir / 'train_episodes'
  ## Load dataset - we don't want to load offline data again if we have already deployed
if config.offline_dir == 'none' or stats['num_deployments'] > 0:
train_replay = common.Replay(replay_dir, offline_init=False,
multi_reward=multi_reward, **config.replay)
else:
train_replay = common.Replay(replay_dir, offline_init=True,
multi_reward=multi_reward, offline_directory=config.offline_dir, **config.replay)
eval_replay = common.Replay(logdir / 'eval_episodes', **dict(
capacity=config.replay.capacity // 10,
minlen=config.dataset.length,
maxlen=config.dataset.length,
multi_reward=multi_reward))
step = common.Counter(train_replay.stats['total_steps'])
outputs = [
common.TerminalOutput(),
common.JSONLOutput(logdir),
common.TensorBoardOutput(logdir),
]
logger = common.Logger(step, outputs, multiplier=config.action_repeat)
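  # Build a single environment from the task string, dispatching on the suite
  # prefix (dmc, atari, crafter, minigrid) and applying the common wrappers.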
def make_env(mode, seed=1):
if '_' in config.task:
suite, task = config.task.split('_', 1)
else:
suite, task = config.task, ''
if suite == 'dmc':
env = common.DMC(
task, config.action_repeat, config.render_size, config.dmc_camera, save_path=logdir / 'videos')
env = common.NormalizeAction(env)
elif suite == 'atari':
env = common.Atari(
task, config.action_repeat, config.render_size,
config.atari_grayscale, life_done=False, save_path=logdir / 'videos') # do not terminate on life loss
env = common.OneHotAction(env)
elif suite == 'crafter':
assert config.action_repeat == 1
outdir = logdir / 'crafter' if mode == 'train' else None
reward = bool(['noreward', 'reward'].index(task)) or mode == 'eval'
env = common.Crafter(outdir, reward, save_path=logdir / 'videos')
env = common.OneHotAction(env)
elif suite == 'minigrid':
if mode == 'eval':
env = common.make_minigrid_env(task, fix_seed=True, seed=seed)
else:
env = common.make_minigrid_env(task, fix_seed=False, seed=None)
else:
raise NotImplementedError(suite)
env = common.TimeLimit(env, config.time_limit)
return env
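  # Log episode length and returns (per-task returns for multi-reward DMC tasks)
  # and add the replay buffer statistics to the logger.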
def per_episode(ep, mode, task='none'):
length = len(ep['reward']) - 1
if task in common.DMC_TASK_IDS:
scores = {
key: np.sum([val[idx] for val in ep['reward'][1:]])
for idx, key in enumerate(common.DMC_TASK_IDS[task])}
print_rews = f'{mode.title()} episode has {length} steps and returns '
print_rews += ''.join([f"{key}:{np.round(val,1)} " for key,val in scores.items()])
print(print_rews)
for key,val in scores.items():
logger.scalar(f'{mode}_return_{key}', val)
else:
score = float(ep['reward'].astype(np.float64).sum())
print(f'{mode.title()} episode has {length} steps and return {score:.1f}.')
logger.scalar(f'{mode}_return', score)
logger.scalar(f'{mode}_length', length)
for key, value in ep.items():
if re.match(config.log_keys_sum, key):
logger.scalar(f'sum_{mode}_{key}', ep[key].sum())
if re.match(config.log_keys_mean, key):
logger.scalar(f'mean_{mode}_{key}', ep[key].mean())
if re.match(config.log_keys_max, key):
logger.scalar(f'max_{mode}_{key}', ep[key].max(0).mean())
replay = dict(train=train_replay, eval=eval_replay)[mode]
logger.add(replay.stats, prefix=mode)
logger.write()
print('Create envs.\n')
train_envs = [make_env('train') for _ in range(config.envs)]
eval_envs = [make_env('eval') for _ in range(config.eval_envs)]
act_space = train_envs[0].act_space
obs_space = train_envs[0].obs_space
train_driver = common.Driver(train_envs)
train_driver.on_episode(lambda ep: per_episode(ep, mode='train', task=config.task))
train_driver.on_step(lambda tran, worker: step.increment())
train_driver.on_step(train_replay.add_step)
train_driver.on_reset(train_replay.add_step)
eval_driver = common.Driver(eval_envs)
eval_driver.on_episode(eval_replay.add_episode)
eval_driver.on_episode(lambda ep: per_episode(ep, mode='eval', task=config.task))
if stats['num_deployments'] == 0:
if config.offline_dir == 'none':
prefill = max(0, config.train_every - train_replay.stats['total_steps'])
if prefill:
print(f'Prefill dataset ({prefill} steps).')
random_agent = common.RandomAgent(act_space)
train_driver(random_agent, steps=prefill, episodes=1, policy_idx=-1)
train_driver.reset()
eval_driver(random_agent, episodes=1, policy_idx=-1)
eval_driver.reset()
stats['num_deployments'] += 1
train_dataset = iter(train_replay.dataset(**config.offline_model_dataset))
print('Create agent.\n')
agnt = agent.Agent(config, obs_space, act_space, step)
train_agent = common.CarryOverState(agnt.train)
# Attempt to load pretrained full model.
# this can be used to test zero-shot performance on new tasks.
if config.load_pretrained != "none":
print("\nLoading pretrained model...")
train_agent(next(train_dataset))
path = pathlib.Path(config.load_pretrained).expanduser()
agnt.load(path)
## Assume we've done 1 full cycle
stats = {
'num_deployments': 1,
'num_trains': 1,
'num_evals': 1
}
print("\nSuccessfully loaded pretrained model.")
else:
print("\nInitializing agent...")
train_agent(next(train_dataset))
if (logdir / 'variables.pkl').exists():
print("\nStart loading model checkpoint...")
agnt.load(logdir / 'variables.pkl')
print("\nFinished initialize agent.")
# Initialize policies
eval_policies = {}
tasks = ['']
if config.task in common.DMC_TASK_IDS:
tasks = common.DMC_TASK_IDS[config.task]
for task in tasks:
    eval_policies[task] = lambda *args, task=task: agnt.policy(*args, mode='eval', goal=task)
expl_policies = {}
for idx in range(config.num_agents):
    expl_policies[idx] = lambda *args, idx=idx: agnt.policy(*args, policy_idx=idx, mode='explore')
## each loop we do one of the following:
# 1. deploy explorers to collect data
# 2. train WM, explorers, task policies etc.
# 3. evaluate models
while step < config.steps:
print(f"\nMain loop step {step.value}")
should_deploy = stats['num_deployments'] <= stats['num_evals']
should_train_wm = stats['num_trains'] < stats['num_deployments']
should_eval = stats['num_evals'] < stats['num_trains']
assert should_deploy + should_train_wm + should_eval == 1
if should_deploy:
print("\n\nStart collecting data...", flush=True)
## collect a batch of steps with the expl policy
## need to increment steps here
num_steps = int(config.train_every / config.num_agents)
for idx in range(config.num_agents):
expl_policy = expl_policies[idx]
train_driver(expl_policy, steps=num_steps, policy_idx=idx)
stats['num_deployments'] += 1
elif should_eval:
print('\n\nStart evaluation...', flush=True)
if int(step.value) % int(config.eval_every) != 0 or config.eval_type == 'none':
pass
elif config.eval_type == 'coincidental':
mets = common.eval(eval_driver, config, expl_policies, logdir)
for name, values in mets.items():
logger.scalar(name, np.array(values, np.float64).mean())
logger.write()
elif config.eval_type == 'labels':
tasks = ['']
if config.task in common.DMC_TASK_IDS:
tasks = common.DMC_TASK_IDS[config.task]
for idx, task in enumerate(tasks):
print("\n\nStart Evaluating " + task)
eval_policy = eval_policies[task]
eval_driver(eval_policy, episodes=config.eval_eps)
mets = common.get_stats(eval_driver, task=config.task, num_agents=config.num_agents, logdir=logdir)
rew = mets["eval_reward_" + task] if task != '' else mets["eval_reward"]
# logging
logger.scalar("eval_reward_" + task, np.mean(rew))
logger.write()
stats['num_evals'] += 1
elif should_train_wm:
print('\n\nStart model training...')
should_pretrain = (stats['num_trains'] == 0 and config.offline_dir != "none")
if should_pretrain:
# Use all offline data for pretrain
batch_size = config.offline_model_dataset["batch"] * config.offline_model_dataset["length"]
model_train_steps = train_replay._loaded_steps // batch_size - 1
else:
model_train_steps = config.offline_model_train_steps
model_step = common.Counter(0)
while model_step < model_train_steps:
model_step.increment()
mets = train_agent(next(train_dataset))
# save model every 1000
if int(model_step.value) % 1000 == 0:
agnt.save(logdir / 'variables.pkl')
stats['num_trains'] += 1
# save
pickle.dump(stats, open(f"{logdir}/stats.pkl", "wb"))
agnt.save(logdir / 'variables.pkl')
# closing all envs
for env in train_envs + eval_envs:
try:
env.close()
except Exception:
pass
| cascade-main | dreamerv2/train.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import atexit
import os
import sys
import threading
import traceback
import cloudpickle
import gym
import numpy as np
from .cdmc import make_dmc_all
from .recorder import Recorder
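# Wraps a Gym environment into the dict-based observation/action interface used
# here, adding reward, is_first, is_last and is_terminal keys to observations.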
class GymWrapper:
def __init__(self, env, obs_key='image', act_key='action'):
self._env = env
self._obs_is_dict = hasattr(self._env.observation_space, 'spaces')
self._act_is_dict = hasattr(self._env.action_space, 'spaces')
self._obs_key = obs_key
self._act_key = act_key
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def obs_space(self):
if self._obs_is_dict:
spaces = self._env.observation_space.spaces.copy()
else:
spaces = {self._obs_key: self._env.observation_space}
return {
**spaces,
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
}
@property
def act_space(self):
if self._act_is_dict:
return self._env.action_space.spaces.copy()
else:
return {self._act_key: self._env.action_space}
def step(self, action):
if not self._act_is_dict:
action = action[self._act_key]
obs, reward, done, _, info = self._env.step(action)
if not self._obs_is_dict:
obs = {self._obs_key: obs}
obs['reward'] = float(reward)
obs['is_first'] = False
obs['is_last'] = done
obs['is_terminal'] = info.get('is_terminal', done)
return obs
def reset(self):
obs = self._env.reset()[0]
if not self._obs_is_dict:
obs = {self._obs_key: obs}
# print("obs 1:", obs)
obs['reward'] = 0.0
obs['is_first'] = True
obs['is_last'] = False
obs['is_terminal'] = False
return obs
def make_minigrid_env(task, fix_seed, seed):
import gym_minigrid
env = gym.make("MiniGrid-"+task)
env = gym_minigrid.wrappers.RGBImgPartialObsWrapper(env)
if fix_seed:
env = gym_minigrid.wrappers.ReseedWrapper(env, seeds=[seed])
env = GymWrapper(env)
env = ResizeImage(env)
if hasattr(env.act_space['action'], 'n'):
env = OneHotAction(env)
else:
env = NormalizeAction(env)
return env
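# DeepMind Control Suite wrapper that renders image observations and supports
# the multi-reward '<domain>_all' tasks, which return one reward per sub-task.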
class DMC:
def __init__(self, name, action_repeat=1, size=(64, 64), camera=None, save_path=None):
os.environ['MUJOCO_GL'] = 'egl'
domain, task = name.split('_', 1)
if task == 'all':
self._dict_reward = True
else:
self._dict_reward = False
if domain == 'cup': # Only domain with multiple words.
domain = 'ball_in_cup'
if domain == 'manip':
from dm_control import manipulation
self._env = manipulation.load(task + '_vision')
elif domain == 'locom':
from dm_control.locomotion.examples import basic_rodent_2020
self._env = getattr(basic_rodent_2020, task)()
elif task == 'all':
import time
seed = int(str(int((time.time()*10000)))[-6:]) # random seed generator
self._env = make_dmc_all(domain,
task,
task_kwargs=dict(random=seed),
environment_kwargs=dict(flat_observation=True),
visualize_reward=False)
else:
from dm_control import suite
self._env = suite.load(domain, task)
self._action_repeat = action_repeat
self._size = size
if camera in (-1, None):
camera = dict(
quadruped_walk=2, quadruped_run=2, quadruped_escape=2,
quadruped_fetch=2, locom_rodent_maze_forage=1,
locom_rodent_two_touch=1,
).get(name, 0)
self._camera = camera
self._ignored_keys = []
save_path.mkdir(parents=True, exist_ok=True)
self.save_path = save_path
for key, value in self._env.observation_spec().items():
if value.shape == (0,):
print(f"Ignoring empty observation key '{key}'.")
self._ignored_keys.append(key)
@property
def obs_space(self):
spaces = {
'image': gym.spaces.Box(0, 255, self._size + (3,), dtype=np.uint8),
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
}
for key, value in self._env.observation_spec().items():
if key in self._ignored_keys:
continue
if value.dtype == np.float64:
spaces[key] = gym.spaces.Box(-np.inf, np.inf, value.shape, np.float32)
elif value.dtype == np.uint8:
spaces[key] = gym.spaces.Box(0, 255, value.shape, np.uint8)
else:
raise NotImplementedError(value.dtype)
return spaces
@property
def act_space(self):
spec = self._env.action_spec()
action = gym.spaces.Box(spec.minimum, spec.maximum, dtype=np.float32)
return {'action': action}
def step(self, action):
assert np.isfinite(action['action']).all(), action['action']
if self._dict_reward:
reward = []
else:
reward = 0.0
for _ in range(self._action_repeat):
time_step = self._env.step(action['action'])
if self._dict_reward:
curr_reward = []
for key, val in time_step.reward.items():
curr_reward.append(val)
if len(reward) == 0:
reward = curr_reward
else:
reward = [sum(x) for x in zip(reward, curr_reward)]
else:
reward += time_step.reward or 0.0
if time_step.last():
break
assert time_step.discount in (0, 1)
image = self._env.physics.render(*self._size, camera_id=self._camera)
obs = {
'reward': reward,
'is_first': False,
'is_last': time_step.last(),
'is_terminal': time_step.discount == 0,
'image': image,
}
obs.update({
k: v for k, v in dict(time_step.observation).items()
if k not in self._ignored_keys})
return obs
def reset(self):
time_step = self._env.reset()
obs = {
'reward': 0.0,
'is_first': True,
'is_last': False,
'is_terminal': False,
'image': self._env.physics.render(*self._size, camera_id=self._camera),
}
obs.update({
k: v for k, v in dict(time_step.observation).items()
if k not in self._ignored_keys})
return obs
class Atari:
LOCK = threading.Lock()
def __init__(
self, name, action_repeat=4, size=(84, 84), grayscale=True, noops=30,
life_done=False, sticky=True, all_actions=False, save_path=None):
assert size[0] == size[1]
import gym.wrappers
import gym.envs.atari
if name == 'james_bond':
name = 'jamesbond'
with self.LOCK:
env = gym.envs.atari.AtariEnv(
game=name, obs_type='rgb', frameskip=1,
repeat_action_probability=0.25 if sticky else 0.0,
full_action_space=all_actions)
# Avoid unnecessary rendering in inner env.
env._get_obs = lambda: None
# Tell wrapper that the inner env has no action repeat.
env.spec = gym.envs.registration.EnvSpec('NoFrameskip-v0')
self._env = gym.wrappers.AtariPreprocessing(
env, noops, action_repeat, size[0], life_done, grayscale)
save_path.mkdir(parents=True, exist_ok=True)
self.save_path = save_path
self._size = size
self._grayscale = grayscale
@property
def obs_space(self):
shape = self._size + (1 if self._grayscale else 3,)
return {
'image': gym.spaces.Box(0, 255, shape, np.uint8),
'ram': gym.spaces.Box(0, 255, (128,), np.uint8),
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
}
@property
def act_space(self):
return {'action': self._env.action_space}
def step(self, action):
image, reward, done, info = self._env.step(action['action'])
if self._grayscale:
image = image[..., None]
return {
'image': image,
'ram': self._env.env.ale.getRAM(), #if not self.record_video else self._env._env.env.ale.getRAM(),
'reward': reward,
'is_first': False,
'is_last': done,
'is_terminal': done,
}
def reset(self):
with self.LOCK:
image = self._env.reset()
if self._grayscale:
image = image[..., None]
return {
'image': image,
'ram': self._env.env.ale.getRAM(), #if not self.record_video else self._env._env.env.ale.getRAM(),
# 'ram': self._env.env._get_ram() if not self.record_video else self._env._env.env._get_ram(),
'reward': 0.0,
'is_first': True,
'is_last': False,
'is_terminal': False,
}
def close(self):
return self._env.close()
class Crafter:
def __init__(self, outdir=None, reward=True, seed=None, save_path=None):
import crafter
self._env = crafter.Env(reward=reward, seed=seed)
self._env = Recorder(
self._env, outdir,
save_stats=True,
save_video=False,
save_episode=False,
)
if save_path:
save_path.mkdir(parents=True, exist_ok=True)
self.save_path = save_path
self._achievements = crafter.constants.achievements.copy()
@property
def obs_space(self):
spaces = {
'image': self._env.observation_space,
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
'log_reward': gym.spaces.Box(-np.inf, np.inf, (), np.float32),
}
spaces.update({
f'log_achievement_{k}': gym.spaces.Box(0, 2 ** 31 - 1, (), np.int32)
for k in self._achievements})
return spaces
@property
def act_space(self):
return {'action': self._env.action_space}
def step(self, action):
image, reward, done, info = self._env.step(action['action'])
obs = {
'image': image,
'reward': reward,
'is_first': False,
'is_last': done,
'is_terminal': info['discount'] == 0,
'log_reward': info['reward'],
}
obs.update({
f'log_achievement_{k}': v
for k, v in info['achievements'].items()})
return obs
def reset(self):
obs = {
'image': self._env.reset(),
'reward': 0.0,
'is_first': True,
'is_last': False,
'is_terminal': False,
'log_reward': 0.0,
}
obs.update({
f'log_achievement_{k}': 0
for k in self._achievements})
return obs
class Dummy:
def __init__(self):
pass
@property
def obs_space(self):
return {
'image': gym.spaces.Box(0, 255, (64, 64, 3), dtype=np.uint8),
'reward': gym.spaces.Box(-np.inf, np.inf, (), dtype=np.float32),
'is_first': gym.spaces.Box(0, 1, (), dtype=bool),
'is_last': gym.spaces.Box(0, 1, (), dtype=bool),
'is_terminal': gym.spaces.Box(0, 1, (), dtype=bool),
}
@property
def act_space(self):
return {'action': gym.spaces.Box(-1, 1, (6,), dtype=np.float32)}
def step(self, action):
return {
'image': np.zeros((64, 64, 3)),
'reward': 0.0,
'is_first': False,
'is_last': False,
'is_terminal': False,
}
def reset(self):
return {
'image': np.zeros((64, 64, 3)),
'reward': 0.0,
'is_first': True,
'is_last': False,
'is_terminal': False,
}
class TimeLimit:
def __init__(self, env, duration):
self._env = env
self._duration = duration
self._step = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
def step(self, action):
assert self._step is not None, 'Must reset environment.'
obs = self._env.step(action)
self._step += 1
if self._duration and self._step >= self._duration:
obs['is_last'] = True
self._step = None
return obs
def reset(self):
self._step = 0
return self._env.reset()
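# Rescales bounded action dimensions to the range [-1, 1], leaving unbounded
# dimensions unchanged.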
class NormalizeAction:
def __init__(self, env, key='action'):
self._env = env
self._key = key
space = env.act_space[key]
self._mask = np.isfinite(space.low) & np.isfinite(space.high)
self._low = np.where(self._mask, space.low, -1)
self._high = np.where(self._mask, space.high, 1)
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def act_space(self):
low = np.where(self._mask, -np.ones_like(self._low), self._low)
high = np.where(self._mask, np.ones_like(self._low), self._high)
space = gym.spaces.Box(low, high, dtype=np.float32)
return {**self._env.act_space, self._key: space}
def step(self, action):
orig = (action[self._key] + 1) / 2 * (self._high - self._low) + self._low
orig = np.where(self._mask, orig, action[self._key])
return self._env.step({**action, self._key: orig})
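# Exposes a discrete action space as one-hot vectors and validates that incoming
# actions are valid one-hot encodings.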
class OneHotAction:
def __init__(self, env, key='action'):
assert hasattr(env.act_space[key], 'n')
self._env = env
self._key = key
self._random = np.random.RandomState()
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def act_space(self):
shape = (self._env.act_space[self._key].n,)
space = gym.spaces.Box(low=0, high=1, shape=shape, dtype=np.float32)
space.sample = self._sample_action
space.n = shape[0]
return {**self._env.act_space, self._key: space}
def step(self, action):
index = np.argmax(action[self._key]).astype(int)
reference = np.zeros_like(action[self._key])
reference[index] = 1
if not np.allclose(reference, action[self._key]):
raise ValueError(f'Invalid one-hot action:\n{action}')
return self._env.step({**action, self._key: index})
def reset(self):
return self._env.reset()
def _sample_action(self):
    actions = self._env.act_space[self._key].n
index = self._random.randint(0, actions)
reference = np.zeros(actions, dtype=np.float32)
reference[index] = 1.0
return reference
class ResizeImage:
def __init__(self, env, size=(64, 64)):
self._env = env
self._size = size
self._keys = [
k for k, v in env.obs_space.items()
if v.shape and len(v.shape) > 1 and v.shape[:2] != size]
print(f'Resizing keys {",".join(self._keys)} to {self._size}.')
if self._keys:
from PIL import Image
self._Image = Image
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def obs_space(self):
spaces = self._env.obs_space
new_space = {}
for key in self._keys:
shape = self._size + spaces[key].shape[2:]
new_space[key] = gym.spaces.Box(0, 255, shape, np.uint8)
return new_space
def step(self, action):
obs = self._env.step(action)
for key in self._keys:
obs[key] = self._resize(obs[key])
return obs
def reset(self):
obs = self._env.reset()
for key in self._keys:
obs[key] = self._resize(obs[key])
return obs
def _resize(self, image):
image = self._Image.fromarray(image)
image = image.resize(self._size, self._Image.NEAREST)
image = np.array(image)
return image
class RenderImage:
def __init__(self, env, key='image'):
self._env = env
self._key = key
self._shape = self._env.render().shape
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
try:
return getattr(self._env, name)
except AttributeError:
raise ValueError(name)
@property
def obs_space(self):
spaces = self._env.obs_space
spaces[self._key] = gym.spaces.Box(0, 255, self._shape, np.uint8)
return spaces
def step(self, action):
obs = self._env.step(action)
obs[self._key] = self._env.render('rgb_array')
return obs
def reset(self):
obs = self._env.reset()
obs[self._key] = self._env.render('rgb_array')
return obs
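# Runs an environment constructor in a separate process or thread and proxies
# attribute access, method calls, and steps over a pipe, returning promises.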
class Async:
# Message types for communication via the pipe.
_ACCESS = 1
_CALL = 2
_RESULT = 3
_CLOSE = 4
_EXCEPTION = 5
def __init__(self, constructor, strategy='thread'):
self._pickled_ctor = cloudpickle.dumps(constructor)
if strategy == 'process':
import multiprocessing as mp
context = mp.get_context('spawn')
elif strategy == 'thread':
import multiprocessing.dummy as context
else:
raise NotImplementedError(strategy)
self._strategy = strategy
self._conn, conn = context.Pipe()
self._process = context.Process(target=self._worker, args=(conn,))
atexit.register(self.close)
self._process.start()
self._receive() # Ready.
self._obs_space = None
self._act_space = None
def access(self, name):
self._conn.send((self._ACCESS, name))
return self._receive
def call(self, name, *args, **kwargs):
payload = name, args, kwargs
self._conn.send((self._CALL, payload))
return self._receive
def close(self):
try:
self._conn.send((self._CLOSE, None))
self._conn.close()
except IOError:
pass # The connection was already closed.
self._process.join(5)
@property
def obs_space(self):
if not self._obs_space:
self._obs_space = self.access('obs_space')()
return self._obs_space
@property
def act_space(self):
if not self._act_space:
self._act_space = self.access('act_space')()
return self._act_space
def step(self, action, blocking=False):
promise = self.call('step', action)
if blocking:
return promise()
else:
return promise
def reset(self, blocking=False):
promise = self.call('reset')
if blocking:
return promise()
else:
return promise
def _receive(self):
try:
message, payload = self._conn.recv()
except (OSError, EOFError):
raise RuntimeError('Lost connection to environment worker.')
# Re-raise exceptions in the main process.
if message == self._EXCEPTION:
stacktrace = payload
raise Exception(stacktrace)
if message == self._RESULT:
return payload
raise KeyError('Received message of unexpected type {}'.format(message))
def _worker(self, conn):
try:
ctor = cloudpickle.loads(self._pickled_ctor)
env = ctor()
conn.send((self._RESULT, None)) # Ready.
while True:
try:
# Only block for short times to have keyboard exceptions be raised.
if not conn.poll(0.1):
continue
message, payload = conn.recv()
except (EOFError, KeyboardInterrupt):
break
if message == self._ACCESS:
name = payload
result = getattr(env, name)
conn.send((self._RESULT, result))
continue
if message == self._CALL:
name, args, kwargs = payload
result = getattr(env, name)(*args, **kwargs)
conn.send((self._RESULT, result))
continue
if message == self._CLOSE:
break
raise KeyError('Received message of unknown type {}'.format(message))
except Exception:
stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))
print('Error in environment process: {}'.format(stacktrace))
conn.send((self._EXCEPTION, stacktrace))
finally:
try:
conn.close()
except IOError:
pass # The connection was already closed.
| cascade-main | dreamerv2/common/envs.py |
import argparse
import collections
import functools
import itertools
import json
import multiprocessing as mp
import os
import pathlib
import re
import subprocess
import warnings
os.environ['NO_AT_BRIDGE'] = '1' # Hide X org false warning.
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd
np.set_string_function(lambda x: f'<np.array shape={x.shape} dtype={x.dtype}>')
Run = collections.namedtuple('Run', 'task method seed xs ys')
PALETTES = dict(
discrete=(
'#377eb8', '#4daf4a', '#984ea3', '#e41a1c', '#ff7f00', '#a65628',
'#f781bf', '#888888', '#a6cee3', '#b2df8a', '#cab2d6', '#fb9a99',
),
contrast=(
'#0022ff', '#33aa00', '#ff0011', '#ddaa00', '#cc44dd', '#0088aa',
'#001177', '#117700', '#990022', '#885500', '#553366', '#006666',
),
gradient=(
'#fde725', '#a0da39', '#4ac16d', '#1fa187', '#277f8e', '#365c8d',
'#46327e', '#440154',
),
baselines=(
'#222222', '#666666', '#aaaaaa', '#cccccc',
),
)
LEGEND = dict(
fontsize='medium', numpoints=1, labelspacing=0, columnspacing=1.2,
handlelength=1.5, handletextpad=0.5, loc='lower center')
DEFAULT_BASELINES = ['d4pg', 'rainbow_sticky', 'human_gamer', 'impala']
def find_keys(args):
filenames = []
for indir in args.indir:
task = next(indir.iterdir()) # First only.
for method in task.iterdir():
      seed = next(method.iterdir())  # First only.
filenames += list(seed.glob('**/*.jsonl'))
keys = set()
for filename in filenames:
keys |= set(load_jsonl(filename).columns)
print(f'Keys ({len(keys)}):', ', '.join(keys), flush=True)
def load_runs(args):
total, toload = [], []
for indir in args.indir:
filenames = list(indir.glob('**/*.jsonl'))
total += filenames
for filename in filenames:
task, method, seed = filename.relative_to(indir).parts[:-1]
if not any(p.search(task) for p in args.tasks):
continue
if not any(p.search(method) for p in args.methods):
continue
toload.append((filename, indir))
print(f'Loading {len(toload)} of {len(total)} runs...')
jobs = [functools.partial(load_run, f, i, args) for f, i in toload]
# Disable async data loading:
# runs = [j() for j in jobs]
with mp.Pool(10) as pool:
promises = [pool.apply_async(j) for j in jobs]
runs = [p.get() for p in promises]
runs = [r for r in runs if r is not None]
return runs
def load_run(filename, indir, args):
task, method, seed = filename.relative_to(indir).parts[:-1]
prefix = f'indir{args.indir.index(indir)+1}_'
if task == 'atari_jamesbond':
task = 'atari_james_bond'
seed = prefix + seed
if args.prefix:
method = prefix + method
df = load_jsonl(filename)
if df is None:
print('Skipping empty run')
return
try:
df = df[[args.xaxis, args.yaxis]].dropna()
if args.maxval:
df = df.replace([+np.inf], +args.maxval)
df = df.replace([-np.inf], -args.maxval)
df[args.yaxis] = df[args.yaxis].clip(-args.maxval, +args.maxval)
except KeyError:
return
xs = df[args.xaxis].to_numpy()
if args.xmult != 1:
xs = xs.astype(np.float32) * args.xmult
ys = df[args.yaxis].to_numpy()
bins = {
'atari': 1e6,
'dmc': 1e4,
'crafter': 1e4,
}.get(task.split('_')[0], 1e5) if args.bins == -1 else args.bins
if bins:
borders = np.arange(0, xs.max() + 1e-8, bins)
xs, ys = bin_scores(xs, ys, borders)
if not len(xs):
print('Skipping empty run', task, method, seed)
return
return Run(task, method, seed, xs, ys)
def load_baselines(patterns, prefix=False):
runs = []
directory = pathlib.Path(__file__).parent.parent / 'scores'
for filename in directory.glob('**/*_baselines.json'):
for task, methods in json.loads(filename.read_text()).items():
for method, score in methods.items():
if prefix:
method = f'baseline_{method}'
if not any(p.search(method) for p in patterns):
continue
runs.append(Run(task, method, None, None, score))
return runs
def stats(runs, baselines):
tasks = sorted(set(r.task for r in runs))
methods = sorted(set(r.method for r in runs))
seeds = sorted(set(r.seed for r in runs))
baseline = sorted(set(r.method for r in baselines))
print('Loaded', len(runs), 'runs.')
print(f'Tasks ({len(tasks)}):', ', '.join(tasks))
print(f'Methods ({len(methods)}):', ', '.join(methods))
print(f'Seeds ({len(seeds)}):', ', '.join(seeds))
print(f'Baselines ({len(baseline)}):', ', '.join(baseline))
def order_methods(runs, baselines, args):
methods = []
for pattern in args.methods:
for method in sorted(set(r.method for r in runs)):
if pattern.search(method):
if method not in methods:
methods.append(method)
if method not in args.colors:
index = len(args.colors) % len(args.palette)
args.colors[method] = args.palette[index]
non_baseline_colors = len(args.colors)
for pattern in args.baselines:
for method in sorted(set(r.method for r in baselines)):
if pattern.search(method):
if method not in methods:
methods.append(method)
if method not in args.colors:
index = len(args.colors) - non_baseline_colors
index = index % len(PALETTES['baselines'])
args.colors[method] = PALETTES['baselines'][index]
return methods
def figure(runs, methods, args):
tasks = sorted(set(r.task for r in runs if r.xs is not None))
rows = int(np.ceil((len(tasks) + len(args.add)) / args.cols))
figsize = args.size[0] * args.cols, args.size[1] * rows
fig, axes = plt.subplots(rows, args.cols, figsize=figsize, squeeze=False)
for task, ax in zip(tasks, axes.flatten()):
relevant = [r for r in runs if r.task == task]
plot(task, ax, relevant, methods, args)
for name, ax in zip(args.add, axes.flatten()[len(tasks):]):
ax.set_facecolor((0.9, 0.9, 0.9))
if name == 'median':
plot_combined(
'combined_median', ax, runs, methods, args,
agg=lambda x: np.nanmedian(x, -1))
elif name == 'mean':
plot_combined(
'combined_mean', ax, runs, methods, args,
agg=lambda x: np.nanmean(x, -1))
elif name == 'gamer_median':
plot_combined(
'combined_gamer_median', ax, runs, methods, args,
lo='random', hi='human_gamer',
agg=lambda x: np.nanmedian(x, -1))
elif name == 'gamer_mean':
plot_combined(
'combined_gamer_mean', ax, runs, methods, args,
lo='random', hi='human_gamer',
agg=lambda x: np.nanmean(x, -1))
elif name == 'record_mean':
plot_combined(
'combined_record_mean', ax, runs, methods, args,
lo='random', hi='record',
agg=lambda x: np.nanmean(x, -1))
elif name == 'clip_record_mean':
plot_combined(
'combined_clipped_record_mean', ax, runs, methods, args,
lo='random', hi='record', clip=True,
agg=lambda x: np.nanmean(x, -1))
elif name == 'seeds':
plot_combined(
'combined_seeds', ax, runs, methods, args,
agg=lambda x: np.isfinite(x).sum(-1))
elif name == 'human_above':
plot_combined(
'combined_above_human_gamer', ax, runs, methods, args,
agg=lambda y: (y >= 1.0).astype(float).sum(-1))
elif name == 'human_below':
plot_combined(
'combined_below_human_gamer', ax, runs, methods, args,
agg=lambda y: (y <= 1.0).astype(float).sum(-1))
else:
raise NotImplementedError(name)
if args.xlim:
for ax in axes[:-1].flatten():
ax.xaxis.get_offset_text().set_visible(False)
if args.xlabel:
for ax in axes[-1]:
ax.set_xlabel(args.xlabel)
if args.ylabel:
for ax in axes[:, 0]:
ax.set_ylabel(args.ylabel)
for ax in axes.flatten()[len(tasks) + len(args.add):]:
ax.axis('off')
legend(fig, args.labels, ncol=args.legendcols, **LEGEND)
return fig
def plot(task, ax, runs, methods, args):
assert runs
try:
title = task.split('_', 1)[1].replace('_', ' ').title()
except IndexError:
title = task.title()
ax.set_title(title)
xlim = [+np.inf, -np.inf]
for index, method in enumerate(methods):
relevant = [r for r in runs if r.method == method]
if not relevant:
continue
if any(r.xs is None for r in relevant):
baseline(index, method, ax, relevant, args)
else:
if args.agg == 'none':
xs, ys = curve_lines(index, task, method, ax, relevant, args)
else:
xs, ys = curve_area(index, task, method, ax, relevant, args)
if len(xs) == len(ys) == 0:
print(f'Skipping empty: {task} {method}')
continue
xlim = [min(xlim[0], np.nanmin(xs)), max(xlim[1], np.nanmax(xs))]
ax.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))
steps = [1, 2, 2.5, 5, 10]
ax.xaxis.set_major_locator(ticker.MaxNLocator(args.xticks, steps=steps))
ax.yaxis.set_major_locator(ticker.MaxNLocator(args.yticks, steps=steps))
if np.isfinite(xlim).all():
ax.set_xlim(args.xlim or xlim)
if args.xlim:
ticks = sorted({*ax.get_xticks(), *args.xlim})
ticks = [x for x in ticks if args.xlim[0] <= x <= args.xlim[1]]
ax.set_xticks(ticks)
if args.ylim:
ax.set_ylim(args.ylim)
if args.ylimticks:
ticks = sorted({*ax.get_yticks(), *args.ylim})
ticks = [x for x in ticks if args.ylim[0] <= x <= args.ylim[1]]
ax.set_yticks(ticks)
def plot_combined(
name, ax, runs, methods, args, agg, lo=None, hi=None, clip=False):
tasks = sorted(set(run.task for run in runs if run.xs is not None))
seeds = list(set(run.seed for run in runs))
runs = [r for r in runs if r.task in tasks] # Discard unused baselines.
# Bin all runs onto the same X steps.
borders = sorted(
[r.xs for r in runs if r.xs is not None],
key=lambda x: np.nanmax(x))[-1]
for index, run in enumerate(runs):
if run.xs is None:
continue
xs, ys = bin_scores(run.xs, run.ys, borders, fill='last')
runs[index] = run._replace(xs=xs, ys=ys)
# Per-task normalization by low and high baseline.
if lo or hi:
mins = collections.defaultdict(list)
maxs = collections.defaultdict(list)
[mins[r.task].append(r.ys) for r in load_baselines([re.compile(lo)])]
[maxs[r.task].append(r.ys) for r in load_baselines([re.compile(hi)])]
mins = {task: min(ys) for task, ys in mins.items() if task in tasks}
maxs = {task: max(ys) for task, ys in maxs.items() if task in tasks}
missing_baselines = []
for task in tasks:
if task not in mins or task not in maxs:
missing_baselines.append(task)
if set(missing_baselines) == set(tasks):
print(f'No baselines found to normalize any tasks in {name} plot.')
else:
for task in missing_baselines:
print(f'No baselines found to normalize {task} in {name} plot.')
for index, run in enumerate(runs):
if run.task not in mins or run.task not in maxs:
continue
ys = (run.ys - mins[run.task]) / (maxs[run.task] - mins[run.task])
if clip:
ys = np.minimum(ys, 1.0)
runs[index] = run._replace(ys=ys)
# Aggregate across tasks but not methods or seeds.
combined = []
for method, seed in itertools.product(methods, seeds):
relevant = [r for r in runs if r.method == method and r.seed == seed]
if not relevant:
continue
if relevant[0].xs is None:
xs, ys = None, np.array([r.ys for r in relevant])
else:
xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in relevant]))
with warnings.catch_warnings(): # Ignore empty slice warnings.
warnings.simplefilter('ignore', category=RuntimeWarning)
combined.append(Run('combined', method, seed, xs, agg(ys)))
plot(name, ax, combined, methods, args)
def curve_lines(index, task, method, ax, runs, args):
zorder = 10000 - 10 * index - 1
for run in runs:
color = args.colors[method]
ax.plot(run.xs, run.ys, label=method, color=color, zorder=zorder)
xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in runs]))
return xs, ys
def curve_area(index, task, method, ax, runs, args):
xs, ys = stack_scores(*zip(*[(r.xs, r.ys) for r in runs]))
with warnings.catch_warnings(): # NaN buckets remain NaN.
warnings.simplefilter('ignore', category=RuntimeWarning)
if args.agg == 'std1':
mean, std = np.nanmean(ys, -1), np.nanstd(ys, -1)
lo, mi, hi = mean - std, mean, mean + std
elif args.agg == 'per0':
lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (0, 50, 100)]
elif args.agg == 'per5':
lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (5, 50, 95)]
elif args.agg == 'per25':
lo, mi, hi = [np.nanpercentile(ys, k, -1) for k in (25, 50, 75)]
else:
raise NotImplementedError(args.agg)
color = args.colors[method]
kw = dict(color=color, zorder=1000 - 10 * index, alpha=0.1, linewidths=0)
mask = ~np.isnan(mi)
xs, lo, mi, hi = xs[mask], lo[mask], mi[mask], hi[mask]
ax.fill_between(xs, lo, hi, **kw)
ax.plot(xs, mi, label=method, color=color, zorder=10000 - 10 * index - 1)
return xs, mi
def baseline(index, method, ax, runs, args):
assert all(run.xs is None for run in runs)
ys = np.array([run.ys for run in runs])
mean, std = ys.mean(), ys.std()
color = args.colors[method]
kw = dict(color=color, zorder=500 - 20 * index - 1, alpha=0.1, linewidths=0)
ax.fill_between([-np.inf, np.inf], [mean - std] * 2, [mean + std] * 2, **kw)
kw = dict(ls='--', color=color, zorder=5000 - 10 * index - 1)
ax.axhline(mean, label=method, **kw)
def legend(fig, mapping=None, **kwargs):
entries = {}
for ax in fig.axes:
for handle, label in zip(*ax.get_legend_handles_labels()):
if mapping and label in mapping:
label = mapping[label]
entries[label] = handle
leg = fig.legend(entries.values(), entries.keys(), **kwargs)
leg.get_frame().set_edgecolor('white')
extent = leg.get_window_extent(fig.canvas.get_renderer())
extent = extent.transformed(fig.transFigure.inverted())
yloc, xloc = kwargs['loc'].split()
y0 = dict(lower=extent.y1, center=0, upper=0)[yloc]
y1 = dict(lower=1, center=1, upper=extent.y0)[yloc]
x0 = dict(left=extent.x1, center=0, right=0)[xloc]
x1 = dict(left=1, center=1, right=extent.x0)[xloc]
fig.tight_layout(rect=[x0, y0, x1, y1], h_pad=0.5, w_pad=0.5)
def save(fig, args):
args.outdir.mkdir(parents=True, exist_ok=True)
filename = args.outdir / 'curves.png'
fig.savefig(filename, dpi=args.dpi)
print('Saved to', filename)
filename = args.outdir / 'curves.pdf'
fig.savefig(filename)
try:
subprocess.call(['pdfcrop', str(filename), str(filename)])
except FileNotFoundError:
print('Install texlive-extra-utils to crop PDF outputs.')
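# Aggregate (x, y) points into the given bins, reducing the values in each bin
# (mean by default) and optionally filling empty bins with the previous value.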
def bin_scores(xs, ys, borders, reducer=np.nanmean, fill='nan'):
order = np.argsort(xs)
xs, ys = xs[order], ys[order]
binned = []
with warnings.catch_warnings(): # Empty buckets become NaN.
warnings.simplefilter('ignore', category=RuntimeWarning)
for start, stop in zip(borders[:-1], borders[1:]):
left = (xs <= start).sum()
right = (xs <= stop).sum()
if left < right:
value = reducer(ys[left:right])
elif binned:
value = {'nan': np.nan, 'last': binned[-1]}[fill]
else:
value = np.nan
binned.append(value)
return borders[1:], np.array(binned)
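# Pad runs of different lengths to the longest x-axis and stack their y-values
# into a single array with one column per run.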
def stack_scores(multiple_xs, multiple_ys, fill='last'):
longest_xs = sorted(multiple_xs, key=lambda x: len(x))[-1]
multiple_padded_ys = []
for xs, ys in zip(multiple_xs, multiple_ys):
assert (longest_xs[:len(xs)] == xs).all(), (list(xs), list(longest_xs))
value = {'nan': np.nan, 'last': ys[-1]}[fill]
padding = [value] * (len(longest_xs) - len(xs))
padded_ys = np.concatenate([ys, padding])
multiple_padded_ys.append(padded_ys)
stacked_ys = np.stack(multiple_padded_ys, -1)
return longest_xs, stacked_ys
def load_jsonl(filename):
try:
with filename.open() as f:
lines = list(f.readlines())
records = []
for index, line in enumerate(lines):
try:
records.append(json.loads(line))
except Exception:
if index == len(lines) - 1:
continue # Silently skip last line if it is incomplete.
          raise ValueError(
              f'Invalid JSON line ({index+1}/{len(lines)}) in '
              f'{filename}: {line}')
return pd.DataFrame(records)
except ValueError as e:
print('Invalid', filename, e)
return None
def save_runs(runs, filename):
filename.parent.mkdir(parents=True, exist_ok=True)
records = []
for run in runs:
if run.xs is None:
continue
records.append(dict(
task=run.task, method=run.method, seed=run.seed,
xs=run.xs.tolist(), ys=run.ys.tolist()))
runs = json.dumps(records)
filename.write_text(runs)
print('Saved', filename)
def main(args):
find_keys(args)
runs = load_runs(args)
save_runs(runs, args.outdir / 'runs.json')
baselines = load_baselines(args.baselines, args.prefix)
stats(runs, baselines)
methods = order_methods(runs, baselines, args)
if not runs:
    print('Nothing to plot.')
return
# Adjust options based on loaded runs.
tasks = set(r.task for r in runs)
if 'auto' in args.add:
index = args.add.index('auto')
del args.add[index]
atari = any(run.task.startswith('atari_') for run in runs)
if len(tasks) < 2:
pass
elif atari:
args.add[index:index] = [
'gamer_median', 'gamer_mean', 'record_mean', 'clip_record_mean',
]
else:
args.add[index:index] = ['mean', 'median']
args.cols = min(args.cols, len(tasks) + len(args.add))
args.legendcols = min(args.legendcols, args.cols)
print('Plotting...')
fig = figure(runs + baselines, methods, args)
save(fig, args)
def parse_args():
boolean = lambda x: bool(['False', 'True'].index(x))
parser = argparse.ArgumentParser()
parser.add_argument('--indir', nargs='+', type=pathlib.Path, required=True)
parser.add_argument('--indir-prefix', type=pathlib.Path)
parser.add_argument('--outdir', type=pathlib.Path, required=True)
parser.add_argument('--subdir', type=boolean, default=True)
parser.add_argument('--xaxis', type=str, default='step')
parser.add_argument('--yaxis', type=str, default='eval_return')
parser.add_argument('--tasks', nargs='+', default=[r'.*'])
parser.add_argument('--methods', nargs='+', default=[r'.*'])
parser.add_argument('--baselines', nargs='+', default=DEFAULT_BASELINES)
parser.add_argument('--prefix', type=boolean, default=False)
parser.add_argument('--bins', type=float, default=-1)
parser.add_argument('--agg', type=str, default='std1')
parser.add_argument('--size', nargs=2, type=float, default=[2.5, 2.3])
parser.add_argument('--dpi', type=int, default=80)
parser.add_argument('--cols', type=int, default=6)
parser.add_argument('--xlim', nargs=2, type=float, default=None)
parser.add_argument('--ylim', nargs=2, type=float, default=None)
parser.add_argument('--ylimticks', type=boolean, default=True)
parser.add_argument('--xlabel', type=str, default=None)
parser.add_argument('--ylabel', type=str, default=None)
parser.add_argument('--xticks', type=int, default=6)
parser.add_argument('--yticks', type=int, default=5)
parser.add_argument('--xmult', type=float, default=1)
parser.add_argument('--labels', nargs='+', default=None)
parser.add_argument('--palette', nargs='+', default=['contrast'])
parser.add_argument('--legendcols', type=int, default=4)
parser.add_argument('--colors', nargs='+', default={})
parser.add_argument('--maxval', type=float, default=0)
parser.add_argument('--add', nargs='+', type=str, default=['auto', 'seeds'])
args = parser.parse_args()
if args.subdir:
args.outdir /= args.indir[0].stem
if args.indir_prefix:
args.indir = [args.indir_prefix / indir for indir in args.indir]
args.indir = [d.expanduser() for d in args.indir]
args.outdir = args.outdir.expanduser()
if args.labels:
assert len(args.labels) % 2 == 0
args.labels = {k: v for k, v in zip(args.labels[:-1], args.labels[1:])}
if args.colors:
assert len(args.colors) % 2 == 0
args.colors = {k: v for k, v in zip(args.colors[:-1], args.colors[1:])}
args.tasks = [re.compile(p) for p in args.tasks]
args.methods = [re.compile(p) for p in args.methods]
args.baselines = [re.compile(p) for p in args.baselines]
if 'return' not in args.yaxis:
args.baselines = []
if args.prefix is None:
args.prefix = len(args.indir) > 1
if len(args.palette) == 1 and args.palette[0] in PALETTES:
args.palette = 10 * PALETTES[args.palette[0]]
if len(args.add) == 1 and args.add[0] == 'none':
args.add = []
return args
if __name__ == '__main__':
main(parse_args())
| cascade-main | dreamerv2/common/plot.py |
import json
import pathlib
import re
class Config(dict):
SEP = '.'
IS_PATTERN = re.compile(r'.*[^A-Za-z0-9_.-].*')
def __init__(self, *args, **kwargs):
mapping = dict(*args, **kwargs)
mapping = self._flatten(mapping)
mapping = self._ensure_keys(mapping)
mapping = self._ensure_values(mapping)
self._flat = mapping
self._nested = self._nest(mapping)
# Need to assign the values to the base class dictionary so that
# conversion to dict does not lose the content.
super().__init__(self._nested)
@property
def flat(self):
return self._flat.copy()
def save(self, filename):
filename = pathlib.Path(filename)
if filename.suffix == '.json':
filename.write_text(json.dumps(dict(self)))
elif filename.suffix in ('.yml', '.yaml'):
import ruamel.yaml as yaml
with filename.open('w') as f:
yaml.safe_dump(dict(self), f)
else:
raise NotImplementedError(filename.suffix)
@classmethod
def load(cls, filename):
filename = pathlib.Path(filename)
if filename.suffix == '.json':
return cls(json.loads(filename.read_text()))
elif filename.suffix in ('.yml', '.yaml'):
import ruamel.yaml as yaml
return cls(yaml.safe_load(filename.read_text()))
else:
raise NotImplementedError(filename.suffix)
def parse_flags(self, argv=None, known_only=False, help_exists=None):
from . import flags
return flags.Flags(self).parse(argv, known_only, help_exists)
def __contains__(self, name):
try:
self[name]
return True
except KeyError:
return False
def __getattr__(self, name):
if name.startswith('_'):
return super().__getattr__(name)
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, name):
result = self._nested
for part in name.split(self.SEP):
result = result[part]
if isinstance(result, dict):
result = type(self)(result)
return result
def __setattr__(self, key, value):
if key.startswith('_'):
return super().__setattr__(key, value)
message = f"Tried to set key '{key}' on immutable config. Use update()."
raise AttributeError(message)
def __setitem__(self, key, value):
if key.startswith('_'):
return super().__setitem__(key, value)
message = f"Tried to set key '{key}' on immutable config. Use update()."
raise AttributeError(message)
def __reduce__(self):
return (type(self), (dict(self),))
def __str__(self):
lines = ['\nConfig:']
keys, vals, typs = [], [], []
for key, val in self.flat.items():
keys.append(key + ':')
vals.append(self._format_value(val))
typs.append(self._format_type(val))
max_key = max(len(k) for k in keys) if keys else 0
max_val = max(len(v) for v in vals) if vals else 0
for key, val, typ in zip(keys, vals, typs):
key = key.ljust(max_key)
val = val.ljust(max_val)
lines.append(f'{key} {val} ({typ})')
return '\n'.join(lines)
def update(self, *args, **kwargs):
result = self._flat.copy()
inputs = self._flatten(dict(*args, **kwargs))
for key, new in inputs.items():
if self.IS_PATTERN.match(key):
pattern = re.compile(key)
keys = {k for k in result if pattern.match(k)}
else:
keys = [key]
if not keys:
raise KeyError(f'Unknown key or pattern {key}.')
for key in keys:
old = result[key]
try:
if isinstance(old, int) and isinstance(new, float):
if float(int(new)) != new:
message = f"Cannot convert fractional float {new} to int."
raise ValueError(message)
result[key] = type(old)(new)
except (ValueError, TypeError):
raise TypeError(
f"Cannot convert '{new}' to type '{type(old).__name__}' " +
f"of value '{old}' for key '{key}'.")
return type(self)(result)
def _flatten(self, mapping):
result = {}
for key, value in mapping.items():
if isinstance(value, dict):
for k, v in self._flatten(value).items():
if self.IS_PATTERN.match(key) or self.IS_PATTERN.match(k):
combined = f'{key}\\{self.SEP}{k}'
else:
combined = f'{key}{self.SEP}{k}'
result[combined] = v
else:
result[key] = value
return result
def _nest(self, mapping):
result = {}
for key, value in mapping.items():
parts = key.split(self.SEP)
node = result
for part in parts[:-1]:
if part not in node:
node[part] = {}
node = node[part]
node[parts[-1]] = value
return result
def _ensure_keys(self, mapping):
for key in mapping:
assert not self.IS_PATTERN.match(key), key
return mapping
def _ensure_values(self, mapping):
result = json.loads(json.dumps(mapping))
for key, value in result.items():
if isinstance(value, list):
value = tuple(value)
if isinstance(value, tuple):
if len(value) == 0:
message = 'Empty lists are disallowed because their type is unclear.'
raise TypeError(message)
if not isinstance(value[0], (str, float, int, bool)):
message = 'Lists can only contain strings, floats, ints, bools'
message += f' but not {type(value[0])}'
raise TypeError(message)
if not all(isinstance(x, type(value[0])) for x in value[1:]):
message = 'Elements of a list must all be of the same type.'
raise TypeError(message)
result[key] = value
return result
def _format_value(self, value):
if isinstance(value, (list, tuple)):
return '[' + ', '.join(self._format_value(x) for x in value) + ']'
return str(value)
def _format_type(self, value):
if isinstance(value, (list, tuple)):
assert len(value) > 0, value
return self._format_type(value[0]) + 's'
return str(type(value).__name__)
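# Minimal usage sketch (the keys and values below are made up for
# illustration; this helper is not used elsewhere):
def _config_usage_example():
  defaults = Config({'logdir': '~/logdir', 'rssm': {'hidden': 200, 'deter': 200}})
  # Values are reachable both as attributes and via dotted keys.
  assert defaults.rssm.hidden == 200
  assert defaults['rssm.deter'] == 200
  # Configs are immutable; update() returns a new Config. Keys containing
  # regex characters update every matching flat key at once.
  updated = defaults.update({'rssm.*': 400})
  assert updated.rssm.hidden == 400 and updated.rssm.deter == 400
  return updated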
| cascade-main | dreamerv2/common/config.py |
import pathlib
import pickle
import re
import numpy as np
import tensorflow as tf
from tensorflow.keras import mixed_precision as prec
try:
from tensorflow.python.distribute import values
except Exception:
from google3.third_party.tensorflow.python.distribute import values
tf.tensor = tf.convert_to_tensor
for base in (tf.Tensor, tf.Variable, values.PerReplica):
base.mean = tf.math.reduce_mean
base.std = tf.math.reduce_std
base.var = tf.math.reduce_variance
base.sum = tf.math.reduce_sum
base.any = tf.math.reduce_any
base.all = tf.math.reduce_all
base.min = tf.math.reduce_min
base.max = tf.math.reduce_max
base.abs = tf.math.abs
base.logsumexp = tf.math.reduce_logsumexp
base.transpose = tf.transpose
base.reshape = tf.reshape
base.astype = tf.cast
# values.PerReplica.dtype = property(lambda self: self.values[0].dtype)
# tf.TensorHandle.__repr__ = lambda x: '<tensor>'
# tf.TensorHandle.__str__ = lambda x: '<tensor>'
# np.set_printoptions(threshold=5, edgeitems=0)
class Module(tf.Module):
def save(self, filename):
values = tf.nest.map_structure(lambda x: x.numpy(), self.variables)
amount = len(tf.nest.flatten(values))
count = int(sum(np.prod(x.shape) for x in tf.nest.flatten(values)))
print(f'Save checkpoint with {amount} tensors and {count} parameters.')
with pathlib.Path(filename).open('wb') as f:
pickle.dump(values, f)
def load(self, filename):
with pathlib.Path(filename).open('rb') as f:
values = pickle.load(f)
amount = len(tf.nest.flatten(values))
count = int(sum(np.prod(x.shape) for x in tf.nest.flatten(values)))
print(f'Load checkpoint with {amount} tensors and {count} parameters.')
amount_agent = len(tf.nest.flatten(self.variables))
count_agent = int(sum(np.prod(x.shape) for x in tf.nest.flatten(self.variables)))
print(f'Agent checkpoint has {amount_agent} tensors and {count_agent} parameters.')
tf.nest.map_structure(lambda x, y: x.assign(y), self.variables, values)
def get(self, name, ctor, *args, **kwargs):
# Create or get layer by name to avoid mentioning it in the constructor.
if not hasattr(self, '_modules'):
self._modules = {}
if name not in self._modules:
self._modules[name] = ctor(*args, **kwargs)
return self._modules[name]
class Optimizer(tf.Module):
def __init__(
self, name, lr, eps=1e-4, clip=None, wd=None,
opt='adam', wd_pattern=r'.*'):
assert 0 <= wd < 1
assert not clip or 1 <= clip
self._name = name
self._clip = clip
self._wd = wd
self._wd_pattern = wd_pattern
self._opt = {
'adam': lambda: tf.optimizers.Adam(lr, epsilon=eps),
'nadam': lambda: tf.optimizers.Nadam(lr, epsilon=eps),
'adamax': lambda: tf.optimizers.Adamax(lr, epsilon=eps),
'sgd': lambda: tf.optimizers.SGD(lr),
'momentum': lambda: tf.optimizers.SGD(lr, 0.9),
}[opt]()
self._mixed = (prec.global_policy().compute_dtype == tf.float16)
if self._mixed:
self._opt = prec.LossScaleOptimizer(self._opt, dynamic=True)
self._once = True
@property
def variables(self):
return self._opt.variables()
def __call__(self, tape, loss, modules):
assert loss.dtype is tf.float32, (self._name, loss.dtype)
assert len(loss.shape) == 0, (self._name, loss.shape)
metrics = {}
# Find variables.
modules = modules if hasattr(modules, '__len__') else (modules,)
varibs = tf.nest.flatten([module.variables for module in modules])
count = sum(np.prod(x.shape) for x in varibs)
if self._once:
print(f'Found {count} {self._name} parameters.')
self._once = False
# Check loss.
tf.debugging.check_numerics(loss, self._name + '_loss')
metrics[f'{self._name}_loss'] = loss
# Compute scaled gradient.
if self._mixed:
with tape:
loss = self._opt.get_scaled_loss(loss)
grads = tape.gradient(loss, varibs)
if self._mixed:
grads = self._opt.get_unscaled_gradients(grads)
if self._mixed:
metrics[f'{self._name}_loss_scale'] = self._opt.loss_scale
# Distributed sync.
context = tf.distribute.get_replica_context()
if context:
grads = context.all_reduce('mean', grads)
# Gradient clipping.
norm = tf.linalg.global_norm(grads)
if not self._mixed:
tf.debugging.check_numerics(norm, self._name + '_norm')
if self._clip:
grads, _ = tf.clip_by_global_norm(grads, self._clip, norm)
metrics[f'{self._name}_grad_norm'] = norm
# Weight decay.
if self._wd:
self._apply_weight_decay(varibs)
# Apply gradients.
self._opt.apply_gradients(
zip(grads, varibs),
experimental_aggregate_gradients=False)
return metrics
def _apply_weight_decay(self, varibs):
nontrivial = (self._wd_pattern != r'.*')
if nontrivial:
print('Applied weight decay to variables:')
for var in varibs:
if re.search(self._wd_pattern, self._name + '/' + var.name):
if nontrivial:
print('- ' + self._name + '/' + var.name)
var.assign((1 - self._wd) * var)
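# Illustrative sketch of how Module.get and Optimizer are typically wired
# together (layer sizes, names, and data below are arbitrary examples; note
# that wd has no usable default, so it is passed explicitly here):
def _tfutils_usage_example():
  from tensorflow.keras import layers as tfkl

  class TinyModel(Module):
    def __call__(self, x):
      # get() lazily creates a layer on first use and reuses it afterwards.
      x = self.get('dense', tfkl.Dense, 8)(x)
      return self.get('out', tfkl.Dense, 1)(x)

  model = TinyModel()
  opt = Optimizer('tiny', lr=1e-3, wd=0.0)  # constructor asserts 0 <= wd < 1
  data = tf.random.normal((16, 4))
  target = tf.zeros((16, 1))
  with tf.GradientTape() as tape:
    loss = tf.reduce_mean((model(data) - target) ** 2)
  metrics = opt(tape, loss, model)  # returns e.g. tiny_loss, tiny_grad_norm
  return metrics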
| cascade-main | dreamerv2/common/tfutils.py |
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability import distributions as tfd
# Patch to ignore seed to avoid synchronization across GPUs.
_orig_random_categorical = tf.random.categorical
def random_categorical(*args, **kwargs):
kwargs['seed'] = None
return _orig_random_categorical(*args, **kwargs)
tf.random.categorical = random_categorical
# Patch to ignore seed to avoid synchronization across GPUs.
_orig_random_normal = tf.random.normal
def random_normal(*args, **kwargs):
kwargs['seed'] = None
return _orig_random_normal(*args, **kwargs)
tf.random.normal = random_normal
class SampleDist:
def __init__(self, dist, samples=100):
self._dist = dist
self._samples = samples
@property
def name(self):
return 'SampleDist'
def __getattr__(self, name):
return getattr(self._dist, name)
def mean(self):
samples = self._dist.sample(self._samples)
return samples.mean(0)
def mode(self):
sample = self._dist.sample(self._samples)
logprob = self._dist.log_prob(sample)
return tf.gather(sample, tf.argmax(logprob))[0]
def entropy(self):
sample = self._dist.sample(self._samples)
logprob = self.log_prob(sample)
return -logprob.mean(0)
class OneHotDist(tfd.OneHotCategorical):
def __init__(self, logits=None, probs=None, dtype=None):
self._sample_dtype = dtype or tf.float32
super().__init__(logits=logits, probs=probs)
def mode(self):
return tf.cast(super().mode(), self._sample_dtype)
def sample(self, sample_shape=(), seed=None):
# Straight through biased gradient estimator.
sample = tf.cast(super().sample(sample_shape, seed), self._sample_dtype)
probs = self._pad(super().probs_parameter(), sample.shape)
sample += tf.cast(probs - tf.stop_gradient(probs), self._sample_dtype)
return sample
def _pad(self, tensor, shape):
tensor = super().probs_parameter()
while len(tensor.shape) < len(shape):
tensor = tensor[None]
return tensor
class TruncNormalDist(tfd.TruncatedNormal):
def __init__(self, loc, scale, low, high, clip=1e-6, mult=1):
super().__init__(loc, scale, low, high)
self._clip = clip
self._mult = mult
def sample(self, *args, **kwargs):
event = super().sample(*args, **kwargs)
if self._clip:
clipped = tf.clip_by_value(
event, self.low + self._clip, self.high - self._clip)
event = event - tf.stop_gradient(event) + tf.stop_gradient(clipped)
if self._mult:
event *= self._mult
return event
class TanhBijector(tfp.bijectors.Bijector):
def __init__(self, validate_args=False, name='tanh'):
super().__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
return tf.nn.tanh(x)
def _inverse(self, y):
dtype = y.dtype
y = tf.cast(y, tf.float32)
y = tf.where(
tf.less_equal(tf.abs(y), 1.),
tf.clip_by_value(y, -0.99999997, 0.99999997), y)
y = tf.atanh(y)
y = tf.cast(y, dtype)
return y
def _forward_log_det_jacobian(self, x):
log2 = tf.math.log(tf.constant(2.0, dtype=x.dtype))
return 2.0 * (log2 - x - tf.nn.softplus(-2.0 * x))
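# Small sketch of the straight-through estimator used by OneHotDist above:
# the forward sample is a discrete one-hot vector, but the added
# (probs - stop_gradient(probs)) term lets gradients reach the logits.
# (Illustrative only; the logits and pseudo-loss are arbitrary.)
def _onehot_straight_through_example():
  logits = tf.Variable([1.0, 2.0, 0.5])
  with tf.GradientTape() as tape:
    dist = OneHotDist(logits=logits)
    sample = dist.sample()  # one-hot, e.g. [0., 1., 0.]
    pseudo_loss = tf.reduce_sum(sample * tf.range(3, dtype=tf.float32))
  grads = tape.gradient(pseudo_loss, logits)  # not None thanks to the trick
  return sample, grads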
| cascade-main | dreamerv2/common/dists.py |
import re
import sys
class Flags:
def __init__(self, *args, **kwargs):
from .config import Config
self._config = Config(*args, **kwargs)
def parse(self, argv=None, known_only=False, help_exists=None):
if help_exists is None:
help_exists = not known_only
if argv is None:
argv = sys.argv[1:]
if '--help' in argv:
print('\nHelp:')
lines = str(self._config).split('\n')[2:]
print('\n'.join('--' + re.sub(r'[:,\[\]]', '', x) for x in lines))
help_exists and sys.exit()
parsed = {}
remaining = []
key = None
vals = None
for arg in argv:
if arg.startswith('--'):
if key:
self._submit_entry(key, vals, parsed, remaining)
if '=' in arg:
key, val = arg.split('=', 1)
vals = [val]
else:
key, vals = arg, []
else:
if key:
vals.append(arg)
else:
remaining.append(arg)
self._submit_entry(key, vals, parsed, remaining)
parsed = self._config.update(parsed)
if known_only:
return parsed, remaining
else:
for flag in remaining:
if flag.startswith('--'):
raise ValueError(f"Flag '{flag}' did not match any config keys.")
assert not remaining, remaining
return parsed
def _submit_entry(self, key, vals, parsed, remaining):
if not key and not vals:
return
if not key:
vals = ', '.join(f"'{x}'" for x in vals)
raise ValueError(f"Values {vals} were not preceeded by any flag.")
name = key[len('--'):]
if '=' in name:
remaining.extend([key] + vals)
return
if self._config.IS_PATTERN.match(name):
pattern = re.compile(name)
keys = {k for k in self._config.flat if pattern.match(k)}
elif name in self._config:
keys = [name]
else:
keys = []
if not keys:
remaining.extend([key] + vals)
return
if not vals:
raise ValueError(f"Flag '{key}' was not followed by any values.")
for key in keys:
parsed[key] = self._parse_flag_value(self._config[key], vals, key)
def _parse_flag_value(self, default, value, key):
value = value if isinstance(value, (tuple, list)) else (value,)
if isinstance(default, (tuple, list)):
if len(value) == 1 and ',' in value[0]:
value = value[0].split(',')
return tuple(self._parse_flag_value(default[0], [x], key) for x in value)
assert len(value) == 1, value
value = str(value[0])
if default is None:
return value
if isinstance(default, bool):
try:
return bool(['False', 'True'].index(value))
except ValueError:
message = f"Expected bool but got '{value}' for key '{key}'."
raise TypeError(message)
if isinstance(default, int):
value = float(value) # Allow scientific notation for integers.
if float(int(value)) != value:
message = f"Expected int but got float '{value}' for key '{key}'."
raise TypeError(message)
return int(value)
return type(default)(value)
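# Illustrative sketch of command-line overriding (the defaults and argv below
# are made up): values are coerced to the type of the matching default, and
# both `--key value` and `--key=value` forms are accepted.
def _flags_usage_example():
  defaults = {'seed': 0, 'logdir': '~/logdir', 'train': {'steps': 100, 'lr': 1e-3}}
  argv = ['leftover', '--seed', '3', '--train.lr=0.01']
  config, remaining = Flags(defaults).parse(argv, known_only=True)
  assert config.seed == 3 and config.train.lr == 0.01
  assert remaining == ['leftover']
  return config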
| cascade-main | dreamerv2/common/flags.py |
import datetime
import json
import pathlib
import imageio
import numpy as np
class Recorder:
def __init__(
self, env, directory, save_stats=True, save_video=True,
save_episode=True, video_size=(512, 512)):
if directory and save_stats:
env = StatsRecorder(env, directory)
if directory and save_video:
env = VideoRecorder(env, directory, video_size)
if directory and save_episode:
env = EpisodeRecorder(env, directory)
if not directory:
env = NoopRecorder(env)
self._env = env
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
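# Typical wiring (sketch): Recorder wraps a crafter-style environment whose
# step() info dict provides 'reward', 'achievements', and 'inventory', which
# the wrappers below rely on. The crafter import and directory are examples.
#
#   import crafter
#   env = crafter.Env()
#   env = Recorder(env, directory='~/logdir/eval_episodes',
#                  save_stats=True, save_video=False, save_episode=True)
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())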
class NoopRecorder:
def __init__(self, env):
self._env = env
def reset(self):
obs = self._env.reset()
return obs
def step(self, action, policy_idx=0):
return self._env.step(action)
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
class StatsRecorder:
def __init__(self, env, directory):
self._env = env
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(exist_ok=True, parents=True)
self._file = (self._directory / 'stats.jsonl').open('a')
self._length = None
self._reward = None
self._unlocked = None
self._stats = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._length = 0
self._reward = 0
self._unlocked = None
self._stats = None
return obs
def step(self, action, policy_idx=0):
obs, reward, done, info = self._env.step(action)
self._length += 1
self._reward += info['reward']
if done:
self._stats = {'length': self._length, 'reward': round(self._reward, 1), 'policy_idx': policy_idx}
for key, value in info['achievements'].items():
self._stats[f'achievement_{key}'] = value
self._save()
return obs, reward, done, info
def _save(self):
self._file.write(json.dumps(self._stats) + '\n')
self._file.flush()
class VideoRecorder:
def __init__(self, env, directory, size=(512, 512)):
if not hasattr(env, 'episode_name'):
env = EpisodeName(env)
self._env = env
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(exist_ok=True, parents=True)
self._size = size
self._frames = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._frames = [self._env.render(self._size)]
return obs
def step(self, action):
obs, reward, done, info = self._env.step(action)
self._frames.append(self._env.render(self._size))
if done:
self._save()
return obs, reward, done, info
def _save(self):
filename = str(self._directory / (self._env.episode_name + '.mp4'))
imageio.mimsave(filename, self._frames)
class EpisodeRecorder:
def __init__(self, env, directory):
if not hasattr(env, 'episode_name'):
env = EpisodeName(env)
self._env = env
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(exist_ok=True, parents=True)
self._episode = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._episode = [{'image': obs}]
return obs
def step(self, action):
# Transitions are defined from the environment perspective, meaning that a
# transition contains the action and the resulting reward and next
# observation produced by the environment in response to said action.
obs, reward, done, info = self._env.step(action)
transition = {
'action': action, 'image': obs, 'reward': reward, 'done': done,
}
for key, value in info.items():
if key in ('inventory', 'achievements'):
continue
transition[key] = value
for key, value in info['achievements'].items():
transition[f'achievement_{key}'] = value
for key, value in info['inventory'].items():
transition[f'ainventory_{key}'] = value
self._episode.append(transition)
if done:
self._save()
return obs, reward, done, info
def _save(self):
filename = str(self._directory / (self._env.episode_name + '.npz'))
# Fill in zeros for keys missing at the first time step.
for key, value in self._episode[1].items():
if key not in self._episode[0]:
self._episode[0][key] = np.zeros_like(value)
episode = {
k: np.array([step[k] for step in self._episode])
for k in self._episode[0]}
np.savez_compressed(filename, **episode)
class EpisodeName:
def __init__(self, env):
self._env = env
self._timestamp = None
self._unlocked = None
self._length = None
def __getattr__(self, name):
if name.startswith('__'):
raise AttributeError(name)
return getattr(self._env, name)
def reset(self):
obs = self._env.reset()
self._timestamp = None
self._unlocked = None
self._length = 0
return obs
def step(self, action):
obs, reward, done, info = self._env.step(action)
self._length += 1
if done:
self._timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
self._unlocked = sum(int(v >= 1) for v in info['achievements'].values())
return obs, reward, done, info
@property
def episode_name(self):
    return f'{self._timestamp}-ach{self._unlocked}-len{self._length}'
| cascade-main | dreamerv2/common/recorder.py |
# General tools.
from .config import *
from .counter import *
from .flags import *
from .logger import *
from .when import *
from .eval import *
from .cdmc import *
# RL tools.
from .other import *
from .driver import *
from .envs import *
from .replay import *
# TensorFlow tools.
from .tfutils import *
from .dists import *
from .nets import *
| cascade-main | dreamerv2/common/__init__.py |
import collections
import contextlib
import re
import time
import numpy as np
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from . import dists
from . import tfutils
class RandomAgent:
def __init__(self, act_space, logprob=False):
self.act_space = act_space['action']
self.logprob = logprob
if hasattr(self.act_space, 'n'):
self._dist = dists.OneHotDist(tf.zeros(self.act_space.n))
else:
dist = tfd.Uniform(self.act_space.low, self.act_space.high)
self._dist = tfd.Independent(dist, 1)
def __call__(self, obs, state=None, mode=None):
action = self._dist.sample(len(obs['is_first']))
output = {'action': action}
if self.logprob:
output['logprob'] = self._dist.log_prob(action)
return output, None
def static_scan(fn, inputs, start, reverse=False):
last = start
outputs = [[] for _ in tf.nest.flatten(start)]
indices = range(tf.nest.flatten(inputs)[0].shape[0])
if reverse:
indices = reversed(indices)
for index in indices:
inp = tf.nest.map_structure(lambda x: x[index], inputs)
last = fn(last, inp)
[o.append(l) for o, l in zip(outputs, tf.nest.flatten(last))]
if reverse:
outputs = [list(reversed(x)) for x in outputs]
outputs = [tf.stack(x, 0) for x in outputs]
return tf.nest.pack_sequence_as(start, outputs)
def schedule(string, step):
try:
return float(string)
except ValueError:
step = tf.cast(step, tf.float32)
match = re.match(r'linear\((.+),(.+),(.+)\)', string)
if match:
initial, final, duration = [float(group) for group in match.groups()]
mix = tf.clip_by_value(step / duration, 0, 1)
return (1 - mix) * initial + mix * final
match = re.match(r'warmup\((.+),(.+)\)', string)
if match:
warmup, value = [float(group) for group in match.groups()]
scale = tf.clip_by_value(step / warmup, 0, 1)
return scale * value
match = re.match(r'exp\((.+),(.+),(.+)\)', string)
if match:
initial, final, halflife = [float(group) for group in match.groups()]
return (initial - final) * 0.5 ** (step / halflife) + final
match = re.match(r'horizon\((.+),(.+),(.+)\)', string)
if match:
initial, final, duration = [float(group) for group in match.groups()]
mix = tf.clip_by_value(step / duration, 0, 1)
horizon = (1 - mix) * initial + mix * final
return 1 - 1 / horizon
raise NotImplementedError(string)
def lambda_return(
reward, value, pcont, bootstrap, lambda_, axis):
# Setting lambda=1 gives a discounted Monte Carlo return.
# Setting lambda=0 gives a fixed 1-step return.
assert reward.shape.ndims == value.shape.ndims, (reward.shape, value.shape)
if isinstance(pcont, (int, float)):
pcont = pcont * tf.ones_like(reward)
dims = list(range(reward.shape.ndims))
dims = [axis] + dims[1:axis] + [0] + dims[axis + 1:]
if axis != 0:
reward = tf.transpose(reward, dims)
value = tf.transpose(value, dims)
pcont = tf.transpose(pcont, dims)
if bootstrap is None:
bootstrap = tf.zeros_like(value[-1])
next_values = tf.concat([value[1:], bootstrap[None]], 0)
inputs = reward + pcont * next_values * (1 - lambda_)
returns = static_scan(
lambda agg, cur: cur[0] + cur[1] * lambda_ * agg,
(inputs, pcont), bootstrap, reverse=True)
if axis != 0:
returns = tf.transpose(returns, dims)
return returns
def action_noise(action, amount, act_space):
if amount == 0:
return action
amount = tf.cast(amount, action.dtype)
if hasattr(act_space, 'n'):
probs = amount / action.shape[-1] + (1 - amount) * action
return dists.OneHotDist(probs=probs).sample()
else:
return tf.clip_by_value(tfd.Normal(action, amount).sample(), -1, 1)
class StreamNorm(tfutils.Module):
def __init__(self, shape=(), momentum=0.99, scale=1.0, eps=1e-8):
# Momentum of 0 normalizes only based on the current batch.
# Momentum of 1 disables normalization.
self._shape = tuple(shape)
self._momentum = momentum
self._scale = scale
self._eps = eps
self.mag = tf.Variable(tf.ones(shape, tf.float64), False)
def __call__(self, inputs):
metrics = {}
self.update(inputs)
metrics['mean'] = inputs.mean()
metrics['std'] = inputs.std()
outputs = self.transform(inputs)
metrics['normed_mean'] = outputs.mean()
metrics['normed_std'] = outputs.std()
return outputs, metrics
def reset(self):
self.mag.assign(tf.ones_like(self.mag))
def update(self, inputs):
batch = inputs.reshape((-1,) + self._shape)
mag = tf.abs(batch).mean(0).astype(tf.float64)
self.mag.assign(self._momentum * self.mag + (1 - self._momentum) * mag)
def transform(self, inputs):
values = inputs.reshape((-1,) + self._shape)
values /= self.mag.astype(inputs.dtype)[None] + self._eps
values *= self._scale
return values.reshape(inputs.shape)
class Timer:
def __init__(self):
self._indurs = collections.defaultdict(list)
self._outdurs = collections.defaultdict(list)
self._start_times = {}
self._end_times = {}
@contextlib.contextmanager
def section(self, name):
self.start(name)
yield
self.end(name)
def wrap(self, function, name):
def wrapped(*args, **kwargs):
with self.section(name):
return function(*args, **kwargs)
return wrapped
def start(self, name):
now = time.time()
self._start_times[name] = now
if name in self._end_times:
last = self._end_times[name]
self._outdurs[name].append(now - last)
def end(self, name):
now = time.time()
self._end_times[name] = now
self._indurs[name].append(now - self._start_times[name])
def result(self):
metrics = {}
for key in self._indurs:
indurs = self._indurs[key]
outdurs = self._outdurs[key]
metrics[f'timer_count_{key}'] = len(indurs)
metrics[f'timer_inside_{key}'] = np.sum(indurs)
metrics[f'timer_outside_{key}'] = np.sum(outdurs)
indurs.clear()
outdurs.clear()
return metrics
class CarryOverState:
def __init__(self, fn):
self._fn = fn
self._state = None
def __call__(self, *args):
self._state, out = self._fn(*args, self._state)
return out
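# Tiny numeric sketch of lambda_return (the inputs are arbitrary): with
# lambda_=1 it reduces to the discounted Monte Carlo return and with
# lambda_=0 to the one-step target reward + pcont * value[t + 1].
def _lambda_return_example():
  reward = tf.constant([[1.0], [1.0], [1.0]])  # time-major: [T, B]
  value = tf.constant([[0.5], [0.5], [0.5]])
  bootstrap = tf.constant([0.5])
  returns = lambda_return(
      reward, value, pcont=0.99, bootstrap=bootstrap, lambda_=0.95, axis=0)
  return returns  # shape [3, 1]: the lambda-return target for each time step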
| cascade-main | dreamerv2/common/other.py |
import json
import os
import pathlib
import time
import numpy as np
class Logger:
def __init__(self, step, outputs, multiplier=1):
self._step = step
self._outputs = outputs
self._multiplier = multiplier
self._last_step = None
self._last_time = None
self._metrics = []
def add(self, mapping, prefix=None):
step = int(self._step) * self._multiplier
for name, value in dict(mapping).items():
name = f'{prefix}_{name}' if prefix else name
value = np.array(value)
if len(value.shape) not in (0, 2, 3, 4):
raise ValueError(
f"Shape {value.shape} for name '{name}' cannot be "
"interpreted as scalar, image, or video.")
self._metrics.append((step, name, value))
def scalar(self, name, value):
self.add({name: value})
def image(self, name, value):
self.add({name: value})
def video(self, name, value):
self.add({name: value})
def write(self, fps=False):
fps and self.scalar('fps', self._compute_fps())
if not self._metrics:
return
for output in self._outputs:
output(self._metrics)
self._metrics.clear()
def _compute_fps(self):
step = int(self._step) * self._multiplier
if self._last_step is None:
self._last_time = time.time()
self._last_step = step
return 0
steps = step - self._last_step
duration = time.time() - self._last_time
self._last_time += duration
self._last_step = step
return steps / duration
class TerminalOutput:
def __call__(self, summaries):
    step = max(s for s, _, _ in summaries)
scalars = {k: float(v) for _, k, v in summaries if len(v.shape) == 0}
formatted = {k: self._format_value(v) for k, v in scalars.items()}
print(f'[{step}]', ' / '.join(f'{k} {v}' for k, v in formatted.items()))
def _format_value(self, value):
if value == 0:
return '0'
elif 0.01 < abs(value) < 10000:
value = f'{value:.2f}'
value = value.rstrip('0')
value = value.rstrip('0')
value = value.rstrip('.')
return value
else:
value = f'{value:.1e}'
value = value.replace('.0e', 'e')
value = value.replace('+0', '')
value = value.replace('+', '')
value = value.replace('-0', '-')
return value
class JSONLOutput:
def __init__(self, logdir):
self._logdir = pathlib.Path(logdir).expanduser()
def __call__(self, summaries):
scalars = {k: float(v) for _, k, v in summaries if len(v.shape) == 0}
    step = max(s for s, _, _ in summaries)
with (self._logdir / 'metrics.jsonl').open('a') as f:
f.write(json.dumps({'step': step, **scalars}) + '\n')
class TensorBoardOutput:
def __init__(self, logdir, fps=20):
# The TensorFlow summary writer supports file protocols like gs://. We use
# os.path over pathlib here to preserve those prefixes.
self._logdir = os.path.expanduser(logdir)
self._writer = None
self._fps = fps
def __call__(self, summaries):
import tensorflow as tf
self._ensure_writer()
self._writer.set_as_default()
for step, name, value in summaries:
if len(value.shape) == 0:
tf.summary.scalar('scalars/' + name, value, step)
elif len(value.shape) == 2:
tf.summary.image(name, value, step)
elif len(value.shape) == 3:
tf.summary.image(name, value, step)
elif len(value.shape) == 4:
self._video_summary(name, value, step)
self._writer.flush()
def _ensure_writer(self):
if not self._writer:
import tensorflow as tf
self._writer = tf.summary.create_file_writer(
self._logdir, max_queue=1000)
def _video_summary(self, name, video, step):
import tensorflow as tf
import tensorflow.compat.v1 as tf1
name = name if isinstance(name, str) else name.decode('utf-8')
if np.issubdtype(video.dtype, np.floating):
video = np.clip(255 * video, 0, 255).astype(np.uint8)
try:
T, H, W, C = video.shape
summary = tf1.Summary()
image = tf1.Summary.Image(height=H, width=W, colorspace=C)
image.encoded_image_string = encode_gif(video, self._fps)
summary.value.add(tag=name, image=image)
tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step)
except (IOError, OSError) as e:
print('GIF summaries require ffmpeg in $PATH.', e)
tf.summary.image(name, video, step)
def encode_gif(frames, fps):
from subprocess import Popen, PIPE
h, w, c = frames[0].shape
pxfmt = {1: 'gray', 3: 'rgb24'}[c]
cmd = ' '.join([
'ffmpeg -y -f rawvideo -vcodec rawvideo',
f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex',
'[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse',
f'-r {fps:.02f} -f gif -'])
proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE)
for image in frames:
proc.stdin.write(image.tobytes())
out, err = proc.communicate()
if proc.returncode:
raise IOError('\n'.join([' '.join(cmd), err.decode('utf8')]))
del proc
return out
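# Minimal usage sketch: a Logger fans metrics out to its output backends.
# TerminalOutput is used here because it needs no log directory; JSONLOutput
# and TensorBoardOutput write files under a logdir. Names/values are examples.
def _logger_usage_example():
  logger = Logger(step=0, outputs=[TerminalOutput()], multiplier=1)
  logger.scalar('loss', 0.123)
  logger.write(fps=True)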
| cascade-main | dreamerv2/common/logger.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
import datetime
import io
import pathlib
import uuid
import numpy as np
import tensorflow as tf
class Replay:
def __init__(
self, directory, capacity=0, offline_init=False, ongoing=False, minlen=1, maxlen=0,
prioritize_ends=False, multi_reward=False, offline_directory=None):
self._capacity = capacity
self._ongoing = ongoing
self._minlen = minlen
self._maxlen = maxlen
self._prioritize_ends = prioritize_ends
self._random = np.random.RandomState()
self._eval_score = 0
self.achievements = collections.defaultdict(list)
self._solved_levels = 0
self._multi_reward = multi_reward
self._max_scores = 0
self.rewards = []
self._mean_scores = 0
self._directory = pathlib.Path(directory).expanduser()
self._directory.mkdir(parents=True, exist_ok=True)
if offline_init:
self._total_episodes = 0
self._total_steps = 0
self._loaded_episodes = 0
self._loaded_steps = 0
self._complete_eps = {}
if type(offline_directory) is not list:
offline_directory = [offline_directory]
for d in offline_directory:
print(f"\nloading...{d}")
path = pathlib.Path(d).expanduser()
complete_eps, t_steps, t_eps = self.load_episodes(path, capacity, minlen)
saved_eps = save_episodes(self._directory, complete_eps)
self._complete_eps.update(saved_eps)
self._enforce_limit()
self._loaded_episodes += len(complete_eps)
self._loaded_steps += sum(eplen(x) for x in complete_eps.values())
# filename -> key -> value_sequence
self._complete_eps, _, _ = self.load_episodes(self._directory, capacity, minlen)
# worker -> key -> value_sequence
self._total_episodes, self._total_steps = count_episodes(directory)
self._loaded_episodes = len(self._complete_eps)
self._loaded_steps = sum(eplen(x) for x in self._complete_eps.values())
self._ongoing_eps = collections.defaultdict(lambda: collections.defaultdict(list))
@property
def stats(self):
return {
'total_steps': self._total_steps,
'total_episodes': self._total_episodes,
'loaded_steps': self._loaded_steps,
'loaded_episodes': self._loaded_episodes,
'running_score': self._eval_score,
'solved_levels': self._solved_levels,
'max_scores': self._max_scores,
'mean_scores': self._mean_scores
}
def add_step(self, transition, worker=0):
episode = self._ongoing_eps[worker]
for key, value in transition.items():
episode[key].append(value)
if transition['is_last']:
self.add_episode(episode)
episode.clear()
def add_episode(self, episode):
length = eplen(episode)
if 'log_achievement_collect_diamond' in episode.keys():
self.update_crafter_score(episode)
if self._multi_reward:
pass # in case we need to do something here
elif 'reward' in episode.keys() and sum(episode['reward']) > 0:
rew = sum(episode['reward'])
self._solved_levels += 1
self._max_scores = max(self._max_scores, rew)
self.rewards.append(rew)
self._mean_scores = np.mean(self.rewards)
if length < self._minlen:
print(f'Skipping short episode of length {length}.')
return
self._total_steps += length
self._loaded_steps += length
self._total_episodes += 1
self._loaded_episodes += 1
episode = {key: convert(value) for key, value in episode.items()}
if self._multi_reward:
episode['reward'] = reshape_rewards_dmc(episode)
filename = save_episode(self._directory, episode)
self._complete_eps[str(filename)] = episode
self._enforce_limit()
def dataset(self, batch, length):
example = next(iter(self._generate_chunks(length)))
dataset = tf.data.Dataset.from_generator(
lambda: self._generate_chunks(length),
{k: v.dtype for k, v in example.items()},
{k: v.shape for k, v in example.items()})
dataset = dataset.batch(batch, drop_remainder=True)
dataset = dataset.prefetch(5)
return dataset
def _generate_chunks(self, length):
sequence = self._sample_sequence()
while True:
chunk = collections.defaultdict(list)
added = 0
while added < length:
needed = length - added
adding = {k: v[:needed] for k, v in sequence.items()}
sequence = {k: v[needed:] for k, v in sequence.items()}
for key, value in adding.items():
chunk[key].append(value)
added += len(adding['action'])
if len(sequence['action']) < 1:
sequence = self._sample_sequence()
chunk = {k: np.concatenate(v) for k, v in chunk.items()}
yield chunk
def _sample_sequence(self):
episodes = list(self._complete_eps.values())
if self._ongoing:
episodes += [
x for x in self._ongoing_eps.values()
if eplen(x) >= self._minlen]
episode = self._random.choice(episodes)
total = len(episode['action'])
length = total
if self._maxlen:
length = min(length, self._maxlen)
# Randomize length to avoid all chunks ending at the same time in case the
# episodes are all of the same length.
length -= np.random.randint(self._minlen)
length = max(self._minlen, length)
upper = total - length + 1
if self._prioritize_ends:
upper += self._minlen
index = min(self._random.randint(upper), total - length)
sequence = {
k: convert(v[index: index + length])
for k, v in episode.items() if not k.startswith('log_')}
    sequence['is_first'] = np.zeros(len(sequence['action']), bool)
sequence['is_first'][0] = True
if self._maxlen:
assert self._minlen <= len(sequence['action']) <= self._maxlen
return sequence
def _enforce_limit(self):
if not self._capacity:
return
while self._loaded_episodes > 1 and self._loaded_steps > self._capacity:
# Relying on Python preserving the insertion order of dicts.
oldest, episode = next(iter(self._complete_eps.items()))
self._loaded_steps -= eplen(episode)
self._loaded_episodes -= 1
del self._complete_eps[oldest]
def update_crafter_score(self, episode):
for key, val in episode.items():
if 'log_achievement' in key:
self.achievements[key] += [int(any([x.item() for x in episode[key]]))]
means = [np.mean(vals)*100 for vals in self.achievements.values()]
self._eval_score = (np.exp(np.nanmean(np.log(1 + np.array(means)), -1)) - 1)
def load_episodes(self, directory, capacity=None, minlen=1):
    # The returned dictionary from filenames to episodes is guaranteed to be in
# temporally sorted order.
filenames = sorted(directory.glob('*.npz'))
if capacity:
num_steps = 0
num_episodes = 0
for filename in reversed(filenames):
length = int(str(filename).split('-')[-1][:-4])
num_steps += length
num_episodes += 1
if num_steps >= capacity:
break
filenames = filenames[-num_episodes:]
episodes = {}
num_steps = 0
num_episodes = 0
for filename in filenames:
try:
with filename.open('rb') as f:
episode = np.load(f)
episode = {k: episode[k] for k in episode.keys()}
for key, val in episode.items():
if 'log_achievement' in key:
self.achievements[key] += [int(any([x.item() for x in episode[key]]))]
if not self._multi_reward:
if 'reward' in episode.keys() and sum(episode['reward']) > 0:
rew = sum(episode['reward'])
self._solved_levels += 1
self._max_scores = max(self._max_scores, rew)
self.rewards.append(rew)
self._mean_scores = np.mean(self.rewards)
num_steps += 1
num_episodes += 1
except Exception as e:
print(f'Could not load episode {str(filename)}: {e}')
continue
if 'is_terminal' not in episode:
episode['is_terminal'] = episode['discount'] == 0
episodes[str(filename)] = episode
return episodes, num_steps, num_episodes
def count_episodes(directory):
filenames = list(directory.glob('*.npz'))
num_episodes = len(filenames)
num_steps = sum(int(str(n).split('-')[-1][:-4]) - 1 for n in filenames)
return num_episodes, num_steps
def save_episode(directory, episode):
timestamp = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')
identifier = str(uuid.uuid4().hex)
length = eplen(episode)
filename = directory / f'{timestamp}-{identifier}-{length}.npz'
with io.BytesIO() as f1:
np.savez_compressed(f1, **episode)
f1.seek(0)
with filename.open('wb') as f2:
f2.write(f1.read())
return filename
def save_episodes(directory, episodes):
saved_eps = {}
for _, ep in episodes.items():
filename = save_episode(directory, ep)
saved_eps[str(filename)] = ep
return saved_eps
def convert(value):
value = np.array(value)
if np.issubdtype(value.dtype, np.floating):
return value.astype(np.float32)
elif np.issubdtype(value.dtype, np.signedinteger):
return value.astype(np.int32)
elif np.issubdtype(value.dtype, np.uint8):
return value.astype(np.uint8)
return value
def reshape_rewards_dmc(episode):
rew = np.concatenate([r.reshape(1, -1) for r in episode['reward'][1:]], 0)
rew = np.concatenate((np.zeros(rew.shape[1]).reshape(1, rew.shape[1]), rew))
return rew
def eplen(episode):
return len(episode['action']) - 1
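# Usage sketch (writes .npz episodes under a temporary directory; the
# transition keys follow the convention used elsewhere in this codebase, and
# the shapes below are arbitrary examples):
def _replay_usage_example():
  import tempfile
  directory = pathlib.Path(tempfile.mkdtemp())
  replay = Replay(directory, capacity=10000, minlen=2, maxlen=10)
  for t in range(5):
    replay.add_step({
        'action': np.zeros(3, np.float32),
        'image': np.zeros((64, 64, 3), np.uint8),
        'reward': np.float32(0.0),
        'discount': np.float32(1.0),
        'is_first': t == 0,
        'is_last': t == 4,
        'is_terminal': False,
    })
  dataset = replay.dataset(batch=1, length=4)
  batch = next(iter(dataset))
  return {k: v.shape for k, v in batch.items()}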
| cascade-main | dreamerv2/common/replay.py |
"""In gym, the RAM is represented as an 128-element array, where each element in the array can range from 0 to 255
The atari_dict below is organized as so:
key: the name of the game
value: the game dictionary
Game dictionary is organized as:
key: state variable name
value: the element in the RAM array where the value of that state variable is stored
e.g. the value of the x coordinate of the player in asteroids is stored at index 73 (counting from 0)
of the RAM array (when the player in asteroids moves horizontally, ram_array[73] should change
in value correspondingly)
"""
""" MZR player_direction values:
72: facing left,
40: facing left, climbing down ladder/rope
24: facing left, climbing up ladder/rope
128: facing right
32: facing right, climbing down ladder/rope
16: facing right climbing up ladder/rope """
atari_dict = {
"asteroids": dict(enemy_asteroids_y=[3, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 18, 19],
enemy_asteroids_x=[21, 22, 23, 24, 25, 26, 27, 30, 31, 32, 33, 34, 35, 36, 37],
player_x=73,
player_y=74,
num_lives_direction=60,
player_score_high=61,
player_score_low=62,
player_missile_x1=83,
player_missile_x2=84,
player_missile_y1=86,
player_missile_y2=87,
player_missile1_direction=89,
player_missile2_direction=90),
"battlezone": dict( # red_enemy_x=75,
blue_tank_facing_direction=46, # 17 left 21 forward 29 right
blue_tank_size_y=47, # tank gets larger as it gets closer
blue_tank_x=48,
blue_tank2_facing_direction=52,
blue_tank2_size_y=53,
blue_tank2_x=54,
num_lives=58,
missile_y=105,
compass_needles_angle=84,
angle_of_tank=4, # as shown by what the mountains look like
left_tread_position=59, # got to mod this number by 8 to get unique values
right_tread_position=60, # got to mod this number by 8 to get unique values
crosshairs_color=108, # 0 if black 46 if yellow
score=29),
"berzerk": dict(player_x=19,
player_y=11,
player_direction=14,
player_missile_x=22,
player_missile_y=23,
player_missile_direction=21,
robot_missile_direction=26,
robot_missile_x=29,
robot_missile_y=30,
num_lives=90,
robots_killed_count=91,
game_level=92,
enemy_evilOtto_x=46,
enemy_evilOtto_y=89,
enemy_robots_x=range(65, 73),
enemy_robots_y=range(56, 65),
player_score=range(93, 96)),
"bowling": dict(ball_x=30,
ball_y=41,
player_x=29,
player_y=40,
frame_number_display=36,
pin_existence=range(57, 67),
score=33),
"boxing": dict(player_x=32,
player_y=34,
enemy_x=33,
enemy_y=35,
enemy_score=19,
clock=17,
player_score=18),
"breakout": dict(ball_x=99,
ball_y=101,
player_x=72,
blocks_hit_count=77,
block_bit_map=range(30), # see breakout bitmaps tab
score=84), # 5 for each hit
"demonattack": dict(level=62,
player_x=22,
enemy_x1=17,
enemy_x2=18,
enemy_x3=19,
missile_y=21,
enemy_y1=69,
enemy_y2=70,
enemy_y3=71,
num_lives=114),
"freeway": dict(player_y=14,
score=103,
enemy_car_x=range(108, 118)), # which lane the car collided with player
"frostbite": dict(
top_row_iceflow_x=34,
second_row_iceflow_x=33,
third_row_iceflow_x=32,
fourth_row_iceflow_x=31,
enemy_bear_x=104,
num_lives=76,
igloo_blocks_count=77, # 255 is none and 15 is all "
enemy_x=range(84, 88), # 84 bottom row - 87 top row
player_x=102,
player_y=100,
player_direction=4,
score=[72, 73, 74]),
"hero": dict(player_x=27,
player_y=31,
power_meter=43,
room_number=28,
level_number=117,
dynamite_count=50,
score=[56, 57]),
"montezumarevenge": dict(room_number=3,
player_x=42,
player_y=43,
player_direction=52, # 72: facing left, 40: facing left, climbing down ladder/rope 24: facing left, climbing up ladder/rope 128: facing right 32: facing right, climbing down ladder/rope, 16: facing right climbing up ladder/rope
enemy_skull_x=47,
enemy_skull_y=46,
key_monster_x=44,
key_monster_y=45,
level=57,
num_lives=58,
items_in_inventory_count=61,
room_state=62,
score_0=19,
score_1=20,
score_2=21),
"mspacman": dict(enemy_sue_x=6,
enemy_inky_x=7,
enemy_pinky_x=8,
enemy_blinky_x=9,
enemy_sue_y=12,
enemy_inky_y=13,
enemy_pinky_y=14,
enemy_blinky_y=15,
player_x=10,
player_y=16,
fruit_x=11,
fruit_y=17,
ghosts_count=19,
player_direction=56,
dots_eaten_count=119,
player_score=120,
num_lives=123),
"pitfall": dict(player_x=97, # 8-148
player_y=105, # 21-86 except for when respawning then 0-255 with confusing wraparound
enemy_logs_x=98, # 0-160
enemy_scorpion_x=99,
# player_y_on_ladder= 108, # 0-20
# player_collided_with_rope= 5, #yes if bit 6 is 1
bottom_of_rope_y=18, # 0-20 varies even when you can't see rope
clock_sec=89,
clock_min=88
),
"pong": dict(player_y=51,
player_x=46,
enemy_y=50,
enemy_x=45,
ball_x=49,
ball_y=54,
enemy_score=13,
player_score=14),
"privateeye": dict(player_x=63,
player_y=86,
room_number=92,
clock=[67, 69],
player_direction=58,
score=[73, 74],
dove_x=48,
dove_y=39),
"qbert": dict(player_x=43,
player_y=67,
player_column=35,
red_enemy_column=69,
green_enemy_column=105,
score=[89, 90, 91], # binary coded decimal score
tile_color=[ 21, # row of 1
52, 54, # row of 2
83, 85, 87, # row of 3
98, 100, 102, 104, # row of 4
1, 3, 5, 7, 9, # row of 5
32, 34, 36, 38, 40, 42]), # row of 6
"riverraid": dict(player_x=51,
missile_x=117,
missile_y=50,
fuel_meter_high=55, # high value displayed
fuel_meter_low=56 # low value
),
"seaquest": dict(enemy_obstacle_x=range(30, 34),
player_x=70,
player_y=97,
diver_or_enemy_missile_x=range(71, 75),
player_direction=86,
player_missile_direction=87,
oxygen_meter_value=102,
player_missile_x=103,
score=[57, 58],
num_lives=59,
divers_collected_count=62),
"skiing": dict(player_x=25,
clock_m=104,
clock_s=105,
clock_ms=106,
score=107,
object_y=range(87, 94)), # object_y_1 is y position of whatever topmost object on the screen is
"spaceinvaders": dict(invaders_left_count=17,
player_score=104,
num_lives=73,
player_x=28,
enemies_x=26,
missiles_y=9,
enemies_y=24),
"tennis": dict(enemy_x=27,
enemy_y=25,
enemy_score=70,
ball_x=16,
ball_y=17,
player_x=26,
player_y=24,
player_score=69),
"venture": dict(sprite0_y=20,
sprite1_y=21,
sprite2_y=22,
sprite3_y=23,
sprite4_y=24,
sprite5_y=25,
sprite0_x=79,
sprite1_x=80,
sprite2_x=81,
sprite3_x=82,
sprite4_x=83,
sprite5_x=84,
player_x=85,
player_y=26,
current_room=90, # The number of the room the player is currently in 0 to 9_
num_lives=70,
score_1_2=71,
score_3_4=72),
"videopinball": dict(ball_x=67,
ball_y=68,
player_left_paddle_y=98,
player_right_paddle_y=102,
score_1=48,
score_2=50),
"yarsrevenge": dict(player_x=32,
player_y=31,
player_missile_x=38,
player_missile_y=37,
enemy_x=43,
enemy_y=42,
enemy_missile_x=47,
enemy_missile_y=46)
}
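# Example lookup (illustrative): given a 128-byte RAM observation from one of
# gym's Atari "-ram" environments, the entries above index directly into it.
# The RAM array below is a fake placeholder; list-valued entries such as
# enemy_asteroids_y are split into _0, _1, ... keys by the loop further down.
def _ram_lookup_example():
  import numpy as np
  ram = np.zeros(128, dtype=np.uint8)
  ram[73], ram[74] = 120, 45  # pretend player position for asteroids
  info = atari_dict['asteroids']
  player_x = ram[info['player_x']]  # -> 120
  player_y = ram[info['player_y']]  # -> 45
  return player_x, player_y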
# break up any lists (e.g. dict(clock=[67, 69]) -> dict(clock_0=67, clock_1=69) )
update_dict = {k: {} for k in atari_dict.keys()}
remove_dict = {k: [] for k in atari_dict.keys()}
for game, d in atari_dict.items():
for k, v in d.items():
if isinstance(v, range) or isinstance(v, list):
for i, vi in enumerate(v):
update_dict[game]["%s_%i" % (k, i)] = vi
remove_dict[game].append(k)
for k in atari_dict.keys():
atari_dict[k].update(update_dict[k])
for rk in remove_dict[k]:
    atari_dict[k].pop(rk)
| cascade-main | dreamerv2/common/ram_annotations.py |
class Every:
def __init__(self, every):
self._every = every
self._last = None
def __call__(self, step):
step = int(step)
if not self._every:
return False
if self._last is None:
self._last = step
return True
if step >= self._last + self._every:
self._last += self._every
return True
return False
class Once:
def __init__(self):
self._once = True
def __call__(self):
if self._once:
self._once = False
return True
return False
class Until:
def __init__(self, until):
self._until = until
def __call__(self, step):
step = int(step)
if not self._until:
return True
return step < self._until
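# Quick sketch of how these schedules are used in a training loop (the step
# values are arbitrary):
def _when_usage_example():
  should_log = Every(100)    # True at steps 0, 100, 200, ...
  should_pretrain = Once()   # True exactly once
  should_train = Until(500)  # True while step < 500
  logged_at = [step for step in range(0, 600, 50) if should_log(step)]
  return logged_at, should_pretrain(), should_train(250)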
| cascade-main | dreamerv2/common/when.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
from .cdmc import DMC_TASK_IDS
import numpy as np
from scipy.stats import gmean
def get_stats_at_idx(driver, task, idx):
"""
  Get the train / eval stats from the driver for the env at index idx.
"""
prefix = "eval_"
eps = driver._eps[idx]
eval_data = defaultdict(list)
if task == 'crafter_noreward':
for ep in eps:
for key, val in ep.items():
if 'log_achievement_' in key:
eval_data[prefix + 'rew_'+key.split('log_achievement_')[1]].append(val.item())
eval_data[prefix + 'sr_'+key.split('log_achievement_')[1]].append(1 if val.item() > 0 else 0)
eval_data['reward'].append(ep['log_reward'].item())
eval_data = {key: np.mean(val) for key, val in eval_data.items()}
eval_data[prefix + 'crafter_score'] = gmean([val for key, val in eval_data.items() if 'eval_sr' in key])
elif task in DMC_TASK_IDS:
rewards = [ep['reward'] for ep in eps[1:]]
for idx, goal in enumerate(DMC_TASK_IDS[task]):
eval_data[prefix + 'reward_' + goal] = np.sum([r[idx] for r in rewards])
else:
eval_data[prefix + 'reward'] = np.sum([ep['reward'] for ep in eps])
return eval_data
def get_stats(driver, task):
per_env_data = defaultdict(list)
num_envs = len(driver._envs)
for i in range(num_envs):
stat = get_stats_at_idx(driver, task, i)
for k, v in stat.items():
per_env_data[k].append(v)
data = {}
for k, v in per_env_data.items():
data[k] = np.mean(v)
return data
def eval(driver, config, expl_policies, logdir):
## reward for the exploration agents
mets = {}
mean_pop = {}
for idx in range(config.num_agents):
policy = expl_policies[idx]
driver(policy, episodes=config.eval_eps, policy_idx=idx)
data = get_stats(driver, task=config.task)
if idx == 0:
for key, val in data.items():
mean_pop[key] = np.mean(val)
else:
for key,val in data.items():
mean_pop[key] += np.mean(val)
mets.update({key: np.mean(val) for key, val in mean_pop.items()})
  return mets
| cascade-main | dreamerv2/common/eval.py |
import numpy as np
class Driver:
def __init__(self, envs, **kwargs):
self._envs = envs
self._kwargs = kwargs
self._on_steps = []
self._on_resets = []
self._on_episodes = []
self._act_spaces = [env.act_space for env in envs]
self.reset()
def on_step(self, callback):
self._on_steps.append(callback)
def on_reset(self, callback):
self._on_resets.append(callback)
def on_episode(self, callback):
self._on_episodes.append(callback)
def reset(self):
self._obs = [None] * len(self._envs)
self._eps = [None] * len(self._envs)
self._state = None
def __call__(self, policy, steps=0, episodes=0, policy_idx=0, save_img=False):
step, episode = 0, 0
while step < steps or episode < episodes:
obs = {
i: self._envs[i].reset()
for i, ob in enumerate(self._obs) if ob is None or ob['is_last']}
for i, ob in obs.items():
self._obs[i] = ob() if callable(ob) else ob
act = {k: np.zeros(v.shape) for k, v in self._act_spaces[i].items()}
tran = {k: self._convert(v) for k, v in {**ob, **act}.items()}
[fn(tran, worker=i, **self._kwargs) for fn in self._on_resets]
self._eps[i] = [tran]
obs = {k: np.stack([o[k] for o in self._obs]) for k in self._obs[0]}
actions, self._state = policy(obs, self._state, **self._kwargs)
actions = [
{k: np.array(actions[k][i]) for k in actions}
for i in range(len(self._envs))]
assert len(actions) == len(self._envs)
# if episode == 0:
should_save_img = save_img
# else:
# should_save_img = False
obs = [e.step(a) for e, a in zip(self._envs, actions)]
obs = [ob() if callable(ob) else ob for ob in obs]
for i, (act, ob) in enumerate(zip(actions, obs)):
tran = {k: self._convert(v) for k, v in {**ob, **act}.items()}
[fn(tran, worker=i, **self._kwargs) for fn in self._on_steps]
self._eps[i].append(tran)
step += 1
if ob['is_last']:
ep = self._eps[i]
ep = {k: self._convert([t[k] for t in ep]) for k in ep[0]}
[fn(ep, **self._kwargs) for fn in self._on_episodes]
episode += 1
self._obs = obs
def _convert(self, value):
value = np.array(value)
if np.issubdtype(value.dtype, np.floating):
return value.astype(np.float32)
elif np.issubdtype(value.dtype, np.signedinteger):
return value.astype(np.int32)
elif np.issubdtype(value.dtype, np.uint8):
return value.astype(np.uint8)
return value
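# Typical wiring (sketch): `envs` are the wrapped environments created
# elsewhere in the training script (each exposing an act_space dict and a
# step() that returns an obs dict with 'is_first'/'is_last' flags), and the
# policy maps a batched obs dict and recurrent state to (actions, state).
# `make_env`, `replay`, `agent`, and `config` below are placeholders.
#
#   driver = Driver([make_env(config) for _ in range(config.envs)])
#   driver.on_step(lambda tran, worker: replay.add_step(tran, worker))
#   driver.on_episode(lambda ep: print('episode length', len(ep['action'])))
#   driver(agent.policy, steps=config.train_steps)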
| cascade-main | dreamerv2/common/driver.py |
import functools
@functools.total_ordering
class Counter:
def __init__(self, initial=0):
self.value = initial
def __int__(self):
return int(self.value)
def __eq__(self, other):
return int(self) == other
def __ne__(self, other):
return int(self) != other
def __lt__(self, other):
return int(self) < other
def __add__(self, other):
return int(self) + other
def increment(self, amount=1):
self.value += amount
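# Example: a Counter compares and adds like an int while staying mutable in
# place, which is how a single global step counter can be shared between the
# training loop and the schedules in when.py.
def _counter_usage_example():
  step = Counter(0)
  step.increment(10)
  assert int(step) == 10 and step > 5 and step + 5 == 15
  return step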
| cascade-main | dreamerv2/common/counter.py |
import re
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers as tfkl
from tensorflow_probability import distributions as tfd
from tensorflow.keras.mixed_precision import experimental as prec
import common
class EnsembleRSSM(common.Module):
def __init__(
self, ensemble=5, stoch=30, deter=200, hidden=200, discrete=False,
act='elu', norm='none', std_act='softplus', min_std=0.1):
super().__init__()
self._ensemble = ensemble
self._stoch = stoch
self._deter = deter
self._hidden = hidden
self._discrete = discrete
self._act = get_act(act)
self._norm = norm
self._std_act = std_act
self._min_std = min_std
self._cell = GRUCell(self._deter, norm=True)
self._cast = lambda x: tf.cast(x, prec.global_policy().compute_dtype)
def initial(self, batch_size):
dtype = prec.global_policy().compute_dtype
if self._discrete:
state = dict(
logit=tf.zeros([batch_size, self._stoch, self._discrete], dtype),
stoch=tf.zeros([batch_size, self._stoch, self._discrete], dtype),
deter=self._cell.get_initial_state(None, batch_size, dtype))
else:
state = dict(
mean=tf.zeros([batch_size, self._stoch], dtype),
std=tf.zeros([batch_size, self._stoch], dtype),
stoch=tf.zeros([batch_size, self._stoch], dtype),
deter=self._cell.get_initial_state(None, batch_size, dtype))
return state
@tf.function
def observe(self, embed, action, is_first, state=None):
swap = lambda x: tf.transpose(x, [1, 0] + list(range(2, len(x.shape))))
if state is None:
state = self.initial(tf.shape(action)[0])
post, prior = common.static_scan(
lambda prev, inputs: self.obs_step(prev[0], *inputs),
(swap(action), swap(embed), swap(is_first)), (state, state))
post = {k: swap(v) for k, v in post.items()}
prior = {k: swap(v) for k, v in prior.items()}
return post, prior
@tf.function
def imagine(self, action, state=None):
swap = lambda x: tf.transpose(x, [1, 0] + list(range(2, len(x.shape))))
if state is None:
state = self.initial(tf.shape(action)[0])
assert isinstance(state, dict), state
action = swap(action)
prior = common.static_scan(self.img_step, action, state)
prior = {k: swap(v) for k, v in prior.items()}
return prior
def get_feat(self, state):
stoch = self._cast(state['stoch'])
if self._discrete:
shape = stoch.shape[:-2] + [self._stoch * self._discrete]
stoch = tf.reshape(stoch, shape)
return tf.concat([stoch, state['deter']], -1)
def get_dist(self, state, ensemble=False):
if ensemble:
state = self._suff_stats_ensemble(state['deter'])
if self._discrete:
logit = state['logit']
logit = tf.cast(logit, tf.float32)
dist = tfd.Independent(common.OneHotDist(logit), 1)
else:
mean, std = state['mean'], state['std']
mean = tf.cast(mean, tf.float32)
std = tf.cast(std, tf.float32)
dist = tfd.MultivariateNormalDiag(mean, std)
return dist
@tf.function
def obs_step(self, prev_state, prev_action, embed, is_first, sample=True):
# if is_first.any():
prev_state, prev_action = tf.nest.map_structure(
lambda x: tf.einsum(
'b,b...->b...', 1.0 - is_first.astype(x.dtype), x),
(prev_state, prev_action))
prior = self.img_step(prev_state, prev_action, sample)
x = tf.concat([prior['deter'], embed], -1)
x = self.get('obs_out', tfkl.Dense, self._hidden)(x)
x = self.get('obs_out_norm', NormLayer, self._norm)(x)
x = self._act(x)
stats = self._suff_stats_layer('obs_dist', x)
dist = self.get_dist(stats)
stoch = dist.sample() if sample else dist.mode()
post = {'stoch': stoch, 'deter': prior['deter'], **stats}
return post, prior
@tf.function
def img_step(self, prev_state, prev_action, sample=True):
prev_stoch = self._cast(prev_state['stoch'])
prev_action = self._cast(prev_action)
if self._discrete:
shape = prev_stoch.shape[:-2] + [self._stoch * self._discrete]
prev_stoch = tf.reshape(prev_stoch, shape)
x = tf.concat([prev_stoch, prev_action], -1)
x = self.get('img_in', tfkl.Dense, self._hidden)(x)
x = self.get('img_in_norm', NormLayer, self._norm)(x)
x = self._act(x)
deter = prev_state['deter']
x, deter = self._cell(x, [deter])
deter = deter[0] # Keras wraps the state in a list.
stats = self._suff_stats_ensemble(x)
index = tf.random.uniform((), 0, self._ensemble, tf.int32)
stats = {k: v[index] for k, v in stats.items()}
dist = self.get_dist(stats)
stoch = dist.sample() if sample else dist.mode()
prior = {'stoch': stoch, 'deter': deter, **stats}
return prior
def _suff_stats_ensemble(self, inp):
bs = list(inp.shape[:-1])
inp = inp.reshape([-1, inp.shape[-1]])
stats = []
for k in range(self._ensemble):
x = self.get(f'img_out_{k}', tfkl.Dense, self._hidden)(inp)
x = self.get(f'img_out_norm_{k}', NormLayer, self._norm)(x)
x = self._act(x)
stats.append(self._suff_stats_layer(f'img_dist_{k}', x))
stats = {
k: tf.stack([x[k] for x in stats], 0)
for k, v in stats[0].items()}
stats = {
k: v.reshape([v.shape[0]] + bs + list(v.shape[2:]))
for k, v in stats.items()}
return stats
def _suff_stats_layer(self, name, x):
if self._discrete:
x = self.get(name, tfkl.Dense, self._stoch * self._discrete, None)(x)
logit = tf.reshape(x, x.shape[:-1] + [self._stoch, self._discrete])
return {'logit': logit}
else:
x = self.get(name, tfkl.Dense, 2 * self._stoch, None)(x)
mean, std = tf.split(x, 2, -1)
std = {
'softplus': lambda: tf.nn.softplus(std),
'sigmoid': lambda: tf.nn.sigmoid(std),
'sigmoid2': lambda: 2 * tf.nn.sigmoid(std / 2),
}[self._std_act]()
std = std + self._min_std
return {'mean': mean, 'std': std}
def kl_loss(self, post, prior, forward, balance, free, free_avg):
kld = tfd.kl_divergence
sg = lambda x: tf.nest.map_structure(tf.stop_gradient, x)
lhs, rhs = (prior, post) if forward else (post, prior)
mix = balance if forward else (1 - balance)
if balance == 0.5:
value = kld(self.get_dist(lhs), self.get_dist(rhs))
loss = tf.maximum(value, free).mean()
else:
value_lhs = value = kld(self.get_dist(lhs), self.get_dist(sg(rhs)))
value_rhs = kld(self.get_dist(sg(lhs)), self.get_dist(rhs))
if free_avg:
loss_lhs = tf.maximum(value_lhs.mean(), free)
loss_rhs = tf.maximum(value_rhs.mean(), free)
else:
loss_lhs = tf.maximum(value_lhs, free).mean()
loss_rhs = tf.maximum(value_rhs, free).mean()
loss = mix * loss_lhs + (1 - mix) * loss_rhs
return loss, value
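# Note (added for clarity): with the default forward=False, the balanced KL in
# kl_loss above amounts to
#   loss = (1 - balance) * max(KL(post || sg(prior)), free)
#        +       balance * max(KL(sg(post) || prior), free)
# where sg stops gradients and `free` implements free bits (clamped per element
# or after averaging, depending on free_avg).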
class Encoder(common.Module):
def __init__(
self, shapes, cnn_keys=r'.*', mlp_keys=r'.*', act='elu', norm='none',
cnn_depth=48, cnn_kernels=(4, 4, 4, 4), mlp_layers=[400, 400, 400, 400]):
self.shapes = shapes
self.cnn_keys = [
k for k, v in shapes.items() if re.match(cnn_keys, k) and len(v) == 3]
self.mlp_keys = [
k for k, v in shapes.items() if re.match(mlp_keys, k) and len(v) == 1]
print('Encoder CNN inputs:', list(self.cnn_keys))
print('Encoder MLP inputs:', list(self.mlp_keys))
self._act = get_act(act)
self._norm = norm
self._cnn_depth = cnn_depth
self._cnn_kernels = cnn_kernels
self._mlp_layers = mlp_layers
@tf.function
def __call__(self, data):
key, shape = list(self.shapes.items())[0]
batch_dims = data[key].shape[:-len(shape)]
data = {
k: tf.reshape(v, (-1,) + tuple(v.shape)[len(batch_dims):])
for k, v in data.items()}
outputs = []
if self.cnn_keys:
outputs.append(self._cnn({k: data[k] for k in self.cnn_keys}))
if self.mlp_keys:
outputs.append(self._mlp({k: data[k] for k in self.mlp_keys}))
output = tf.concat(outputs, -1)
return output.reshape(batch_dims + output.shape[1:])
def _cnn(self, data):
x = tf.concat(list(data.values()), -1)
x = x.astype(prec.global_policy().compute_dtype)
for i, kernel in enumerate(self._cnn_kernels):
depth = 2 ** i * self._cnn_depth
x = self.get(f'conv{i}', tfkl.Conv2D, depth, kernel, 2)(x)
x = self.get(f'convnorm{i}', NormLayer, self._norm)(x)
x = self._act(x)
return x.reshape(tuple(x.shape[:-3]) + (-1,))
def _mlp(self, data):
x = tf.concat(list(data.values()), -1)
x = x.astype(prec.global_policy().compute_dtype)
for i, width in enumerate(self._mlp_layers):
x = self.get(f'dense{i}', tfkl.Dense, width)(x)
x = self.get(f'densenorm{i}', NormLayer, self._norm)(x)
x = self._act(x)
return x
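# Note (added for clarity): Encoder routes image-like observations (3-D shapes
# matching cnn_keys) through the ConvNet and flat observations (1-D shapes
# matching mlp_keys) through the MLP, then concatenates both feature vectors.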
class Decoder(common.Module):
def __init__(
self, shapes, cnn_keys=r'.*', mlp_keys=r'.*', act='elu', norm='none',
cnn_depth=48, cnn_kernels=(4, 4, 4, 4), mlp_layers=[400, 400, 400, 400]):
self._shapes = shapes
self.cnn_keys = [
k for k, v in shapes.items() if re.match(cnn_keys, k) and len(v) == 3]
self.mlp_keys = [
k for k, v in shapes.items() if re.match(mlp_keys, k) and len(v) == 1]
print('Decoder CNN outputs:', list(self.cnn_keys))
print('Decoder MLP outputs:', list(self.mlp_keys))
self._act = get_act(act)
self._norm = norm
self._cnn_depth = cnn_depth
self._cnn_kernels = cnn_kernels
self._mlp_layers = mlp_layers
def __call__(self, features):
features = tf.cast(features, prec.global_policy().compute_dtype)
outputs = {}
if self.cnn_keys:
outputs.update(self._cnn(features))
if self.mlp_keys:
outputs.update(self._mlp(features))
return outputs
def _cnn(self, features):
channels = {k: self._shapes[k][-1] for k in self.cnn_keys}
ConvT = tfkl.Conv2DTranspose
x = self.get('convin', tfkl.Dense, 32 * self._cnn_depth)(features)
x = tf.reshape(x, [-1, 1, 1, 32 * self._cnn_depth])
for i, kernel in enumerate(self._cnn_kernels):
depth = 2 ** (len(self._cnn_kernels) - i - 2) * self._cnn_depth
act, norm = self._act, self._norm
if i == len(self._cnn_kernels) - 1:
depth, act, norm = sum(channels.values()), tf.identity, 'none'
x = self.get(f'conv{i}', ConvT, depth, kernel, 2)(x)
x = self.get(f'convnorm{i}', NormLayer, norm)(x)
x = act(x)
x = x.reshape(features.shape[:-1] + x.shape[1:])
means = tf.split(x, list(channels.values()), -1)
dists = {
key: tfd.Independent(tfd.Normal(mean, 1), 3)
for (key, shape), mean in zip(channels.items(), means)}
return dists
def _mlp(self, features):
shapes = {k: self._shapes[k] for k in self.mlp_keys}
x = features
for i, width in enumerate(self._mlp_layers):
x = self.get(f'dense{i}', tfkl.Dense, width)(x)
x = self.get(f'densenorm{i}', NormLayer, self._norm)(x)
x = self._act(x)
dists = {}
for key, shape in shapes.items():
dists[key] = self.get(f'dense_{key}', DistLayer, shape)(x)
return dists
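# Note (added for clarity): Decoder mirrors the Encoder and returns a dict of
# per-key distributions: unit-variance Normals over reconstructed images for
# cnn_keys and DistLayer outputs for mlp_keys.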
class MLP(common.Module):
def __init__(self, shape, layers, units, act='elu', norm='none', **out):
self._shape = (shape,) if isinstance(shape, int) else shape
self._layers = layers
self._units = units
self._norm = norm
self._act = get_act(act)
self._out = out
def __call__(self, features):
x = tf.cast(features, prec.global_policy().compute_dtype)
x = x.reshape([-1, x.shape[-1]])
for index in range(self._layers):
x = self.get(f'dense{index}', tfkl.Dense, self._units)(x)
x = self.get(f'norm{index}', NormLayer, self._norm)(x)
x = self._act(x)
x = x.reshape(features.shape[:-1] + [x.shape[-1]])
return self.get('out', DistLayer, self._shape, **self._out)(x)
class MultiMLP(common.Module):
# MLP with a shared trunk and multiple output heads; the head is selected by the idx argument (see MultiDistLayer).
def __init__(self, shape, layers, units, act='elu', norm='none', **out):
self._shape = (shape,) if isinstance(shape, int) else shape
self._layers = layers
self._units = units
self._norm = norm
self._act = get_act(act)
self._out = out
def __call__(self, features, idx=0):
x = tf.cast(features, prec.global_policy().compute_dtype)
x = x.reshape([-1, x.shape[-1]])
for index in range(self._layers):
x = self.get(f'dense{index}', tfkl.Dense, self._units)(x)
x = self.get(f'norm{index}', NormLayer, self._norm)(x)
x = self._act(x)
x = x.reshape(features.shape[:-1] + [x.shape[-1]])
# Pass idx through so the MultiDistLayer selects the matching output head.
return self.get('out', MultiDistLayer, self._shape, **self._out)(x, idx)
class GRUCell(tf.keras.layers.AbstractRNNCell):
def __init__(self, size, norm=False, act='tanh', update_bias=-1, **kwargs):
super().__init__()
self._size = size
self._act = get_act(act)
self._norm = norm
self._update_bias = update_bias
self._layer = tfkl.Dense(3 * size, use_bias=norm is not None, **kwargs)
if norm:
self._norm = tfkl.LayerNormalization(dtype=tf.float32)
@property
def state_size(self):
return self._size
@tf.function
def call(self, inputs, state):
state = state[0] # Keras wraps the state in a list.
parts = self._layer(tf.concat([inputs, state], -1))
if self._norm:
dtype = parts.dtype
parts = tf.cast(parts, tf.float32)
parts = self._norm(parts)
parts = tf.cast(parts, dtype)
reset, cand, update = tf.split(parts, 3, -1)
reset = tf.nn.sigmoid(reset)
cand = self._act(reset * cand)
update = tf.nn.sigmoid(update + self._update_bias)
output = update * cand + (1 - update) * state
return output, [output]
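# Note (added for clarity): this GRU variant optionally layer-normalizes the
# concatenated gate pre-activations and uses update_bias=-1, which pushes the
# update gate toward keeping the previous state early in training.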
class DistLayer(common.Module):
def __init__(
self, shape, dist='mse', min_std=0.1, init_std=0.0):
self._shape = shape
self._dist = dist
self._min_std = min_std
self._init_std = init_std
def __call__(self, inputs):
out = self.get('out', tfkl.Dense, np.prod(self._shape))(inputs)
out = tf.reshape(out, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
out = tf.cast(out, tf.float32)
if self._dist in ('normal', 'tanh_normal', 'trunc_normal'):
std = self.get('std', tfkl.Dense, np.prod(self._shape))(inputs)
std = tf.reshape(std, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
std = tf.cast(std, tf.float32)
if self._dist == 'mse':
dist = tfd.Normal(out, 1.0)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'normal':
dist = tfd.Normal(out, std)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'binary':
dist = tfd.Bernoulli(out)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'tanh_normal':
mean = 5 * tf.tanh(out / 5)
std = tf.nn.softplus(std + self._init_std) + self._min_std
dist = tfd.Normal(mean, std)
dist = tfd.TransformedDistribution(dist, common.TanhBijector())
dist = tfd.Independent(dist, len(self._shape))
return common.SampleDist(dist)
if self._dist == 'trunc_normal':
std = 2 * tf.nn.sigmoid((std + self._init_std) / 2) + self._min_std
dist = common.TruncNormalDist(tf.tanh(out), std, -1, 1)
return tfd.Independent(dist, 1)
if self._dist == 'onehot':
return common.OneHotDist(out)
raise NotImplementedError(self._dist)
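# Illustrative usage sketch (assumed, not part of the original file): a bounded
# continuous action head could be built as
#   head = DistLayer((act_dim,), dist='trunc_normal', min_std=0.1)
#   dist = head(features)   # truncated normal over [-1, 1], Independent over the last axis
#   action = dist.sample()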
class MultiDistLayer(common.Module):
def __init__(
self, shape, dist='mse', min_std=0.1, init_std=0.0):
self._shape = shape
self._dist = dist
self._min_std = min_std
self._init_std = init_std
def __call__(self, inputs, idx=0):
out = self.get(f'out{idx}', tfkl.Dense, np.prod(self._shape))(inputs)
out = tf.reshape(out, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
out = tf.cast(out, tf.float32)
if self._dist in ('normal', 'tanh_normal', 'trunc_normal'):
std = self.get(f'std{idx}', tfkl.Dense, np.prod(self._shape))(inputs)
std = tf.reshape(std, tf.concat([tf.shape(inputs)[:-1], self._shape], 0))
std = tf.cast(std, tf.float32)
if self._dist == 'mse':
dist = tfd.Normal(out, 1.0)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'normal':
dist = tfd.Normal(out, std)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'binary':
dist = tfd.Bernoulli(out)
return tfd.Independent(dist, len(self._shape))
if self._dist == 'tanh_normal':
mean = 5 * tf.tanh(out / 5)
std = tf.nn.softplus(std + self._init_std) + self._min_std
dist = tfd.Normal(mean, std)
dist = tfd.TransformedDistribution(dist, common.TanhBijector())
dist = tfd.Independent(dist, len(self._shape))
return common.SampleDist(dist)
if self._dist == 'trunc_normal':
std = 2 * tf.nn.sigmoid((std + self._init_std) / 2) + self._min_std
dist = common.TruncNormalDist(tf.tanh(out), std, -1, 1)
return tfd.Independent(dist, 1)
if self._dist == 'onehot':
return common.OneHotDist(out)
raise NotImplementedError(self._dist)
class NormLayer(common.Module):
def __init__(self, name):
if name == 'none':
self._layer = None
elif name == 'layer':
self._layer = tfkl.LayerNormalization()
else:
raise NotImplementedError(name)
def __call__(self, features):
if not self._layer:
return features
return self._layer(features)
def get_act(name):
if name == 'none':
return tf.identity
if name == 'mish':
return lambda x: x * tf.math.tanh(tf.nn.softplus(x))
elif hasattr(tf.nn, name):
return getattr(tf.nn, name)
elif hasattr(tf, name):
return getattr(tf, name)
else:
raise NotImplementedError(name)
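# Note (added for clarity): get_act maps activation names to callables, e.g.
# 'elu' -> tf.nn.elu, 'mish' -> x * tanh(softplus(x)), and 'none' -> tf.identity.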
| cascade-main | dreamerv2/common/nets.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
import os
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.suite.utils import randomizers
from dm_control.utils import containers
from dm_control.utils import rewards
from dm_control.utils import io as resources
from dm_control import suite
_DEFAULT_TIME_LIMIT = 25
_CONTROL_TIMESTEP = .025
# Minimal height of torso over foot above which stand reward is 1.
_STAND_HEIGHT = 1.2
# Horizontal speeds (meters/second) above which move reward is 1.
_WALK_SPEED = 1
_RUN_SPEED = 8
_SPIN_SPEED = 5
SUITE = containers.TaggedTasks()
def make_walker(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward=False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
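# Illustrative usage (assumed): tasks are looked up by name in SUITE, so
#   env = make_walker('flip')   # single spin reward
#   env = make_walker('all')    # dict of stand/walk/run/flip rewards
# are the two tasks registered in this file.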
def get_model_and_assets():
"""Returns a tuple containing the model XML string and a dict of assets."""
root_dir = os.path.dirname(os.path.dirname(__file__))
xml = resources.GetResource(os.path.join(root_dir, 'cdmc',
'walker.xml'))
return xml, common.ASSETS
@SUITE.add('benchmarking')
def flip(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = PlanarWalker(move_speed=_RUN_SPEED,
forward=True,
flip=True,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
@SUITE.add('benchmarking')
def all(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = PlanarWalker(move_speed=_RUN_SPEED,
forward=True,
flip=True,
all=True,
random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
control_timestep=_CONTROL_TIMESTEP,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Walker domain."""
def torso_upright(self):
"""Returns projection from z-axes of torso to the z-axes of world."""
return self.named.data.xmat['torso', 'zz']
def torso_height(self):
"""Returns the height of the torso."""
return self.named.data.xpos['torso', 'z']
def horizontal_velocity(self):
"""Returns the horizontal velocity of the center-of-mass."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def orientations(self):
"""Returns planar orientations of all bodies."""
return self.named.data.xmat[1:, ['xx', 'xz']].ravel()
def angmomentum(self):
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class PlanarWalker(base.Task):
"""A planar walker task."""
def __init__(self, move_speed, forward=True, flip=False, random=None, all=False):
"""Initializes an instance of `PlanarWalker`.
Args:
move_speed: A float. If this value is zero, reward is given simply for
standing up. Otherwise this specifies a target horizontal velocity for
the walking task.
forward: A bool, whether positive horizontal velocity is rewarded (True) or
negative velocity (False).
flip: A bool, whether to reward spinning (angular momentum) instead of
horizontal movement.
all: A bool, whether get_reward returns a dict of stand/walk/run/flip rewards
instead of a single scalar.
random: Optional, either a `numpy.random.RandomState` instance, an
integer seed for creating a new `RandomState`, or None to select a seed
automatically (default).
"""
self._move_speed = move_speed
self._forward = 1 if forward else -1
self._flip = flip
self._all = all
super(PlanarWalker, self).__init__(random=random)
def initialize_episode(self, physics):
"""Sets the state of the environment at the start of each episode.
In 'standing' mode, use initial orientation and small velocities.
In 'random' mode, randomize joint angles and let fall to the floor.
Args:
physics: An instance of `Physics`.
"""
randomizers.randomize_limited_and_rotational_joints(
physics, self.random)
super(PlanarWalker, self).initialize_episode(physics)
def get_observation(self, physics):
"""Returns an observation of body orientations, height and velocites."""
obs = collections.OrderedDict()
obs['orientations'] = physics.orientations()
obs['height'] = physics.torso_height()
obs['velocity'] = physics.velocity()
return obs
def get_reward(self, physics):
"""Returns a reward to the agent."""
standing = rewards.tolerance(physics.torso_height(),
bounds=(_STAND_HEIGHT, float('inf')),
margin=_STAND_HEIGHT / 2)
upright = (1 + physics.torso_upright()) / 2
stand_reward = (3 * standing + upright) / 4
if self._flip:
move_reward = rewards.tolerance(self._forward *
physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
move_reward = rewards.tolerance(
self._forward * physics.horizontal_velocity(),
bounds=(self._move_speed, float('inf')),
margin=self._move_speed / 2,
value_at_margin=0.5,
sigmoid='linear')
if self._all:
walk_reward = rewards.tolerance(
self._forward * physics.horizontal_velocity(),
bounds=(_WALK_SPEED, float('inf')),
margin=_WALK_SPEED / 2,
value_at_margin=0.5,
sigmoid='linear')
run_reward = rewards.tolerance(
self._forward * physics.horizontal_velocity(),
bounds=(_RUN_SPEED, float('inf')),
margin=_RUN_SPEED / 2,
value_at_margin=0.5,
sigmoid='linear')
flip_reward = rewards.tolerance(self._forward *
physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
reward_dict = {
'stand': stand_reward,
'walk': stand_reward * (5*walk_reward + 1) / 6,
'run': stand_reward * (5*run_reward + 1) / 6,
'flip': flip_reward
}
return reward_dict
else:
return stand_reward * (5 * move_reward + 1) / 6 | cascade-main | dreamerv2/common/cdmc/walker.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .walker import make_walker
from .cheetah import make_cheetah
def make_dmc_all(domain, task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward=False):
if domain == 'walker':
return make_walker(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
elif domain == 'cheetah':
return make_cheetah(task,
task_kwargs=task_kwargs,
environment_kwargs=environment_kwargs,
visualize_reward=visualize_reward)
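# Illustrative usage (assumed, not part of the original file):
#   env = make_dmc_all('walker', 'all')
#   # per-task rewards returned by env.step() are keyed by
#   # DMC_TASK_IDS['dmc_walker_all'] below.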
DMC_TASK_IDS = {
'dmc_walker_all': ['stand', 'walk', 'run', 'flip'],
'dmc_cheetah_all': ['run-fwd', 'run-bwd', 'flip-fwd', 'flip-bwd'],
} | cascade-main | dreamerv2/common/cdmc/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
from dm_control import mujoco
from dm_control.rl import control
from dm_control.suite import base
from dm_control.suite import common
from dm_control.utils import containers
from dm_control.utils import rewards
# How long the simulation will run, in seconds.
_DEFAULT_TIME_LIMIT = 10
# Running speed above which reward is 1.
_RUN_SPEED = 10
_SPIN_SPEED = 5
SUITE = containers.TaggedTasks()
def make_cheetah(task,
task_kwargs=None,
environment_kwargs=None,
visualize_reward=False):
task_kwargs = task_kwargs or {}
if environment_kwargs is not None:
task_kwargs = task_kwargs.copy()
task_kwargs['environment_kwargs'] = environment_kwargs
env = SUITE[task](**task_kwargs)
env.task.visualize_reward = visualize_reward
return env
def get_model_and_assets():
"""Returns a tuple containing the model XML string and a dict of assets."""
return common.read_model('cheetah.xml'), common.ASSETS
@SUITE.add('benchmarking')
def run(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=True,random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def run_back(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=False,random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip_forward(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=False,flip=True,random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def flip_backward(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns the run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=True,flip=True,random=random)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics, task, time_limit=time_limit,
**environment_kwargs)
@SUITE.add('benchmarking')
def all(time_limit=_DEFAULT_TIME_LIMIT,
random=None,
environment_kwargs=None):
"""Returns the Run task."""
physics = Physics.from_xml_string(*get_model_and_assets())
task = Cheetah(forward=True,flip=True,random=random,all=True)
environment_kwargs = environment_kwargs or {}
return control.Environment(physics,
task,
time_limit=time_limit,
**environment_kwargs)
class Physics(mujoco.Physics):
"""Physics simulation with additional features for the Cheetah domain."""
def speed(self):
"""Returns the horizontal speed of the Cheetah."""
return self.named.data.sensordata['torso_subtreelinvel'][0]
def angmomentum(self):
"""Returns the angular momentum of torso of the Cheetah about Y axis."""
return self.named.data.subtree_angmom['torso'][1]
class Cheetah(base.Task):
"""A `Task` to train a running Cheetah."""
def __init__(self, forward=True, flip=False, random=None, all=False):
self._forward = 1 if forward else -1
self._flip = flip
self._all = all
super(Cheetah, self).__init__(random=random)
def initialize_episode(self, physics):
"""Sets the state of the environment at the start of each episode."""
# The indexing below assumes that all joints have a single DOF.
assert physics.model.nq == physics.model.njnt
is_limited = physics.model.jnt_limited == 1
lower, upper = physics.model.jnt_range[is_limited].T
physics.data.qpos[is_limited] = self.random.uniform(lower, upper)
# Stabilize the model before the actual simulation.
for _ in range(200):
physics.step()
physics.data.time = 0
self._timeout_progress = 0
super(Cheetah, self).initialize_episode(physics)
def get_observation(self, physics):
"""Returns an observation of the state, ignoring horizontal position."""
obs = collections.OrderedDict()
# Ignores horizontal position to maintain translational invariance.
obs['position'] = physics.data.qpos[1:].copy()
obs['velocity'] = physics.velocity()
return obs
def get_reward(self, physics):
"""Returns a reward to the agent."""
if self._flip:
reward = rewards.tolerance(self._forward*physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
else:
reward = rewards.tolerance(self._forward*physics.speed(),
bounds=(_RUN_SPEED, float('inf')),
margin=_RUN_SPEED,
value_at_margin=0,
sigmoid='linear')
if self._all:
flip_fwd = rewards.tolerance(1*physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
flip_bwd = rewards.tolerance(-1*physics.angmomentum(),
bounds=(_SPIN_SPEED, float('inf')),
margin=_SPIN_SPEED,
value_at_margin=0,
sigmoid='linear')
run_fwd = rewards.tolerance(1*physics.speed(),
bounds=(_RUN_SPEED, float('inf')),
margin=_RUN_SPEED,
value_at_margin=0,
sigmoid='linear')
run_bwd = rewards.tolerance(-1*physics.speed(),
bounds=(_RUN_SPEED, float('inf')),
margin=_RUN_SPEED,
value_at_margin=0,
sigmoid='linear')
reward = {
'run-fwd': run_fwd,
'run-bwd': run_bwd,
'flip-fwd': flip_fwd,
'flip-bwd': flip_bwd
}
return reward | cascade-main | dreamerv2/common/cdmc/cheetah.py |
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by square
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
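# For example (illustrative): an opening such as
#   AC_INIT([Project Name], [1.7.0], [bug-report-address])
# is matched below with major_version='1', minor_version='7', fix_version='0'.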
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| dimmwitted-master | lib/gtest-1.7.0/xcode/Scripts/versiongenerate.py |
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module"""
__author__ = '[email protected] (Sean Mcafee)'
import datetime
import errno
import os
import re
import sys
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_FILTER_FLAG = '--gtest_filter'
GTEST_LIST_TESTS_FLAG = '--gtest_list_tests'
GTEST_OUTPUT_FLAG = "--gtest_output"
GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml"
GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_"
SUPPORTS_STACK_TRACES = False
if SUPPORTS_STACK_TRACES:
STACK_TRACE_TEMPLATE = '\nStack trace:\n*'
else:
STACK_TRACE_TEMPLATE = ''
EXPECTED_NON_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="23" failures="4" disabled="2" errors="0" time="*" timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
<testsuite name="FailedTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="Fails" status="run" time="*" classname="FailedTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="MixedResultTest" tests="3" failures="1" disabled="1" errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="MixedResultTest"/>
<testcase name="Fails" status="run" time="*" classname="MixedResultTest">
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 2
Expected: 1%(stack)s]]></failure>
<failure message="gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Value of: 3
Expected: 2%(stack)s]]></failure>
</testcase>
<testcase name="DISABLED_test" status="notrun" time="*" classname="MixedResultTest"/>
</testsuite>
<testsuite name="XmlQuotingTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="OutputsCData" status="run" time="*" classname="XmlQuotingTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]></top>" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
XML output: <?xml encoding="utf-8"><top><![CDATA[cdata text]]>]]><![CDATA[</top>%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="InvalidCharactersTest" tests="1" failures="1" disabled="0" errors="0" time="*">
<testcase name="InvalidCharactersInMessage" status="run" time="*" classname="InvalidCharactersTest">
<failure message="gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []" type=""><![CDATA[gtest_xml_output_unittest_.cc:*
Failed
Invalid characters in brackets []%(stack)s]]></failure>
</testcase>
</testsuite>
<testsuite name="DisabledTest" tests="1" failures="0" disabled="1" errors="0" time="*">
<testcase name="DISABLED_test_not_run" status="notrun" time="*" classname="DisabledTest"/>
</testsuite>
<testsuite name="PropertyRecordingTest" tests="4" failures="0" disabled="0" errors="0" time="*" SetUpTestCase="yes" TearDownTestCase="aye">
<testcase name="OneProperty" status="run" time="*" classname="PropertyRecordingTest" key_1="1"/>
<testcase name="IntValuedProperty" status="run" time="*" classname="PropertyRecordingTest" key_int="1"/>
<testcase name="ThreeProperties" status="run" time="*" classname="PropertyRecordingTest" key_1="1" key_2="2" key_3="3"/>
<testcase name="TwoValuesForOneKeyUsesLastValue" status="run" time="*" classname="PropertyRecordingTest" key_1="2"/>
</testsuite>
<testsuite name="NoFixtureTest" tests="3" failures="0" disabled="0" errors="0" time="*">
<testcase name="RecordProperty" status="run" time="*" classname="NoFixtureTest" key="1"/>
<testcase name="ExternalUtilityThatCallsRecordIntValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_int="1"/>
<testcase name="ExternalUtilityThatCallsRecordStringValuedProperty" status="run" time="*" classname="NoFixtureTest" key_for_utility_string="1"/>
</testsuite>
<testsuite name="Single/ValueParamTest" tests="4" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="HasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/0" value_param="33" status="run" time="*" classname="Single/ValueParamTest" />
<testcase name="AnotherTestThatHasValueParamAttribute/1" value_param="42" status="run" time="*" classname="Single/ValueParamTest" />
</testsuite>
<testsuite name="TypedTest/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/0" />
</testsuite>
<testsuite name="TypedTest/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="TypedTest/1" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/0" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/0" />
</testsuite>
<testsuite name="Single/TypeParameterizedTestCase/1" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="HasTypeParamAttribute" type_param="*" status="run" time="*" classname="Single/TypeParameterizedTestCase/1" />
</testsuite>
</testsuites>""" % {'stack': STACK_TRACE_TEMPLATE}
EXPECTED_FILTERED_TEST_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests" ad_hoc_property="42">
<testsuite name="SuccessfulTest" tests="1" failures="0" disabled="0"
errors="0" time="*">
<testcase name="Succeeds" status="run" time="*" classname="SuccessfulTest"/>
</testsuite>
</testsuites>"""
EXPECTED_EMPTY_XML = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="0" failures="0" disabled="0" errors="0" time="*"
timestamp="*" name="AllTests">
</testsuites>"""
GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME)
SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess(
[GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output
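# Note (added for clarity): typed-test support is detected by listing the test
# binary's tests and checking for a 'TypedTest' suite; testNonEmptyXmlOutput
# below is only defined when such tests are available.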
class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase):
"""
Unit test for Google Test's XML output functionality.
"""
# This test currently breaks on platforms that do not support typed and
# type-parameterized tests, so we don't run it under them.
if SUPPORTS_TYPED_TESTS:
def testNonEmptyXmlOutput(self):
"""
Runs a test program that generates a non-empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1)
def testEmptyXmlOutput(self):
"""Verifies XML output for a Google Test binary without actual tests.
Runs a test program that generates an empty XML output, and
tests that the XML output is expected.
"""
self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0)
def testTimestampValue(self):
"""Checks whether the timestamp attribute in the XML output is valid.
Runs a test program that generates an empty XML output, and checks if
the timestamp attribute in the testsuites tag is valid.
"""
actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0)
date_time_str = actual.documentElement.getAttributeNode('timestamp').value
# datetime.strptime() is only available in Python 2.5+ so we have to
# parse the expected datetime manually.
match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
self.assertTrue(
match,
'XML datetime string %s has incorrect format' % date_time_str)
date_time_from_xml = datetime.datetime(
year=int(match.group(1)), month=int(match.group(2)),
day=int(match.group(3)), hour=int(match.group(4)),
minute=int(match.group(5)), second=int(match.group(6)))
time_delta = abs(datetime.datetime.now() - date_time_from_xml)
# timestamp value should be near the current local time
self.assertTrue(time_delta < datetime.timedelta(seconds=600),
'time_delta is %s' % time_delta)
actual.unlink()
def testDefaultOutputFile(self):
"""
Confirms that Google Test produces an XML output file with the expected
default name if no name is explicitly specified.
"""
output_file = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_DEFAULT_OUTPUT_FILE)
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
'gtest_no_test_unittest')
try:
os.remove(output_file)
except OSError, e:
if e.errno != errno.ENOENT:
raise
p = gtest_test_utils.Subprocess(
[gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
self.assert_(os.path.isfile(output_file))
def testSuppressedXmlOutput(self):
"""
Tests that no XML file is generated if the default XML listener is
shut down before RUN_ALL_TESTS is invoked.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_PROGRAM_NAME + 'out.xml')
if os.path.isfile(xml_path):
os.remove(xml_path)
command = [GTEST_PROGRAM_PATH,
'%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
'--shut_down_xml']
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
# p.signal is available only if p.terminated_by_signal is True.
self.assertFalse(
p.terminated_by_signal,
'%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(1, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, 1))
self.assert_(not os.path.isfile(xml_path))
def testFilteredTestXmlOutput(self):
"""Verifies XML output when a filter is applied.
Runs a test program that executes only some tests and verifies that
non-selected tests do not show up in the XML output.
"""
self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
"""
Returns the xml output generated by running the program gtest_prog_name.
Furthermore, the program's exit code must be expected_exit_code.
"""
xml_path = os.path.join(gtest_test_utils.GetTempDir(),
gtest_prog_name + 'out.xml')
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
extra_args)
p = gtest_test_utils.Subprocess(command)
if p.terminated_by_signal:
self.assert_(False,
'%s was killed by signal %d' % (gtest_prog_name, p.signal))
else:
self.assert_(p.exited)
self.assertEquals(expected_exit_code, p.exit_code,
"'%s' exited with code %s, which doesn't match "
'the expected exit code %s.'
% (command, p.exit_code, expected_exit_code))
actual = minidom.parse(xml_path)
return actual
def _TestXmlOutput(self, gtest_prog_name, expected_xml,
expected_exit_code, extra_args=None):
"""
Asserts that the XML document generated by running the program
gtest_prog_name matches expected_xml, a string containing another
XML document. Furthermore, the program's exit code must be
expected_exit_code.
"""
actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
expected_exit_code)
expected = minidom.parseString(expected_xml)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == '__main__':
os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
gtest_test_utils.Main()
| dimmwitted-master | lib/gtest-1.7.0/test/gtest_xml_output_unittest.py |