# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Visualization library is mostly based on TensorFlow object detection API:
# https://github.com/tensorflow/models/tree/master/research/object_detection
# Source file: TensorFlow2/Detection/Efficientdet/visualize/__init__.py (repo: DeepLearningExamples-master)
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains classes specifying naming conventions used for object detection.
Specifies:
InputDataFields: standard fields used by reader/preprocessor/batcher.
DetectionResultFields: standard fields returned by object detector.
BoxListFields: standard fields used by BoxList.
TfExampleFields: standard fields for tf-example data format (go/tf-example).
"""
class InputDataFields(object):
"""Names for the input tensors.
Holds the standard data field names used to identify input tensors. The
decoder should use these names as keys in the tensor_dict it returns, and the
model should use them to look up the tensors it needs.
Attributes:
image: image.
image_additional_channels: additional channels.
original_image: image in the original input size.
original_image_spatial_shape: image in the original input size.
key: unique key corresponding to image.
source_id: source of the original image.
filename: original filename of the dataset (without common path).
groundtruth_image_classes: image-level class labels.
groundtruth_image_confidences: image-level class confidences.
groundtruth_boxes: coordinates of the ground truth boxes in the image.
groundtruth_classes: box-level class labels.
groundtruth_confidences: box-level class confidences. The shape should be
the same as the shape of groundtruth_classes.
groundtruth_label_types: box-level label types (e.g. explicit negative).
groundtruth_is_crowd: [DEPRECATED, use groundtruth_group_of instead]
is the groundtruth a single object or a crowd.
groundtruth_area: area of a groundtruth segment.
groundtruth_difficult: is a `difficult` object
groundtruth_group_of: is a `group_of` objects, e.g. multiple objects of the
same class, forming a connected group, where instances are heavily
occluding each other.
proposal_boxes: coordinates of object proposal boxes.
proposal_objectness: objectness score of each proposal.
groundtruth_instance_masks: ground truth instance masks.
groundtruth_instance_boundaries: ground truth instance boundaries.
groundtruth_instance_classes: instance mask-level class labels.
groundtruth_keypoints: ground truth keypoints.
groundtruth_keypoint_visibilities: ground truth keypoint visibilities.
groundtruth_keypoint_weights: groundtruth weight factor for keypoints.
groundtruth_label_weights: groundtruth label weights.
groundtruth_weights: groundtruth weight factor for bounding boxes.
num_groundtruth_boxes: number of groundtruth boxes.
is_annotated: whether an image has been labeled or not.
true_image_shape: true shape of each image within the resized image, since
resized images can be padded with zeros.
multiclass_scores: the label score per class for each box.
context_features: a flattened list of contextual features.
context_feature_length: the fixed length of each feature in
context_features, used for reshaping.
valid_context_size: the valid context size, used in filtering the padded
context features.
"""
image = 'image'
image_additional_channels = 'image_additional_channels'
original_image = 'original_image'
original_image_spatial_shape = 'original_image_spatial_shape'
key = 'key'
source_id = 'source_id'
filename = 'filename'
groundtruth_image_classes = 'groundtruth_image_classes'
groundtruth_image_confidences = 'groundtruth_image_confidences'
groundtruth_boxes = 'groundtruth_boxes'
groundtruth_classes = 'groundtruth_classes'
groundtruth_confidences = 'groundtruth_confidences'
groundtruth_label_types = 'groundtruth_label_types'
groundtruth_is_crowd = 'groundtruth_is_crowd'
groundtruth_area = 'groundtruth_area'
groundtruth_difficult = 'groundtruth_difficult'
groundtruth_group_of = 'groundtruth_group_of'
proposal_boxes = 'proposal_boxes'
proposal_objectness = 'proposal_objectness'
groundtruth_instance_masks = 'groundtruth_instance_masks'
groundtruth_instance_boundaries = 'groundtruth_instance_boundaries'
groundtruth_instance_classes = 'groundtruth_instance_classes'
groundtruth_keypoints = 'groundtruth_keypoints'
groundtruth_keypoint_visibilities = 'groundtruth_keypoint_visibilities'
groundtruth_keypoint_weights = 'groundtruth_keypoint_weights'
groundtruth_label_weights = 'groundtruth_label_weights'
groundtruth_weights = 'groundtruth_weights'
num_groundtruth_boxes = 'num_groundtruth_boxes'
is_annotated = 'is_annotated'
true_image_shape = 'true_image_shape'
multiclass_scores = 'multiclass_scores'
context_features = 'context_features'
context_feature_length = 'context_feature_length'
valid_context_size = 'valid_context_size'
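# Illustrative sketch (assumption, not part of the original module): the string
# constants above act as standard dictionary keys. A decoder would typically
# build a tensor_dict keyed by these names; the shapes and values below are
# made-up placeholders, and numpy is assumed to be available.
def _example_input_tensor_dict():
  import numpy as np
  fields = InputDataFields
  tensor_dict = {
      fields.image: np.zeros((640, 640, 3), dtype=np.uint8),
      fields.groundtruth_boxes: np.array([[0.1, 0.1, 0.5, 0.5]], np.float32),
      fields.groundtruth_classes: np.array([1], np.int64),
      fields.num_groundtruth_boxes: 1,
  }
  # A model then looks up the tensors it needs by the same standard names.
  return tensor_dict[fields.groundtruth_boxes]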
class DetectionResultFields(object):
"""Naming conventions for storing the output of the detector.
Attributes:
source_id: source of the original image.
key: unique key corresponding to image.
detection_boxes: coordinates of the detection boxes in the image.
detection_scores: detection scores for the detection boxes in the image.
detection_multiclass_scores: class score distribution (including background)
for each detection box in the image.
detection_classes: detection-level class labels.
detection_masks: contains a segmentation mask for each detection box.
detection_boundaries: contains an object boundary for each detection box.
detection_keypoints: contains detection keypoints for each detection box.
detection_keypoint_scores: contains detection keypoint scores.
num_detections: number of detections in the batch.
raw_detection_boxes: contains decoded detection boxes without Non-Max
suppression.
raw_detection_scores: contains class score logits for raw detection boxes.
detection_anchor_indices: The anchor indices of the detections after NMS.
detection_features: contains extracted features for each detected box
after NMS.
"""
source_id = 'source_id'
key = 'key'
detection_boxes = 'detection_boxes'
detection_scores = 'detection_scores'
detection_multiclass_scores = 'detection_multiclass_scores'
detection_features = 'detection_features'
detection_classes = 'detection_classes'
detection_masks = 'detection_masks'
detection_boundaries = 'detection_boundaries'
detection_keypoints = 'detection_keypoints'
detection_keypoint_scores = 'detection_keypoint_scores'
num_detections = 'num_detections'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_scores = 'raw_detection_scores'
detection_anchor_indices = 'detection_anchor_indices'
class BoxListFields(object):
"""Naming conventions for BoxLists.
Attributes:
boxes: bounding box coordinates.
classes: classes per bounding box.
scores: scores per bounding box.
weights: sample weights per bounding box.
confidences: confidences per bounding box.
objectness: objectness score per bounding box.
masks: masks per bounding box.
boundaries: boundaries per bounding box.
keypoints: keypoints per bounding box.
keypoint_heatmaps: keypoint heatmaps per bounding box.
is_crowd: is_crowd annotation per bounding box.
"""
boxes = 'boxes'
classes = 'classes'
scores = 'scores'
weights = 'weights'
confidences = 'confidences'
objectness = 'objectness'
masks = 'masks'
boundaries = 'boundaries'
keypoints = 'keypoints'
keypoint_heatmaps = 'keypoint_heatmaps'
is_crowd = 'is_crowd'
class PredictionFields(object):
"""Naming conventions for standardized prediction outputs.
Attributes:
feature_maps: List of feature maps for prediction.
anchors: Generated anchors.
raw_detection_boxes: Decoded detection boxes without NMS.
raw_detection_feature_map_indices: Feature map indices from which each raw
detection box was produced.
"""
feature_maps = 'feature_maps'
anchors = 'anchors'
raw_detection_boxes = 'raw_detection_boxes'
raw_detection_feature_map_indices = 'raw_detection_feature_map_indices'
class TfExampleFields(object):
"""TF-example proto feature names for object detection.
Holds the standard feature names to load from an Example proto for object
detection.
Attributes:
image_encoded: JPEG encoded string
image_format: image format, e.g. "JPEG"
filename: filename
channels: number of channels of image
colorspace: colorspace, e.g. "RGB"
height: height of image in pixels, e.g. 462
width: width of image in pixels, e.g. 581
source_id: original source of the image
image_class_text: image-level label in text format
image_class_label: image-level label in numerical format
object_class_text: labels in text format, e.g. ["person", "cat"]
object_class_label: labels in numbers, e.g. [16, 8]
object_bbox_xmin: xmin coordinates of groundtruth box, e.g. 10, 30
object_bbox_xmax: xmax coordinates of groundtruth box, e.g. 50, 40
object_bbox_ymin: ymin coordinates of groundtruth box, e.g. 40, 50
object_bbox_ymax: ymax coordinates of groundtruth box, e.g. 80, 70
object_view: viewpoint of object, e.g. ["frontal", "left"]
object_truncated: is object truncated, e.g. [true, false]
object_occluded: is object occluded, e.g. [true, false]
object_difficult: is object difficult, e.g. [true, false]
object_group_of: is object a single object or a group of objects
object_depiction: is object a depiction
object_is_crowd: [DEPRECATED, use object_group_of instead]
is the object a single object or a crowd
object_segment_area: the area of the segment.
object_weight: a weight factor for the object's bounding box.
instance_masks: instance segmentation masks.
instance_boundaries: instance boundaries.
instance_classes: Classes for each instance segmentation mask.
detection_class_label: class label in numbers.
detection_bbox_ymin: ymin coordinates of a detection box.
detection_bbox_xmin: xmin coordinates of a detection box.
detection_bbox_ymax: ymax coordinates of a detection box.
detection_bbox_xmax: xmax coordinates of a detection box.
detection_score: detection score for the class label and box.
"""
image_encoded = 'image/encoded'
image_format = 'image/format' # format is reserved keyword
filename = 'image/filename'
channels = 'image/channels'
colorspace = 'image/colorspace'
height = 'image/height'
width = 'image/width'
source_id = 'image/source_id'
image_class_text = 'image/class/text'
image_class_label = 'image/class/label'
object_class_text = 'image/object/class/text'
object_class_label = 'image/object/class/label'
object_bbox_ymin = 'image/object/bbox/ymin'
object_bbox_xmin = 'image/object/bbox/xmin'
object_bbox_ymax = 'image/object/bbox/ymax'
object_bbox_xmax = 'image/object/bbox/xmax'
object_view = 'image/object/view'
object_truncated = 'image/object/truncated'
object_occluded = 'image/object/occluded'
object_difficult = 'image/object/difficult'
object_group_of = 'image/object/group_of'
object_depiction = 'image/object/depiction'
object_is_crowd = 'image/object/is_crowd'
object_segment_area = 'image/object/segment/area'
object_weight = 'image/object/weight'
instance_masks = 'image/segmentation/object'
instance_boundaries = 'image/boundaries/object'
instance_classes = 'image/segmentation/object/class'
detection_class_label = 'image/detection/label'
detection_bbox_ymin = 'image/detection/bbox/ymin'
detection_bbox_xmin = 'image/detection/bbox/xmin'
detection_bbox_ymax = 'image/detection/bbox/ymax'
detection_bbox_xmax = 'image/detection/bbox/xmax'
detection_score = 'image/detection/score'
# Source file: TensorFlow2/Detection/Efficientdet/visualize/standard_fields.py (repo: DeepLearningExamples-master)
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A set of functions that are used for visualization.
These functions typically receive an image and perform some visualization on it.
The functions do not return a value; instead they modify the image in place.
"""
import abc
import collections
import matplotlib
matplotlib.use('Agg') # Set headless-friendly backend.
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import numpy as np
import PIL.Image as Image
import PIL.ImageColor as ImageColor
import PIL.ImageDraw as ImageDraw
import PIL.ImageFont as ImageFont
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
from visualize import shape_utils
from visualize import standard_fields as fields
_TITLE_LEFT_MARGIN = 10
_TITLE_TOP_MARGIN = 10
STANDARD_COLORS = [
'AliceBlue', 'Chartreuse', 'Aqua', 'Aquamarine', 'Azure', 'Beige', 'Bisque',
'BlanchedAlmond', 'BlueViolet', 'BurlyWood', 'CadetBlue', 'AntiqueWhite',
'Chocolate', 'Coral', 'CornflowerBlue', 'Cornsilk', 'Crimson', 'Cyan',
'DarkCyan', 'DarkGoldenRod', 'DarkGrey', 'DarkKhaki', 'DarkOrange',
'DarkOrchid', 'DarkSalmon', 'DarkSeaGreen', 'DarkTurquoise', 'DarkViolet',
'DeepPink', 'DeepSkyBlue', 'DodgerBlue', 'FireBrick', 'FloralWhite',
'ForestGreen', 'Fuchsia', 'Gainsboro', 'GhostWhite', 'Gold', 'GoldenRod',
'Salmon', 'Tan', 'HoneyDew', 'HotPink', 'IndianRed', 'Ivory', 'Khaki',
'Lavender', 'LavenderBlush', 'LawnGreen', 'LemonChiffon', 'LightBlue',
'LightCoral', 'LightCyan', 'LightGoldenRodYellow', 'LightGray', 'LightGrey',
'LightGreen', 'LightPink', 'LightSalmon', 'LightSeaGreen', 'LightSkyBlue',
'LightSlateGray', 'LightSlateGrey', 'LightSteelBlue', 'LightYellow', 'Lime',
'LimeGreen', 'Linen', 'Magenta', 'MediumAquaMarine', 'MediumOrchid',
'MediumPurple', 'MediumSeaGreen', 'MediumSlateBlue', 'MediumSpringGreen',
'MediumTurquoise', 'MediumVioletRed', 'MintCream', 'MistyRose', 'Moccasin',
'NavajoWhite', 'OldLace', 'Olive', 'OliveDrab', 'Orange', 'OrangeRed',
'Orchid', 'PaleGoldenRod', 'PaleGreen', 'PaleTurquoise', 'PaleVioletRed',
'PapayaWhip', 'PeachPuff', 'Peru', 'Pink', 'Plum', 'PowderBlue', 'Purple',
'Red', 'RosyBrown', 'RoyalBlue', 'SaddleBrown', 'Green', 'SandyBrown',
'SeaGreen', 'SeaShell', 'Sienna', 'Silver', 'SkyBlue', 'SlateBlue',
'SlateGray', 'SlateGrey', 'Snow', 'SpringGreen', 'SteelBlue', 'GreenYellow',
'Teal', 'Thistle', 'Tomato', 'Turquoise', 'Violet', 'Wheat', 'White',
'WhiteSmoke', 'Yellow', 'YellowGreen'
]
def _get_multiplier_for_color_randomness():
"""Returns a multiplier to get semi-random colors from successive indices.
This function chooses a prime number, p, from the candidates [5, 7, 11, 13, 17]
that:
- is closest to len(STANDARD_COLORS) / 10
- does not divide len(STANDARD_COLORS)
If no prime among the candidates satisfies the constraints, p is returned as 1.
Once p is established, it can be used as a multiplier to select
non-consecutive colors from STANDARD_COLORS:
colors = [(p * i) % len(STANDARD_COLORS) for i in range(20)]
"""
num_colors = len(STANDARD_COLORS)
prime_candidates = [5, 7, 11, 13, 17]
# Remove all prime candidates that divide the number of colors.
prime_candidates = [p for p in prime_candidates if num_colors % p]
if not prime_candidates:
return 1
# Return the closest prime number to num_colors / 10.
abs_distance = [np.abs(num_colors / 10. - p) for p in prime_candidates]
num_candidates = len(abs_distance)
inds = [i for _, i in sorted(zip(abs_distance, range(num_candidates)))]
return prime_candidates[inds[0]]
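# Illustrative sketch (assumption, not part of the original module): how the
# prime multiplier above spreads consecutive track ids over non-consecutive
# entries of STANDARD_COLORS, as done later in
# visualize_boxes_and_labels_on_image_array.
def _example_semi_random_colors(num_ids=5):
  multiplier = _get_multiplier_for_color_randomness()
  return [STANDARD_COLORS[(multiplier * i) % len(STANDARD_COLORS)]
          for i in range(num_ids)]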
def save_image_array_as_png(image, output_path):
"""Saves an image (represented as a numpy array) to PNG.
Args:
image: a numpy array with shape [height, width, 3].
output_path: path to which image should be written.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
with tf.gfile.Open(output_path, 'w') as fid:
image_pil.save(fid, 'PNG')
def encode_image_array_as_png_str(image):
"""Encodes a numpy array into a PNG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
PNG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='PNG')
png_string = output.getvalue()
output.close()
return png_string
def draw_bounding_box_on_image_array(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image (numpy array).
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Args:
image: a numpy array with shape [height, width, 3].
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_bounding_box_on_image(image_pil, ymin, xmin, ymax, xmax, color,
thickness, display_str_list,
use_normalized_coordinates)
np.copyto(image, np.array(image_pil))
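# Illustrative sketch (assumption, not part of the original module): drawing a
# single normalized box with a label onto a blank uint8 image. The image size,
# coordinates, and label text are arbitrary placeholders.
def _example_draw_single_box():
  image = np.zeros((480, 640, 3), dtype=np.uint8)
  draw_bounding_box_on_image_array(
      image, ymin=0.25, xmin=0.25, ymax=0.75, xmax=0.75,
      color='LimeGreen', thickness=2,
      display_str_list=['example: 87%'],
      use_normalized_coordinates=True)
  return image  # The array is modified in place and also returned here.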
def draw_bounding_box_on_image(image,
ymin,
xmin,
ymax,
xmax,
color='red',
thickness=4,
display_str_list=(),
use_normalized_coordinates=True):
"""Adds a bounding box to an image.
Bounding box coordinates can be specified in either absolute (pixel) or
normalized coordinates by setting the use_normalized_coordinates argument.
Each string in display_str_list is displayed on a separate line above the
bounding box in black text on a rectangle filled with the input 'color'.
If the top of the bounding box extends to the edge of the image, the strings
are displayed below the bounding box.
Args:
image: a PIL.Image object.
ymin: ymin of bounding box.
xmin: xmin of bounding box.
ymax: ymax of bounding box.
xmax: xmax of bounding box.
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list: list of strings to display in box (each to be shown on its
own line).
use_normalized_coordinates: If True (default), treat coordinates ymin, xmin,
ymax, xmax as relative to the image. Otherwise treat coordinates as
absolute.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
if use_normalized_coordinates:
(left, right, top, bottom) = (xmin * im_width, xmax * im_width,
ymin * im_height, ymax * im_height)
else:
(left, right, top, bottom) = (xmin, xmax, ymin, ymax)
if thickness > 0:
draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
(left, top)],
width=thickness,
fill=color)
try:
font = ImageFont.truetype('arial.ttf', 24)
except IOError:
font = ImageFont.load_default()
# If the total height of the display strings added to the top of the bounding
# box exceeds the top of the image, stack the strings below the bounding box
# instead of above.
display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
# Each display_str has a top and bottom margin of 0.05x.
total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
if top > total_display_str_height:
text_bottom = top
else:
text_bottom = bottom + total_display_str_height
# Reverse list and print from bottom to top.
for display_str in display_str_list[::-1]:
text_width, text_height = font.getsize(display_str)
margin = np.ceil(0.05 * text_height)
draw.rectangle([(left, text_bottom - text_height - 2 * margin),
(left + text_width, text_bottom)],
fill=color)
draw.text((left + margin, text_bottom - text_height - margin),
display_str,
fill='black',
font=font)
text_bottom -= text_height - 2 * margin
def draw_bounding_boxes_on_image_array(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image (numpy array).
Args:
image: a numpy array object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
image_pil = Image.fromarray(image)
draw_bounding_boxes_on_image(image_pil, boxes, color, thickness,
display_str_list_list)
np.copyto(image, np.array(image_pil))
def draw_bounding_boxes_on_image(image,
boxes,
color='red',
thickness=4,
display_str_list_list=()):
"""Draws bounding boxes on image.
Args:
image: a PIL.Image object.
boxes: a 2 dimensional numpy array of [N, 4]: (ymin, xmin, ymax, xmax). The
coordinates are in normalized format between [0, 1].
color: color to draw bounding box. Default is red.
thickness: line thickness. Default value is 4.
display_str_list_list: list of list of strings. a list of strings for each
bounding box. The reason to pass a list of strings for a bounding box is
that it might contain multiple labels.
Raises:
ValueError: if boxes is not a [N, 4] array
"""
boxes_shape = boxes.shape
if not boxes_shape:
return
if len(boxes_shape) != 2 or boxes_shape[1] != 4:
raise ValueError('Input must be of size [N, 4]')
for i in range(boxes_shape[0]):
display_str_list = ()
if display_str_list_list:
display_str_list = display_str_list_list[i]
draw_bounding_box_on_image(image, boxes[i, 0], boxes[i, 1], boxes[i, 2],
boxes[i, 3], color, thickness, display_str_list)
def create_visualization_fn(category_index,
include_masks=False,
include_keypoints=False,
include_track_ids=False,
**kwargs):
"""Constructs a visualization function that can be wrapped in a py_func.
py_funcs only accept positional arguments. This function returns a suitable
function with the correct positional argument mapping. The positional
arguments in order are:
0: image
1: boxes
2: classes
3: scores
[4-6]: masks (optional)
[4-6]: keypoints (optional)
[4-6]: track_ids (optional)
-- Example 1 --
vis_only_masks_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=False,
**kwargs)
image = tf.py_func(vis_only_masks_fn,
inp=[image, boxes, classes, scores, masks],
Tout=tf.uint8)
-- Example 2 --
vis_masks_and_track_ids_fn = create_visualization_fn(category_index,
include_masks=True, include_keypoints=False, include_track_ids=True,
**kwargs)
image = tf.py_func(vis_masks_and_track_ids_fn,
inp=[image, boxes, classes, scores, masks, track_ids],
Tout=tf.uint8)
Args:
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
include_masks: Whether masks should be expected as a positional argument in
the returned function.
include_keypoints: Whether keypoints should be expected as a positional
argument in the returned function.
include_track_ids: Whether track ids should be expected as a positional
argument in the returned function.
**kwargs: Additional kwargs that will be passed to
visualize_boxes_and_labels_on_image_array.
Returns:
Returns a function that only takes tensors as positional arguments.
"""
def visualization_py_func_fn(*args):
"""Visualization function that can be wrapped in a tf.py_func.
Args:
*args: First 4 positional arguments must be: image - uint8 numpy array
with shape (img_height, img_width, 3). boxes - a numpy array of shape
[N, 4]. classes - a numpy array of shape [N]. scores - a numpy array of
shape [N] or None. -- Optional positional arguments -- instance_masks -
a numpy array of shape [N, image_height, image_width]. keypoints - a
numpy array of shape [N, num_keypoints, 2]. track_ids - a numpy array of
shape [N] with unique track ids.
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid
boxes.
"""
image = args[0]
boxes = args[1]
classes = args[2]
scores = args[3]
masks = keypoints = track_ids = None
pos_arg_ptr = 4 # Positional argument for first optional tensor (masks).
if include_masks:
masks = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_keypoints:
keypoints = args[pos_arg_ptr]
pos_arg_ptr += 1
if include_track_ids:
track_ids = args[pos_arg_ptr]
return visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index=category_index,
instance_masks=masks,
keypoints=keypoints,
track_ids=track_ids,
**kwargs)
return visualization_py_func_fn
def _resize_original_image(image, image_shape):
image = tf.expand_dims(image, 0)
image = tf.image.resize_images(
image,
image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
return tf.cast(tf.squeeze(image, 0), tf.uint8)
def draw_bounding_boxes_on_image_tensors(images,
boxes,
classes,
scores,
category_index,
original_image_spatial_shape=None,
true_image_shape=None,
instance_masks=None,
keypoints=None,
keypoint_edges=None,
track_ids=None,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True):
"""Draws bounding boxes, masks, and keypoints on batch of image tensors.
Args:
images: A 4D uint8 image tensor of shape [N, H, W, C]. If C > 3, additional
channels will be ignored. If C = 1, then we convert the images to RGB
images.
boxes: [N, max_detections, 4] float32 tensor of detection boxes.
classes: [N, max_detections] int tensor of detection classes. Note that
classes are 1-indexed.
scores: [N, max_detections] float32 tensor of detection scores.
category_index: a dict that maps integer ids to category dicts. e.g.
{1: {1: 'dog'}, 2: {2: 'cat'}, ...}
original_image_spatial_shape: [N, 2] tensor containing the spatial size of
the original image.
true_image_shape: [N, 3] tensor containing the spatial size of unpadded
original_image.
instance_masks: A 4D uint8 tensor of shape [N, max_detection, H, W] with
instance masks.
keypoints: A 4D float32 tensor of shape [N, max_detection, num_keypoints, 2]
with keypoints.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: [N, max_detections] int32 tensor of unique tracks ids (i.e.
instance ids for each object). If provided, the color-coding of boxes is
dictated by these ids, and not classes.
max_boxes_to_draw: Maximum number of boxes to draw on an image. Default 20.
min_score_thresh: Minimum score threshold for visualization. Default 0.2.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
Returns:
4D image tensor of type uint8, with boxes drawn on top.
"""
# Additional channels are being ignored.
if images.shape[3] > 3:
images = images[:, :, :, 0:3]
elif images.shape[3] == 1:
images = tf.image.grayscale_to_rgb(images)
visualization_keyword_args = {
'use_normalized_coordinates': use_normalized_coordinates,
'max_boxes_to_draw': max_boxes_to_draw,
'min_score_thresh': min_score_thresh,
'agnostic_mode': False,
'line_thickness': 4,
'keypoint_edges': keypoint_edges
}
if true_image_shape is None:
true_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 3])
else:
true_shapes = true_image_shape
if original_image_spatial_shape is None:
original_shapes = tf.constant(-1, shape=[images.shape.as_list()[0], 2])
else:
original_shapes = original_image_spatial_shape
visualize_boxes_fn = create_visualization_fn(
category_index,
include_masks=instance_masks is not None,
include_keypoints=keypoints is not None,
include_track_ids=track_ids is not None,
**visualization_keyword_args)
elems = [true_shapes, original_shapes, images, boxes, classes, scores]
if instance_masks is not None:
elems.append(instance_masks)
if keypoints is not None:
elems.append(keypoints)
if track_ids is not None:
elems.append(track_ids)
def draw_boxes(image_and_detections):
"""Draws boxes on image."""
true_shape = image_and_detections[0]
original_shape = image_and_detections[1]
if true_image_shape is not None:
image = shape_utils.pad_or_clip_nd(image_and_detections[2],
[true_shape[0], true_shape[1], 3])
if original_image_spatial_shape is not None:
image_and_detections[2] = _resize_original_image(image, original_shape)
image_with_boxes = tf.py_func(visualize_boxes_fn, image_and_detections[2:],
tf.uint8)
return image_with_boxes
images = tf.map_fn(draw_boxes, elems, dtype=tf.uint8, back_prop=False)
return images
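# Illustrative sketch (assumption, not part of the original module): calling the
# batched drawing op on placeholder tensors. The category_index, shapes, and
# scores are made up; in graph mode the returned tensor still has to be
# evaluated in a session.
def _example_draw_on_image_tensors():
  images = tf.zeros([2, 320, 320, 3], dtype=tf.uint8)
  boxes = tf.constant([[[0.1, 0.1, 0.6, 0.6]], [[0.2, 0.2, 0.8, 0.8]]],
                      dtype=tf.float32)
  classes = tf.constant([[1], [2]], dtype=tf.int64)
  scores = tf.constant([[0.9], [0.75]], dtype=tf.float32)
  category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
  return draw_bounding_boxes_on_image_tensors(
      images, boxes, classes, scores, category_index,
      max_boxes_to_draw=5, min_score_thresh=0.5)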
def draw_side_by_side_evaluation_image(eval_dict,
category_index,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
keypoint_edges=None):
"""Creates a side-by-side image with detections and groundtruth.
Bounding boxes (and instance masks, if available) are visualized on both
subimages.
Args:
eval_dict: The evaluation dictionary returned by
eval_util.result_dict_for_batched_example() or
eval_util.result_dict_for_single_example().
category_index: A category index (dictionary) produced from a labelmap.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
Returns:
A list of [1, H, 2 * W, C] uint8 tensors. The subimage on the left
corresponds to detections, while the subimage on the right corresponds to
groundtruth.
"""
detection_fields = fields.DetectionResultFields()
input_data_fields = fields.InputDataFields()
images_with_detections_list = []
# Add the batch dimension if the eval_dict is for single example.
if len(eval_dict[detection_fields.detection_classes].shape) == 1:
for key in eval_dict:
if key != input_data_fields.original_image and key != input_data_fields.image_additional_channels:
eval_dict[key] = tf.expand_dims(eval_dict[key], 0)
for indx in range(eval_dict[input_data_fields.original_image].shape[0]):
instance_masks = None
if detection_fields.detection_masks in eval_dict:
instance_masks = tf.cast(
tf.expand_dims(
eval_dict[detection_fields.detection_masks][indx], axis=0),
tf.uint8)
keypoints = None
if detection_fields.detection_keypoints in eval_dict:
keypoints = tf.expand_dims(
eval_dict[detection_fields.detection_keypoints][indx], axis=0)
groundtruth_instance_masks = None
if input_data_fields.groundtruth_instance_masks in eval_dict:
groundtruth_instance_masks = tf.cast(
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_instance_masks][indx],
axis=0), tf.uint8)
images_with_detections = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_classes][indx], axis=0),
tf.expand_dims(
eval_dict[detection_fields.detection_scores][indx], axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=instance_masks,
keypoints=keypoints,
keypoint_edges=keypoint_edges,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates)
images_with_groundtruth = draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.original_image][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx], axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape][indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates)
images_to_visualize = tf.concat(
[images_with_detections, images_with_groundtruth], axis=2)
if input_data_fields.image_additional_channels in eval_dict:
images_with_additional_channels_groundtruth = (
draw_bounding_boxes_on_image_tensors(
tf.expand_dims(
eval_dict[input_data_fields.image_additional_channels][indx],
axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_boxes][indx], axis=0),
tf.expand_dims(
eval_dict[input_data_fields.groundtruth_classes][indx],
axis=0),
tf.expand_dims(
tf.ones_like(
eval_dict[input_data_fields.groundtruth_classes][indx],
dtype=tf.float32),
axis=0),
category_index,
original_image_spatial_shape=tf.expand_dims(
eval_dict[input_data_fields.original_image_spatial_shape]
[indx],
axis=0),
true_image_shape=tf.expand_dims(
eval_dict[input_data_fields.true_image_shape][indx], axis=0),
instance_masks=groundtruth_instance_masks,
keypoints=None,
keypoint_edges=None,
max_boxes_to_draw=None,
min_score_thresh=0.0,
use_normalized_coordinates=use_normalized_coordinates))
images_to_visualize = tf.concat(
[images_to_visualize, images_with_additional_channels_groundtruth],
axis=2)
images_with_detections_list.append(images_to_visualize)
return images_with_detections_list
def draw_keypoints_on_image_array(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image (numpy array).
Args:
image: a numpy array with shape [height, width, 3].
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
draw_keypoints_on_image(image_pil, keypoints, color, radius,
use_normalized_coordinates, keypoint_edges,
keypoint_edge_color, keypoint_edge_width)
np.copyto(image, np.array(image_pil))
def draw_keypoints_on_image(image,
keypoints,
color='red',
radius=2,
use_normalized_coordinates=True,
keypoint_edges=None,
keypoint_edge_color='green',
keypoint_edge_width=2):
"""Draws keypoints on an image.
Args:
image: a PIL.Image object.
keypoints: a numpy array with shape [num_keypoints, 2].
color: color to draw the keypoints with. Default is red.
radius: keypoint radius. Default value is 2.
use_normalized_coordinates: if True (default), treat keypoint values as
relative to the image. Otherwise treat them as absolute.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
keypoint_edge_color: color to draw the keypoint edges with. Default is green.
keypoint_edge_width: width of the edges drawn between keypoints. Default
value is 2.
"""
draw = ImageDraw.Draw(image)
im_width, im_height = image.size
keypoints_x = [k[1] for k in keypoints]
keypoints_y = [k[0] for k in keypoints]
if use_normalized_coordinates:
keypoints_x = tuple([im_width * x for x in keypoints_x])
keypoints_y = tuple([im_height * y for y in keypoints_y])
for keypoint_x, keypoint_y in zip(keypoints_x, keypoints_y):
draw.ellipse([(keypoint_x - radius, keypoint_y - radius),
(keypoint_x + radius, keypoint_y + radius)],
outline=color,
fill=color)
if keypoint_edges is not None:
for keypoint_start, keypoint_end in keypoint_edges:
if (keypoint_start < 0 or keypoint_start >= len(keypoints) or
keypoint_end < 0 or keypoint_end >= len(keypoints)):
continue
edge_coordinates = [
keypoints_x[keypoint_start], keypoints_y[keypoint_start],
keypoints_x[keypoint_end], keypoints_y[keypoint_end]
]
draw.line(
edge_coordinates, fill=keypoint_edge_color, width=keypoint_edge_width)
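# Illustrative sketch (assumption, not part of the original module): drawing
# three normalized keypoints and connecting them with two edges. The keypoint
# layout is arbitrary.
def _example_draw_keypoints():
  image = np.zeros((200, 200, 3), dtype=np.uint8)
  keypoints = np.array([[0.2, 0.2], [0.5, 0.5], [0.8, 0.3]])  # (y, x) pairs.
  draw_keypoints_on_image_array(
      image, keypoints, color='Yellow', radius=3,
      use_normalized_coordinates=True,
      keypoint_edges=[(0, 1), (1, 2)])
  return image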
def draw_mask_on_image_array(image, mask, color='red', alpha=0.4):
"""Draws mask on an image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
mask: a uint8 numpy array of shape (img_height, img_width) with values
of either 0 or 1.
color: color to draw the mask with. Default is red.
alpha: transparency value between 0 and 1. (default: 0.4)
Raises:
ValueError: On incorrect data type for image or masks.
"""
if image.dtype != np.uint8:
raise ValueError('`image` not of type np.uint8')
if mask.dtype != np.uint8:
raise ValueError('`mask` not of type np.uint8')
if np.any(np.logical_and(mask != 1, mask != 0)):
raise ValueError('`mask` elements should be in [0, 1]')
if image.shape[:2] != mask.shape:
raise ValueError('The image has spatial dimensions %s but the mask has '
'dimensions %s' % (image.shape[:2], mask.shape))
rgb = ImageColor.getrgb(color)
pil_image = Image.fromarray(image)
solid_color = np.expand_dims(
np.ones_like(mask), axis=2) * np.reshape(list(rgb), [1, 1, 3])
pil_solid_color = Image.fromarray(np.uint8(solid_color)).convert('RGBA')
pil_mask = Image.fromarray(np.uint8(255.0 * alpha * mask)).convert('L')
pil_image = Image.composite(pil_solid_color, pil_image, pil_mask)
np.copyto(image, np.array(pil_image.convert('RGB')))
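# Illustrative sketch (assumption, not part of the original module): blending a
# binary mask into an image. The mask region, color, and alpha are placeholders.
def _example_draw_mask():
  image = np.zeros((100, 100, 3), dtype=np.uint8)
  mask = np.zeros((100, 100), dtype=np.uint8)
  mask[25:75, 25:75] = 1  # A square region to be tinted.
  draw_mask_on_image_array(image, mask, color='Cyan', alpha=0.4)
  return image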
def visualize_boxes_and_labels_on_image_array(
image,
boxes,
classes,
scores,
category_index,
instance_masks=None,
instance_boundaries=None,
keypoints=None,
keypoint_edges=None,
track_ids=None,
use_normalized_coordinates=False,
max_boxes_to_draw=20,
min_score_thresh=.5,
agnostic_mode=False,
line_thickness=4,
groundtruth_box_visualization_color='black',
skip_boxes=False,
skip_scores=False,
skip_labels=False,
skip_track_ids=False):
"""Overlay labeled boxes on an image with formatted scores and label names.
This function groups boxes that correspond to the same location, creates a
display string for each detection, and overlays these on the image. Note that
this function modifies the image in place and returns that same image.
Args:
image: uint8 numpy array with shape (img_height, img_width, 3)
boxes: a numpy array of shape [N, 4]
classes: a numpy array of shape [N]. Note that class indices are 1-based,
and match the keys in the label map.
scores: a numpy array of shape [N] or None. If scores=None, then this
function assumes that the boxes to be plotted are groundtruth boxes and
plots all boxes as black with no classes or scores.
category_index: a dict containing category dictionaries (each holding
category index `id` and category name `name`) keyed by category indices.
instance_masks: a numpy array of shape [N, image_height, image_width] with
values ranging between 0 and 1, can be None.
instance_boundaries: a numpy array of shape [N, image_height, image_width]
with values ranging between 0 and 1, can be None.
keypoints: a numpy array of shape [N, num_keypoints, 2], can be None
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
track_ids: a numpy array of shape [N] with unique track ids. If provided,
color-coding of boxes will be determined by these ids, and not the class
indices.
use_normalized_coordinates: whether boxes is to be interpreted as normalized
coordinates or not.
max_boxes_to_draw: maximum number of boxes to visualize. If None, draw all
boxes.
min_score_thresh: minimum score threshold for a box to be visualized
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not. This mode will display scores but ignore
classes.
line_thickness: integer (default: 4) controlling line width of the boxes.
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
skip_boxes: whether to skip the drawing of bounding boxes.
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
skip_track_ids: whether to skip track id when drawing a single detection
Returns:
uint8 numpy array with shape (img_height, img_width, 3) with overlaid boxes.
"""
# Create a display string (and color) for every box location, group any boxes
# that correspond to the same location.
box_to_display_str_map = collections.defaultdict(list)
box_to_color_map = collections.defaultdict(str)
box_to_instance_masks_map = {}
box_to_instance_boundaries_map = {}
box_to_keypoints_map = collections.defaultdict(list)
box_to_track_ids_map = {}
if not max_boxes_to_draw:
max_boxes_to_draw = boxes.shape[0]
for i in range(boxes.shape[0]):
if max_boxes_to_draw == len(box_to_color_map):
break
if scores is None or scores[i] > min_score_thresh:
box = tuple(boxes[i].tolist())
if instance_masks is not None:
box_to_instance_masks_map[box] = instance_masks[i]
if instance_boundaries is not None:
box_to_instance_boundaries_map[box] = instance_boundaries[i]
if keypoints is not None:
box_to_keypoints_map[box].extend(keypoints[i])
if track_ids is not None:
box_to_track_ids_map[box] = track_ids[i]
if scores is None:
box_to_color_map[box] = groundtruth_box_visualization_color
else:
display_str = ''
if not skip_labels:
if not agnostic_mode:
if classes[i] in six.viewkeys(category_index):
class_name = category_index[classes[i]]['name']
else:
class_name = 'N/A'
display_str = str(class_name)
if not skip_scores:
if not display_str:
display_str = '{}%'.format(int(100 * scores[i]))
else:
display_str = '{}: {}%'.format(display_str, int(100 * scores[i]))
if not skip_track_ids and track_ids is not None:
if not display_str:
display_str = 'ID {}'.format(track_ids[i])
else:
display_str = '{}: ID {}'.format(display_str, track_ids[i])
box_to_display_str_map[box].append(display_str)
if agnostic_mode:
box_to_color_map[box] = 'DarkOrange'
elif track_ids is not None:
prime_multiplier = _get_multiplier_for_color_randomness()
box_to_color_map[box] = STANDARD_COLORS[(prime_multiplier *
track_ids[i]) %
len(STANDARD_COLORS)]
else:
box_to_color_map[box] = STANDARD_COLORS[classes[i] %
len(STANDARD_COLORS)]
# Draw all boxes onto image.
for box, color in box_to_color_map.items():
ymin, xmin, ymax, xmax = box
if instance_masks is not None:
draw_mask_on_image_array(
image, box_to_instance_masks_map[box], color=color)
if instance_boundaries is not None:
draw_mask_on_image_array(
image, box_to_instance_boundaries_map[box], color='red', alpha=1.0)
draw_bounding_box_on_image_array(
image,
ymin,
xmin,
ymax,
xmax,
color=color,
thickness=0 if skip_boxes else line_thickness,
display_str_list=box_to_display_str_map[box],
use_normalized_coordinates=use_normalized_coordinates)
if keypoints is not None:
draw_keypoints_on_image_array(
image,
box_to_keypoints_map[box],
color=color,
radius=line_thickness / 2,
use_normalized_coordinates=use_normalized_coordinates,
keypoint_edges=keypoint_edges,
keypoint_edge_color=color,
keypoint_edge_width=line_thickness // 2)
return image
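# Illustrative sketch (assumption, not part of the original module): the typical
# end-to-end call that overlays detections from plain numpy arrays. The
# detections and category_index are made-up placeholders.
def _example_visualize_detections():
  image = np.zeros((300, 300, 3), dtype=np.uint8)
  boxes = np.array([[0.1, 0.1, 0.5, 0.5], [0.4, 0.4, 0.9, 0.9]])
  classes = np.array([1, 2])
  scores = np.array([0.92, 0.35])
  category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
  return visualize_boxes_and_labels_on_image_array(
      image, boxes, classes, scores, category_index,
      use_normalized_coordinates=True,
      min_score_thresh=0.5,  # Only the first box passes this threshold.
      line_thickness=2)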
def add_cdf_image_summary(values, name):
"""Adds a tf.summary.image for a CDF plot of the values.
Normalizes `values` such that they sum to 1, plots the cumulative distribution
function and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
name: name for the image summary.
"""
def cdf_plot(values):
"""Numpy function to plot CDF."""
normalized_values = values / np.sum(values)
sorted_values = np.sort(normalized_values)
cumulative_values = np.cumsum(sorted_values)
fraction_of_examples = (
np.arange(cumulative_values.size, dtype=np.float32) /
cumulative_values.size)
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
ax.plot(fraction_of_examples, cumulative_values)
ax.set_ylabel('cumulative normalized values')
ax.set_xlabel('fraction of examples')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(
fig.canvas.tostring_rgb(),
dtype='uint8').reshape(1, int(height), int(width), 3)
return image
cdf_plot = tf.py_func(cdf_plot, [values], tf.uint8)
tf.summary.image(name, cdf_plot)
def add_hist_image_summary(values, bins, name):
"""Adds a tf.summary.image for a histogram plot of the values.
Plots the histogram of values and creates a tf image summary.
Args:
values: a 1-D float32 tensor containing the values.
bins: bin edges which will be directly passed to np.histogram.
name: name for the image summary.
"""
def hist_plot(values, bins):
"""Numpy function to plot hist."""
fig = plt.figure(frameon=False)
ax = fig.add_subplot(1, 1, 1)
y, x = np.histogram(values, bins=bins)
ax.plot(x[:-1], y)
ax.set_ylabel('count')
ax.set_xlabel('value')
fig.canvas.draw()
width, height = fig.get_size_inches() * fig.get_dpi()
image = np.frombuffer(
fig.canvas.tostring_rgb(),
dtype='uint8').reshape(1, int(height), int(width), 3)
return image
hist_plot = tf.py_func(hist_plot, [values, bins], tf.uint8)
tf.summary.image(name, hist_plot)
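# Illustrative sketch (assumption, not part of the original module): adding a
# histogram image summary for a 1-D tensor of per-example losses inside a
# TF1-style graph. The bin edges and summary name are placeholders.
def _example_hist_summary(per_example_losses):
  add_hist_image_summary(per_example_losses,
                         bins=np.linspace(0.0, 5.0, 51),
                         name='per_example_loss_hist')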
class EvalMetricOpsVisualization(six.with_metaclass(abc.ABCMeta, object)):
"""Abstract base class responsible for visualizations during evaluation.
Currently, summary images are not run during evaluation. One way to produce
evaluation images in Tensorboard is to provide tf.summary.image strings as
`value_ops` in tf.estimator.EstimatorSpec's `eval_metric_ops`. This class is
responsible for accruing images (with overlaid detections and groundtruth)
and returning a dictionary that can be passed to `eval_metric_ops`.
"""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='evaluation_image',
keypoint_edges=None):
"""Creates an EvalMetricOpsVisualization.
Args:
category_index: A category index (dictionary) produced from a labelmap.
max_examples_to_draw: The maximum number of example summaries to produce.
max_boxes_to_draw: The maximum number of boxes to draw for detections.
min_score_thresh: The minimum score threshold for showing detections.
use_normalized_coordinates: Whether to assume boxes and keypoints are in
normalized coordinates (as opposed to absolute coordinates). Default is
True.
summary_name_prefix: A string prefix for each image summary.
keypoint_edges: A list of tuples with keypoint indices that specify which
keypoints should be connected by an edge, e.g. [(0, 1), (2, 4)] draws
edges from keypoint 0 to 1 and from keypoint 2 to 4.
"""
self._category_index = category_index
self._max_examples_to_draw = max_examples_to_draw
self._max_boxes_to_draw = max_boxes_to_draw
self._min_score_thresh = min_score_thresh
self._use_normalized_coordinates = use_normalized_coordinates
self._summary_name_prefix = summary_name_prefix
self._keypoint_edges = keypoint_edges
self._images = []
def clear(self):
self._images = []
def add_images(self, images):
"""Store a list of images, each with shape [1, H, W, C]."""
if len(self._images) >= self._max_examples_to_draw:
return
# Store images and clip list if necessary.
self._images.extend(images)
if len(self._images) > self._max_examples_to_draw:
self._images[self._max_examples_to_draw:] = []
def get_estimator_eval_metric_ops(self, eval_dict):
# pyformat: disable
"""Returns metric ops for use in tf.estimator.EstimatorSpec.
Args:
eval_dict: A dictionary that holds an image, groundtruth, and detections
for a batched example. Note that, we use only the first example for
visualization. See eval_util.result_dict_for_batched_example() for a
convenient method for constructing such a dictionary. The dictionary
contains
fields.InputDataFields.original_image: [batch_size, H, W, 3] image.
fields.InputDataFields.original_image_spatial_shape: [batch_size, 2]
tensor containing the size of the original image.
fields.InputDataFields.true_image_shape: [batch_size, 3]
tensor containing the spatial size of the unpadded original image.
fields.InputDataFields.groundtruth_boxes - [batch_size, num_boxes, 4]
float32 tensor with groundtruth boxes in range [0.0, 1.0].
fields.InputDataFields.groundtruth_classes - [batch_size, num_boxes]
int64 tensor with 1-indexed groundtruth classes.
fields.InputDataFields.groundtruth_instance_masks - (optional)
[batch_size, num_boxes, H, W] int64 tensor with instance masks.
fields.DetectionResultFields.detection_boxes - [batch_size,
max_num_boxes, 4] float32 tensor with detection boxes in range [0.0,
1.0].
fields.DetectionResultFields.detection_classes - [batch_size,
max_num_boxes] int64 tensor with 1-indexed detection classes.
fields.DetectionResultFields.detection_scores - [batch_size,
max_num_boxes] float32 tensor with detection scores.
fields.DetectionResultFields.detection_masks - (optional) [batch_size,
max_num_boxes, H, W] float32 tensor of binarized masks.
fields.DetectionResultFields.detection_keypoints - (optional)
[batch_size, max_num_boxes, num_keypoints, 2] float32 tensor with
keypoints.
Returns:
A dictionary of image summary names to tuple of (value_op, update_op). The
`update_op` is the same for all items in the dictionary, and is
responsible for saving a single side-by-side image with detections and
groundtruth. Each `value_op` holds the tf.summary.image string for a given
image.
"""
# pyformat: enable
if self._max_examples_to_draw == 0:
return {}
images = self.images_from_evaluation_dict(eval_dict)
def get_images():
"""Returns a list of images, padded to self._max_images_to_draw."""
images = self._images
while len(images) < self._max_examples_to_draw:
images.append(np.array(0, dtype=np.uint8))
self.clear()
return images
def image_summary_or_default_string(summary_name, image):
"""Returns image summaries for non-padded elements."""
return tf.cond(
tf.equal(tf.size(tf.shape(image)), 4), # pyformat: disable
lambda: tf.summary.image(summary_name, image),
lambda: tf.constant(''))
if tf.executing_eagerly():
update_op = self.add_images([[images[0]]]) # pylint: disable=assignment-from-none
image_tensors = get_images()
else:
update_op = tf.py_func(self.add_images, [[images[0]]], [])
image_tensors = tf.py_func(get_images, [],
[tf.uint8] * self._max_examples_to_draw)
eval_metric_ops = {}
for i, image in enumerate(image_tensors):
summary_name = self._summary_name_prefix + '/' + str(i)
value_op = image_summary_or_default_string(summary_name, image)
eval_metric_ops[summary_name] = (value_op, update_op)
return eval_metric_ops
@abc.abstractmethod
def images_from_evaluation_dict(self, eval_dict):
"""Converts evaluation dictionary into a list of image tensors.
To be overridden by implementations.
Args:
eval_dict: A dictionary with all the necessary information for producing
visualizations.
Returns:
A list of [1, H, W, C] uint8 tensors.
"""
raise NotImplementedError
class VisualizeSingleFrameDetections(EvalMetricOpsVisualization):
"""Class responsible for single-frame object detection visualizations."""
def __init__(self,
category_index,
max_examples_to_draw=5,
max_boxes_to_draw=20,
min_score_thresh=0.2,
use_normalized_coordinates=True,
summary_name_prefix='Detections_Left_Groundtruth_Right',
keypoint_edges=None):
super(VisualizeSingleFrameDetections, self).__init__(
category_index=category_index,
max_examples_to_draw=max_examples_to_draw,
max_boxes_to_draw=max_boxes_to_draw,
min_score_thresh=min_score_thresh,
use_normalized_coordinates=use_normalized_coordinates,
summary_name_prefix=summary_name_prefix,
keypoint_edges=keypoint_edges)
def images_from_evaluation_dict(self, eval_dict):
return draw_side_by_side_evaluation_image(eval_dict, self._category_index,
self._max_boxes_to_draw,
self._min_score_thresh,
self._use_normalized_coordinates,
self._keypoint_edges)
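# Illustrative sketch (assumption, not part of the original module): wiring the
# visualization into an Estimator's eval_metric_ops. `eval_dict` is expected to
# come from eval_util.result_dict_for_batched_example(); the label map below is
# a placeholder.
def _example_eval_metric_ops(eval_dict):
  category_index = {1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}}
  vis_metric = VisualizeSingleFrameDetections(
      category_index, max_examples_to_draw=3, min_score_thresh=0.3)
  return vis_metric.get_estimator_eval_metric_ops(eval_dict)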
# Source file: TensorFlow2/Detection/Efficientdet/visualize/vis_utils.py (repo: DeepLearningExamples-master)
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to access TensorShape values.
The rank 4 tensor_shape must be of the form [batch_size, height, width, depth].
"""
def get_dim_as_int(dim):
"""Utility to get v1 or v2 TensorShape dim as an int.
Args:
dim: The TensorShape dimension to get as an int
Returns:
None or an int.
"""
try:
return dim.value
except AttributeError:
return dim
def get_batch_size(tensor_shape):
"""Returns batch size from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the batch size of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[0])
def get_height(tensor_shape):
"""Returns height from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the height of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[1])
def get_width(tensor_shape):
"""Returns width from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the width of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[2])
def get_depth(tensor_shape):
"""Returns depth from the tensor shape.
Args:
tensor_shape: A rank 4 TensorShape.
Returns:
An integer representing the depth of the tensor.
"""
tensor_shape.assert_has_rank(rank=4)
return get_dim_as_int(tensor_shape[3])
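# Illustrative sketch (assumption, not part of the original module): the helpers
# above behave the same for v1 TensorShape dims (Dimension objects) and v2 dims
# (plain ints or None). TensorFlow is imported locally since this module has no
# top-level imports.
def _example_static_shape_usage():
  import tensorflow as tf
  shape = tf.TensorShape([8, 640, 640, 3])
  return (get_batch_size(shape), get_height(shape),
          get_width(shape), get_depth(shape))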
# Source file: TensorFlow2/Detection/Efficientdet/visualize/static_shape.py (repo: DeepLearningExamples-master)
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer related utils."""
from absl import logging
import tensorflow as tf
from tensorflow_addons.optimizers import MovingAverage
from model import learning_rate
@tf.keras.utils.register_keras_serializable(package='Custom')
class HvdMovingAverage(MovingAverage):
def swap_weights(self):
"""Swap the average and moving weights.
The original function in the parent class assumes a cross-replica
context, which fails for single-GPU training. It also fails for
multi-GPU training with Horovod.
"""
self._swap_weights()
def _create_slots(self, var_list):
"""[summary]
The original function in the parent class, in addition to calling
_create_slots() of the base optimizer, reassigns trainable tensors to
self._average_weights and self._model_weights, which has the effect of
removing non-trainable tensors (e.g., moving means and variances) from EMA.
By overriding it, we simply keep the part that calls _create_slots of the base
optimizer. To make up for the removed part of the code, we call shadow_copy, which
assigns both trainable and non-trainable tensors to self._average_weights and
self._model_weights.
Args:
      var_list: the list of model variables for which to create slots.
"""
self._optimizer._create_slots(var_list=var_list)
def apply_gradients(self, grads_and_vars, name=None, experimental_aggregate_gradients=True):
self._optimizer._iterations = self.iterations
result = super().apply_gradients(grads_and_vars, name)
# update EMA weights after the weights are updated
self.update_average(self._optimizer.iterations)
return result
def _resource_apply_dense(self, grad, var):
"""[summary]
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_dense(grad, var)
def _resource_apply_sparse(self, grad, var, indices):
"""[summary]
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_sparse(grad, var, indices)
def _resource_apply_sparse_duplicate_indices(self, grad, var, indices):
"""[summary]
We must override this function, eliminating the part that performs
EMA updates for trainable variables. The reasons is that we use our custom
self.update_average(), called in apply_gradients, which performs EMA updates
for both trainable and non-trainable variables. If we don't override this
function, in each iteration, EMA of trainable variables get updated twice
(once here and once in apply_gradient) while EMA of non-trainable variables get
updated only once in apply_gradients.
"""
return self._optimizer._resource_apply_sparse_duplicate_indices(
grad, var, indices)
@tf.function
def update_average(self, step: tf.Tensor):
step = tf.cast(step, tf.float32)
average_decay = self._get_hyper("average_decay", tf.dtypes.float32)
if step < self._start_step:
decay = tf.constant(0., tf.float32)
elif self._dynamic_decay:
decay = step - self._start_step
decay = tf.minimum(average_decay, (1. + decay) / (10. + decay))
else:
decay = average_decay
def _apply_moving(v_moving, v_normal):
diff = v_moving - v_normal
v_moving.assign_sub(tf.cast(1. - decay, v_moving.dtype) * diff)
return v_moving
def _update(strategy, v_moving_and_v_normal):
for v_moving, v_normal in v_moving_and_v_normal:
strategy.extended.update(v_moving, _apply_moving, args=(v_normal,))
ctx = tf.distribute.get_replica_context()
return ctx.merge_call(_update, args=(zip(self._average_weights,
self._model_weights),))
@classmethod
def from_config(cls, config, custom_objects=None):
optimizer = tf.keras.optimizers.deserialize(
config.pop('optimizer'),
custom_objects=custom_objects,
)
return cls(optimizer, **config)
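# Illustrative usage sketch (hyperparameter values are assumed, not part of the
# original file): wrapping a base optimizer so the EMA covers both trainable and
# non-trainable variables.
#
#   base_opt = tf.keras.optimizers.SGD(0.08, momentum=0.9)
#   ema_opt = HvdMovingAverage(base_opt, average_decay=0.9999, dynamic_decay=True)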
def get_optimizer(params):
"""Get optimizer."""
lr = learning_rate.learning_rate_schedule(params)
if params['optimizer'].lower() == 'sgd':
logging.info('Use SGD optimizer')
optimizer = tf.keras.optimizers.SGD(
lr, momentum=params['momentum'])
else:
raise ValueError('optimizer should be sgd')
moving_average_decay = params['moving_average_decay']
if moving_average_decay is not None and moving_average_decay > 0.0:
optimizer = HvdMovingAverage(optimizer, average_decay=moving_average_decay, dynamic_decay=True)
if params['mixed_precision']:
optimizer = tf.keras.mixed_precision.LossScaleOptimizer(
optimizer, initial_scale=params['loss_scale'])
return optimizer | DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/optimizer_builder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utils."""
from typing import Text, Union
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from utils.horovod_utils import get_world_size
from model import activation_builder
class BatchNormalization(tf.keras.layers.BatchNormalization):
"""Fixed default name of BatchNormalization to match TpuBatchNormalization."""
def __init__(self, **kwargs):
if not kwargs.get('name', None):
kwargs['name'] = 'tpu_batch_normalization'
super().__init__(**kwargs)
def batch_norm_class(is_training=True):
if is_training:
# TODO(fsx950223): use SyncBatchNorm after TF bug is fixed (incorrect nccl
# all_reduce). See https://github.com/tensorflow/tensorflow/issues/41980
return BatchNormalization
else:
return BatchNormalization
def batch_normalization(inputs, training=False, **kwargs):
"""A wrapper for TpuBatchNormalization."""
bn_layer = batch_norm_class(training)(**kwargs)
return bn_layer(inputs, training=training)
def batch_norm_act(inputs,
is_training_bn: bool,
act_type: Union[Text, None],
init_zero: bool = False,
data_format: Text = 'channels_last',
momentum: float = 0.99,
epsilon: float = 1e-3,
name: Text = None):
"""Performs a batch normalization followed by a non-linear activation.
Args:
inputs: `Tensor` of shape `[batch, channels, ...]`.
is_training_bn: `bool` for whether the model is training.
act_type: non-linear relu function type. If None, omits the relu operation.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
data_format: `str` either "channels_first" for `[batch, channels, height,
      width]` or "channels_last" for `[batch, height, width, channels]`.
    momentum: `float`, momentum of batch norm.
epsilon: `float`, small value for numerical stability.
name: the name of the batch normalization layer
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
if data_format == 'channels_first':
axis = 1
else:
axis = 3
inputs = batch_normalization(
inputs=inputs,
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=True,
scale=True,
training=is_training_bn,
gamma_initializer=gamma_initializer,
name=name)
if act_type:
inputs = activation_builder.activation_fn(inputs, act_type)
return inputs
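# Illustrative usage sketch (shapes and the activation type are assumed, not
# part of the original file):
#
#   feats = tf.zeros([8, 64, 64, 40])
#   feats = batch_norm_act(feats, is_training_bn=True, act_type='swish',
#                          data_format='channels_last', name='bn')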
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/normalization_builder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""IoU utils for box regression with iou losses.
Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression.
https://arxiv.org/pdf/1911.08287.pdf
"""
import math
from typing import Union, Text
import numpy as np
import tensorflow as tf
FloatType = Union[tf.Tensor, float, np.float32, np.float64]
def _get_v(b1_height: FloatType, b1_width: FloatType, b2_height: FloatType,
b2_width: FloatType) -> tf.Tensor:
"""Get the consistency measurement of aspect ratio for ciou."""
@tf.custom_gradient
def _get_grad_v(height, width):
"""backpropogate gradient."""
arctan = tf.atan(tf.math.divide_no_nan(b1_width, b1_height)) - tf.atan(
tf.math.divide_no_nan(width, height))
v = 4 * ((arctan / math.pi)**2)
def _grad_v(dv):
"""Grad for eager mode."""
gdw = dv * 8 * arctan * height / (math.pi**2)
gdh = -dv * 8 * arctan * width / (math.pi**2)
return [gdh, gdw]
def _grad_v_graph(dv, variables):
"""Grad for graph mode."""
gdw = dv * 8 * arctan * height / (math.pi**2)
gdh = -dv * 8 * arctan * width / (math.pi**2)
return [gdh, gdw], tf.gradients(v, variables, grad_ys=dv)
if tf.compat.v1.executing_eagerly_outside_functions():
return v, _grad_v
return v, _grad_v_graph
return _get_grad_v(b2_height, b2_width)
def _iou_per_anchor(pred_boxes: FloatType,
target_boxes: FloatType,
iou_type: Text = 'iou') -> tf.Tensor:
"""Computing the IoU for a single anchor.
Args:
pred_boxes: predicted boxes, with coordinate [y_min, x_min, y_max, x_max].
target_boxes: target boxes, with coordinate [y_min, x_min, y_max, x_max].
iou_type: one of ['iou', 'ciou', 'diou', 'giou'].
Returns:
IoU loss float `Tensor`.
"""
# t_ denotes target boxes and p_ denotes predicted boxes.
t_ymin, t_xmin, t_ymax, t_xmax = target_boxes
p_ymin, p_xmin, p_ymax, p_xmax = pred_boxes
zero = tf.convert_to_tensor(0.0, t_ymin.dtype)
p_width = tf.maximum(zero, p_xmax - p_xmin)
p_height = tf.maximum(zero, p_ymax - p_ymin)
t_width = tf.maximum(zero, t_xmax - t_xmin)
t_height = tf.maximum(zero, t_ymax - t_ymin)
p_area = p_width * p_height
t_area = t_width * t_height
intersect_ymin = tf.maximum(p_ymin, t_ymin)
intersect_xmin = tf.maximum(p_xmin, t_xmin)
intersect_ymax = tf.minimum(p_ymax, t_ymax)
intersect_xmax = tf.minimum(p_xmax, t_xmax)
intersect_width = tf.maximum(zero, intersect_xmax - intersect_xmin)
intersect_height = tf.maximum(zero, intersect_ymax - intersect_ymin)
intersect_area = intersect_width * intersect_height
union_area = p_area + t_area - intersect_area
iou_v = tf.math.divide_no_nan(intersect_area, union_area)
if iou_type == 'iou':
return iou_v # iou is the simplest form.
enclose_ymin = tf.minimum(p_ymin, t_ymin)
enclose_xmin = tf.minimum(p_xmin, t_xmin)
enclose_ymax = tf.maximum(p_ymax, t_ymax)
enclose_xmax = tf.maximum(p_xmax, t_xmax)
assert iou_type in ('giou', 'diou', 'ciou')
if iou_type == 'giou': # giou is the generalized iou.
enclose_width = tf.maximum(zero, enclose_xmax - enclose_xmin)
enclose_height = tf.maximum(zero, enclose_ymax - enclose_ymin)
enclose_area = enclose_width * enclose_height
giou_v = iou_v - tf.math.divide_no_nan(
(enclose_area - union_area), enclose_area)
return giou_v
assert iou_type in ('diou', 'ciou')
p_center = tf.stack([(p_ymin + p_ymax) / 2, (p_xmin + p_xmax) / 2], axis=-1)
t_center = tf.stack([(t_ymin + t_ymax) / 2, (t_xmin + t_xmax) / 2], axis=-1)
euclidean = tf.linalg.norm(t_center - p_center, axis=-1)
diag_length = tf.linalg.norm(
tf.stack([enclose_ymax - enclose_ymin, enclose_xmax - enclose_xmin],
axis=-1),
axis=-1)
diou_v = iou_v - tf.math.divide_no_nan(euclidean**2, diag_length**2)
if iou_type == 'diou': # diou is the distance iou.
return diou_v
assert iou_type == 'ciou'
v = _get_v(p_height, p_width, t_height, t_width)
alpha = tf.math.divide_no_nan(v, ((1 - iou_v) + v))
return diou_v - alpha * v # the last one is ciou.
def iou_loss(pred_boxes: FloatType,
target_boxes: FloatType,
iou_type: Text = 'iou') -> tf.Tensor:
"""A unified interface for computing various IoU losses.
  Let B denote the predicted box and B_gt the target box (ground truth):
  IoU = |B & B_gt| / |B U B_gt|
GIoU = IoU - |C - B U B_gt| / C, where C is the smallest box covering B and
B_gt.
DIoU = IoU - E(B, B_gt)^2 / c^2, E is the Euclidean distance of the center
points of B and B_gt, and c is the diagonal length of the smallest box
covering the two boxes
  CIoU = DIoU - a * v, where a is a positive trade-off parameter, and
v measures the consistency of aspect ratio:
v = (arctan(w_gt / h_gt) - arctan(w / h)) * 4 / pi^2
where (w_gt, h_gt) and (w, h) are the width and height of the target and
predicted box respectively.
The returned loss is computed as 1 - one of {IoU, GIoU, DIoU, CIoU}.
Args:
pred_boxes: predicted boxes, with coordinate [y_min, x_min, y_max, x_max]*.
It can be multiple anchors, with each anchor box has four coordinates.
target_boxes: target boxes, with coordinate [y_min, x_min, y_max, x_max]*.
It can be multiple anchors, with each anchor box has four coordinates.
iou_type: one of ['iou', 'ciou', 'diou', 'giou'].
Returns:
IoU loss float `Tensor`.
"""
if iou_type not in ('iou', 'ciou', 'diou', 'giou'):
raise ValueError(
'Unknown loss_type {}, not iou/ciou/diou/giou'.format(iou_type))
pred_boxes = tf.convert_to_tensor(pred_boxes, tf.float32)
target_boxes = tf.cast(target_boxes, pred_boxes.dtype)
# t_ denotes target boxes and p_ denotes predicted boxes: (y, x, y_max, x_max)
pred_boxes_list = tf.unstack(pred_boxes, None, axis=-1)
target_boxes_list = tf.unstack(target_boxes, None, axis=-1)
assert len(pred_boxes_list) == len(target_boxes_list)
assert len(pred_boxes_list) % 4 == 0
iou_loss_list = []
for i in range(0, len(pred_boxes_list), 4):
pred_boxes = pred_boxes_list[i:i + 4]
target_boxes = target_boxes_list[i:i + 4]
# Compute mask.
t_ymin, t_xmin, t_ymax, t_xmax = target_boxes
mask = tf.math.logical_and(t_ymax > t_ymin, t_xmax > t_xmin)
mask = tf.cast(mask, t_ymin.dtype)
# Loss should be mask * (1 - iou) = mask - masked_iou.
pred_boxes = [b * mask for b in pred_boxes]
target_boxes = [b * mask for b in target_boxes]
iou_loss_list.append(
mask *
(1 - tf.squeeze(_iou_per_anchor(pred_boxes, target_boxes, iou_type))))
if len(iou_loss_list) == 1:
return iou_loss_list[0]
return tf.reduce_sum(tf.stack(iou_loss_list), 0)
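# Illustrative usage sketch (box values are assumed, not part of the original
# file): each anchor contributes 1 - CIoU, masked out for degenerate targets.
#
#   pred = tf.constant([[0.1, 0.1, 0.6, 0.6]])
#   target = tf.constant([[0.2, 0.2, 0.7, 0.7]])
#   loss = iou_loss(pred, target, iou_type='ciou')  # shape [1]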
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/iou_utils.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A few predefined label id mapping."""
import tensorflow as tf
import yaml
coco = {
# 0: 'background',
1: 'person',
2: 'bicycle',
3: 'car',
4: 'motorcycle',
5: 'airplane',
6: 'bus',
7: 'train',
8: 'truck',
9: 'boat',
10: 'traffic light',
11: 'fire hydrant',
13: 'stop sign',
14: 'parking meter',
15: 'bench',
16: 'bird',
17: 'cat',
18: 'dog',
19: 'horse',
20: 'sheep',
21: 'cow',
22: 'elephant',
23: 'bear',
24: 'zebra',
25: 'giraffe',
27: 'backpack',
28: 'umbrella',
31: 'handbag',
32: 'tie',
33: 'suitcase',
34: 'frisbee',
35: 'skis',
36: 'snowboard',
37: 'sports ball',
38: 'kite',
39: 'baseball bat',
40: 'baseball glove',
41: 'skateboard',
42: 'surfboard',
43: 'tennis racket',
44: 'bottle',
46: 'wine glass',
47: 'cup',
48: 'fork',
49: 'knife',
50: 'spoon',
51: 'bowl',
52: 'banana',
53: 'apple',
54: 'sandwich',
55: 'orange',
56: 'broccoli',
57: 'carrot',
58: 'hot dog',
59: 'pizza',
60: 'donut',
61: 'cake',
62: 'chair',
63: 'couch',
64: 'potted plant',
65: 'bed',
67: 'dining table',
70: 'toilet',
72: 'tv',
73: 'laptop',
74: 'mouse',
75: 'remote',
76: 'keyboard',
77: 'cell phone',
78: 'microwave',
79: 'oven',
80: 'toaster',
81: 'sink',
82: 'refrigerator',
84: 'book',
85: 'clock',
86: 'vase',
87: 'scissors',
88: 'teddy bear',
89: 'hair drier',
90: 'toothbrush',
}
voc = {
# 0: 'background',
1: 'aeroplane',
2: 'bicycle',
3: 'bird',
4: 'boat',
5: 'bottle',
6: 'bus',
7: 'car',
8: 'cat',
9: 'chair',
10: 'cow',
11: 'diningtable',
12: 'dog',
13: 'horse',
14: 'motorbike',
15: 'person',
16: 'pottedplant',
17: 'sheep',
18: 'sofa',
19: 'train',
20: 'tvmonitor',
}
waymo = {
# 0: 'background',
1: 'vehicle',
2: 'pedestrian',
3: 'cyclist',
}
def get_label_map(mapping):
"""Get label id map based on the name, filename, or dict."""
# case 1: if it is None or dict, just return it.
if not mapping or isinstance(mapping, dict):
return mapping
# case 2: if it is a yaml file, load it to a dict and return the dict.
assert isinstance(mapping, str), 'mapping must be dict or str.'
if mapping.endswith('.yaml'):
with tf.io.gfile.GFile(mapping) as f:
return yaml.load(f, Loader=yaml.FullLoader)
# case 3: it is a name of a predefined dataset.
return {'coco': coco, 'voc': voc, 'waymo': waymo}[mapping]
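# Illustrative usage sketch (the yaml path is hypothetical, not part of the
# original file):
#
#   get_label_map('coco')[1]        # -> 'person', from the predefined dict above
#   get_label_map('my_labels.yaml') # loads an {id: name} mapping from yaml
#   get_label_map(None)             # passes through (returns None)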
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/label_util.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BiFPN/QuFPN and other FPN configs.
BiFPN is presented in the EfficientDet paper.
QuFPN is proposed in https://github.com/google/automl/pull/580
"""
import itertools
from utils import hparams_config
def bifpn_config(min_level, max_level, weight_method):
"""A dynamic bifpn config that can adapt to different min/max levels."""
p = hparams_config.Config()
p.weight_method = weight_method or 'fastattn'
# Node id starts from the input features and monotonically increase whenever
# a new node is added. Here is an example for level P3 - P7:
# P7 (4) P7" (12)
# P6 (3) P6' (5) P6" (11)
# P5 (2) P5' (6) P5" (10)
# P4 (1) P4' (7) P4" (9)
# P3 (0) P3" (8)
# So output would be like:
# [
# {'feat_level': 6, 'inputs_offsets': [3, 4]}, # for P6'
# {'feat_level': 5, 'inputs_offsets': [2, 5]}, # for P5'
# {'feat_level': 4, 'inputs_offsets': [1, 6]}, # for P4'
# {'feat_level': 3, 'inputs_offsets': [0, 7]}, # for P3"
# {'feat_level': 4, 'inputs_offsets': [1, 7, 8]}, # for P4"
# {'feat_level': 5, 'inputs_offsets': [2, 6, 9]}, # for P5"
# {'feat_level': 6, 'inputs_offsets': [3, 5, 10]}, # for P6"
# {'feat_level': 7, 'inputs_offsets': [4, 11]}, # for P7"
# ]
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_last_id(i),
level_last_id(i + 1)]
})
node_ids[i].append(next(id_cnt))
for i in range(min_level + 1, max_level + 1):
# bottom-up path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)]
})
node_ids[i].append(next(id_cnt))
return p
def qufpn_config(min_level, max_level, weight_method=None):
"""A dynamic quad fpn config that can adapt to different min/max levels."""
# It extends the idea of BiFPN, and has four paths:
# (up_down -> bottom_up) + (bottom_up -> up_down).
# See test for an example for level 2 and 7.
p = hparams_config.Config()
p.weight_method = weight_method or 'fastattn'
p.quad_method = 'fastattn'
num_levels = max_level - min_level + 1
node_ids = {min_level + i: [i] for i in range(num_levels)}
level_last_id = lambda level: node_ids[level][-1]
level_all_ids = lambda level: node_ids[level]
level_first_id = lambda level: node_ids[level][0]
id_cnt = itertools.count(num_levels)
p.nodes = []
for i in range(max_level - 1, min_level - 1, -1):
# top-down path 1.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_last_id(i),
level_last_id(i + 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
for i in range(min_level + 1, max_level):
# bottom-up path 2.
p.nodes.append({
'feat_level': i,
'inputs_offsets': level_all_ids(i) + [level_last_id(i - 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
i = max_level
p.nodes.append({
'feat_level': i,
'inputs_offsets': [level_first_id(i)] + [level_last_id(i - 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(min_level + 1, max_level + 1, 1):
# bottom-up path 3.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [
level_first_id(i),
level_last_id(i - 1) if i != min_level + 1 else level_first_id(i -
1)
],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[min_level].append(node_ids[min_level][-1])
for i in range(max_level - 1, min_level, -1):
# top-down path 4.
p.nodes.append({
'feat_level':
i,
'inputs_offsets': [node_ids[i][0]] + [node_ids[i][-1]] +
[level_last_id(i + 1)],
'weight_method':
p.weight_method
})
node_ids[i].append(next(id_cnt))
i = min_level
p.nodes.append({
'feat_level': i,
'inputs_offsets': [node_ids[i][0]] + [level_last_id(i + 1)],
'weight_method': p.weight_method
})
node_ids[i].append(next(id_cnt))
node_ids[max_level].append(node_ids[max_level][-1])
for i in range(max_level, min_level - 1, -1):
# quad-add path.
p.nodes.append({
'feat_level': i,
'inputs_offsets': [node_ids[i][2], node_ids[i][4]],
'weight_method': p.quad_method
})
node_ids[i].append(next(id_cnt))
return p
def get_fpn_config(fpn_name, min_level, max_level, weight_method):
"""Get fpn related configuration."""
if not fpn_name:
fpn_name = 'bifpn'
name_to_config = {
'bifpn': bifpn_config(min_level, max_level, weight_method),
'qufpn': qufpn_config(min_level, max_level, weight_method),
# legacy only: to be deprecated.
'bifpn_dyn': bifpn_config(min_level, max_level, weight_method),
}
return name_to_config[fpn_name]
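# Illustrative usage sketch (not part of the original file): the standard
# EfficientDet setting fuses levels P3-P7 with fast-attention weighting.
#
#   fpn_config = get_fpn_config('bifpn', min_level=3, max_level=7,
#                               weight_method='fastattn')
#   len(fpn_config.nodes)  # -> 8 fusion nodes, matching the diagram above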
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/fpn_configs.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor definition."""
import numpy as np
# The minimum score to consider a logit for identifying detections.
MIN_CLASS_SCORE = -5.0
# The score for a dummy detection
_DUMMY_DETECTION_SCORE = -1e5
# The maximum number of (anchor,class) pairs to keep for non-max suppression.
MAX_DETECTION_POINTS = 5000
def diou_nms(dets, iou_thresh=None):
"""DIOU non-maximum suppression.
  diou = iou - square of euclidean distance of box centers
/ square of diagonal of smallest enclosing bounding box
Reference: https://arxiv.org/pdf/1911.08287.pdf
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
    iou_thresh: IOU threshold.
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
center_x = (x1 + x2) / 2
center_y = (y1 + y2) / 2
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
iou = intersection / (areas[i] + areas[order[1:]] - intersection)
smallest_enclosing_box_x1 = np.minimum(x1[i], x1[order[1:]])
smallest_enclosing_box_x2 = np.maximum(x2[i], x2[order[1:]])
smallest_enclosing_box_y1 = np.minimum(y1[i], y1[order[1:]])
smallest_enclosing_box_y2 = np.maximum(y2[i], y2[order[1:]])
square_of_the_diagonal = (
(smallest_enclosing_box_x2 - smallest_enclosing_box_x1)**2 +
(smallest_enclosing_box_y2 - smallest_enclosing_box_y1)**2)
square_of_center_distance = ((center_x[i] - center_x[order[1:]])**2 +
(center_y[i] - center_y[order[1:]])**2)
# Add 1e-10 for numerical stability.
diou = iou - square_of_center_distance / (square_of_the_diagonal + 1e-10)
inds = np.where(diou <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def hard_nms(dets, iou_thresh=None):
"""The basic hard non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
    iou_thresh: IOU threshold.
Returns:
numpy.array: Retained boxes.
"""
iou_thresh = iou_thresh or 0.5
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
intersection = w * h
overlap = intersection / (areas[i] + areas[order[1:]] - intersection)
inds = np.where(overlap <= iou_thresh)[0]
order = order[inds + 1]
return dets[keep]
def soft_nms(dets, nms_configs):
"""Soft non-maximum suppression.
[1] Soft-NMS -- Improving Object Detection With One Line of Code.
https://arxiv.org/abs/1704.04503
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain the following members
* method: one of {`linear`, `gaussian`, 'hard'}. Use `gaussian` if None.
* iou_thresh (float): IOU threshold, only for `linear`, `hard`.
* sigma: Gaussian parameter, only for method 'gaussian'.
* score_thresh (float): Box score threshold for final boxes.
Returns:
numpy.array: Retained boxes.
"""
method = nms_configs['method']
# Default sigma and iou_thresh are from the original soft-nms paper.
sigma = nms_configs['sigma'] or 0.5
iou_thresh = nms_configs['iou_thresh'] or 0.3
score_thresh = nms_configs['score_thresh'] or 0.001
x1 = np.float32(dets[:, 0])
y1 = np.float32(dets[:, 1])
x2 = np.float32(dets[:, 2])
y2 = np.float32(dets[:, 3])
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
# expand dets with areas, and the second dimension is
# x1, y1, x2, y2, score, area
dets = np.concatenate((dets, areas[:, None]), axis=1)
retained_box = []
while dets.size > 0:
max_idx = np.argmax(dets[:, 4], axis=0)
dets[[0, max_idx], :] = dets[[max_idx, 0], :]
retained_box.append(dets[0, :-1])
xx1 = np.maximum(dets[0, 0], dets[1:, 0])
yy1 = np.maximum(dets[0, 1], dets[1:, 1])
xx2 = np.minimum(dets[0, 2], dets[1:, 2])
yy2 = np.minimum(dets[0, 3], dets[1:, 3])
w = np.maximum(xx2 - xx1 + 1, 0.0)
h = np.maximum(yy2 - yy1 + 1, 0.0)
inter = w * h
iou = inter / (dets[0, 5] + dets[1:, 5] - inter)
if method == 'linear':
weight = np.ones_like(iou)
weight[iou > iou_thresh] -= iou[iou > iou_thresh]
elif method == 'gaussian':
weight = np.exp(-(iou * iou) / sigma)
else: # traditional nms
weight = np.ones_like(iou)
weight[iou > iou_thresh] = 0
dets[1:, 4] *= weight
retained_idx = np.where(dets[1:, 4] >= score_thresh)[0]
dets = dets[retained_idx + 1, :]
return np.vstack(retained_box)
def nms(dets, nms_configs):
"""Non-maximum suppression.
Args:
dets: detection with shape (num, 5) and format [x1, y1, x2, y2, score].
nms_configs: a dict config that may contain parameters.
Returns:
numpy.array: Retained boxes.
"""
nms_configs = nms_configs or {}
  method = nms_configs.get('method', None)
if method == 'hard' or not method:
return hard_nms(dets, nms_configs['iou_thresh'])
if method == 'diou':
return diou_nms(dets, nms_configs['iou_thresh'])
if method in ('linear', 'gaussian'):
return soft_nms(dets, nms_configs)
raise ValueError('Unknown NMS method: {}'.format(method))
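# Illustrative usage sketch (detection values are assumed, not part of the
# original file): each detection row is [x1, y1, x2, y2, score].
#
#   dets = np.array([[10., 10., 50., 50., 0.9],
#                    [12., 12., 52., 52., 0.8],
#                    [80., 80., 120., 120., 0.7]])
#   kept = nms(dets, {'method': 'gaussian', 'sigma': 0.5,
#                     'iou_thresh': None, 'score_thresh': None})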
def per_class_nms(boxes, scores, classes, image_id, image_scale, num_classes,
max_boxes_to_draw, nms_configs):
"""Perform per class nms."""
boxes = boxes[:, [1, 0, 3, 2]]
detections = []
for c in range(num_classes):
indices = np.where(classes == c)[0]
if indices.shape[0] == 0:
continue
boxes_cls = boxes[indices, :]
scores_cls = scores[indices]
# Select top-scoring boxes in each class and apply non-maximum suppression
# (nms) for boxes in the same class. The selected boxes from each class are
# then concatenated for the final detection outputs.
all_detections_cls = np.column_stack((boxes_cls, scores_cls))
top_detections_cls = nms(all_detections_cls, nms_configs)
top_detections_cls = np.column_stack(
(np.repeat(image_id, len(top_detections_cls)),
top_detections_cls,
np.repeat(c + 1, len(top_detections_cls)))
)
detections.append(top_detections_cls)
def _generate_dummy_detections(number):
detections_dummy = np.zeros((number, 7), dtype=np.float32)
detections_dummy[:, 0] = image_id[0]
detections_dummy[:, 5] = _DUMMY_DETECTION_SCORE
return detections_dummy
if detections:
detections = np.vstack(detections)
    # Keep only the top `max_boxes_to_draw` detections.
indices = np.argsort(-detections[:, -2])
detections = np.array(
detections[indices[0:max_boxes_to_draw]], dtype=np.float32)
    # Add dummy detections to fill up to `max_boxes_to_draw` detections.
n = max(max_boxes_to_draw - len(detections), 0)
detections_dummy = _generate_dummy_detections(n)
detections = np.vstack([detections, detections_dummy])
else:
detections = _generate_dummy_detections(max_boxes_to_draw)
detections[:, 1:5] *= image_scale
return detections | DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/nms_np.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Postprocessing for anchor-based detection."""
import functools
from typing import List, Tuple
from absl import logging
import tensorflow as tf
from model import nms_np
from utils import model_utils
from model import anchors
T = tf.Tensor # a shortcut for typing check.
CLASS_OFFSET = 1
def to_list(inputs):
if isinstance(inputs, dict):
return [inputs[k] for k in sorted(inputs.keys())]
if isinstance(inputs, list):
return inputs
raise ValueError('Unrecognized inputs : {}'.format(inputs))
def batch_map_fn(map_fn, inputs, *args):
"""Apply map_fn at batch dimension."""
if isinstance(inputs[0], (list, tuple)):
batch_size = len(inputs[0])
else:
batch_size = inputs[0].shape.as_list()[0]
if not batch_size:
# handle dynamic batch size: tf.vectorized_map is faster than tf.map_fn.
return tf.vectorized_map(map_fn, inputs, *args)
outputs = []
for i in range(batch_size):
outputs.append(map_fn([x[i] for x in inputs]))
return [tf.stack(y) for y in zip(*outputs)]
def clip_boxes(boxes: T, image_size: int) -> T:
"""Clip boxes to fit the image size."""
  # Duplicate (height, width) into (h, w, h, w) to clip [y_min, x_min, y_max, x_max].
  image_size = model_utils.parse_image_size(image_size) * 2
return tf.clip_by_value(boxes, [0], image_size)
def merge_class_box_level_outputs(params, cls_outputs: List[T],
box_outputs: List[T]) -> Tuple[T, T]:
"""Concatenates class and box of all levels into one tensor."""
cls_outputs_all, box_outputs_all = [], []
batch_size = tf.shape(cls_outputs[0])[0]
for level in range(0, params['max_level'] - params['min_level'] + 1):
if params['data_format'] == 'channels_first':
cls_outputs[level] = tf.transpose(cls_outputs[level], [0, 2, 3, 1])
box_outputs[level] = tf.transpose(box_outputs[level], [0, 2, 3, 1])
cls_outputs_all.append(
tf.reshape(cls_outputs[level], [batch_size, -1, params['num_classes']]))
box_outputs_all.append(tf.reshape(box_outputs[level], [batch_size, -1, 4]))
return tf.concat(cls_outputs_all, 1), tf.concat(box_outputs_all, 1)
def topk_class_boxes(params, cls_outputs: T,
box_outputs: T) -> Tuple[T, T, T, T]:
"""Pick the topk class and box outputs."""
batch_size = tf.shape(cls_outputs)[0]
num_classes = params['num_classes']
max_nms_inputs = params['nms_configs'].get('max_nms_inputs', 0)
if max_nms_inputs > 0:
# Prune anchors and detections to only keep max_nms_inputs.
    # Due to some issues, top_k is currently slow in graph mode.
logging.info('use max_nms_inputs for pre-nms topk.')
cls_outputs_reshape = tf.reshape(cls_outputs, [batch_size, -1])
_, cls_topk_indices = tf.math.top_k(
cls_outputs_reshape, k=max_nms_inputs, sorted=False)
indices = cls_topk_indices // num_classes
classes = cls_topk_indices % num_classes
cls_indices = tf.stack([indices, classes], axis=2)
cls_outputs_topk = tf.gather_nd(cls_outputs, cls_indices, batch_dims=1)
box_outputs_topk = tf.gather_nd(
box_outputs, tf.expand_dims(indices, 2), batch_dims=1)
else:
logging.info('use max_reduce for pre-nms topk.')
    # Keep all anchors, but for each anchor, just keep the max probability
    # across classes.
cls_outputs_idx = tf.math.argmax(cls_outputs, axis=-1, output_type=tf.int32)
num_anchors = tf.shape(cls_outputs)[1]
classes = cls_outputs_idx
indices = tf.tile(
tf.expand_dims(tf.range(num_anchors), axis=0), [batch_size, 1])
cls_outputs_topk = tf.reduce_max(cls_outputs, -1)
box_outputs_topk = box_outputs
return cls_outputs_topk, box_outputs_topk, classes, indices
def pre_nms(params, cls_outputs, box_outputs, topk=True):
"""Detection post processing before nms.
It takes the multi-level class and box predictions from network, merge them
into unified tensors, and compute boxes, scores, and classes.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
boxes with shape [N, H, W, 4 * num_anchors].
topk: if True, select topk before nms (mainly to speed up nms).
Returns:
A tuple of (boxes, scores, classes).
"""
# get boxes by apply bounding box regression to anchors.
eval_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'], params['aspect_ratios'],
params['anchor_scale'], params['image_size'])
cls_outputs, box_outputs = merge_class_box_level_outputs(
params, cls_outputs, box_outputs)
if topk:
# select topK purely based on scores before NMS, in order to speed up nms.
cls_outputs, box_outputs, classes, indices = topk_class_boxes(
params, cls_outputs, box_outputs)
anchor_boxes = tf.gather(eval_anchors.boxes, indices)
else:
anchor_boxes = eval_anchors.boxes
classes = None
boxes = anchors.decode_box_outputs(box_outputs, anchor_boxes)
# convert logits to scores.
scores = tf.math.sigmoid(cls_outputs)
return boxes, scores, classes
def nms(params, boxes: T, scores: T, classes: T,
padded: bool) -> Tuple[T, T, T, T]:
"""Non-maximum suppression.
Args:
params: a dict of parameters.
boxes: a tensor with shape [N, 4], where N is the number of boxes. Box
format is [y_min, x_min, y_max, x_max].
scores: a tensor with shape [N].
classes: a tensor with shape [N].
    padded: a bool value indicating whether the results are padded.
Returns:
A tuple (boxes, scores, classes, valid_lens), where valid_lens is a scalar
denoting the valid length of boxes/scores/classes outputs.
"""
nms_configs = params['nms_configs']
method = nms_configs['method']
max_output_size = nms_configs['max_output_size']
if method == 'hard' or not method:
# hard nms.
sigma = 0.0
iou_thresh = nms_configs['iou_thresh'] or 0.5
score_thresh = nms_configs['score_thresh'] or float('-inf')
elif method == 'gaussian':
sigma = nms_configs['sigma'] or 0.5
iou_thresh = 1.0
score_thresh = nms_configs['score_thresh'] or 0.001
else:
raise ValueError('Inference has invalid nms method {}'.format(method))
# TF API's sigma is twice as the paper's value, so here we divide it by 2:
# https://github.com/tensorflow/tensorflow/issues/40253.
nms_top_idx, nms_scores, nms_valid_lens = tf.raw_ops.NonMaxSuppressionV5(
boxes=boxes,
scores=scores,
max_output_size=max_output_size,
iou_threshold=iou_thresh,
score_threshold=score_thresh,
soft_nms_sigma=(sigma / 2),
pad_to_max_output_size=padded)
nms_boxes = tf.gather(boxes, nms_top_idx)
nms_classes = tf.cast(
tf.gather(classes, nms_top_idx) + CLASS_OFFSET, tf.float32)
return nms_boxes, nms_scores, nms_classes, nms_valid_lens
def postprocess_combined(params, cls_outputs, box_outputs, image_scales=None):
"""Post processing with combined NMS.
Leverage the tf combined NMS. It is fast on TensorRT, but slow on CPU/GPU.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].
    image_scales: scaling factor for the final image and bounding boxes.
  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
# Don't filter any outputs because combine_nms need the raw information.
boxes, scores, _ = pre_nms(params, cls_outputs, box_outputs, topk=False)
max_output_size = params['nms_configs']['max_output_size']
score_thresh = params['nms_configs']['score_thresh'] or float('-inf')
nms_boxes, nms_scores, nms_classes, nms_valid_len = (
tf.image.combined_non_max_suppression(
tf.expand_dims(boxes, axis=2),
scores,
max_output_size,
max_output_size,
score_threshold=score_thresh,
clip_boxes=False))
nms_classes += CLASS_OFFSET
nms_boxes = clip_boxes(nms_boxes, params['image_size'])
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
def postprocess_global(params, cls_outputs, box_outputs, image_scales=None):
"""Post processing with global NMS.
A fast but less accurate version of NMS. The idea is to treat the scores for
different classes in a unified way, and perform NMS globally for all classes.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].
    image_scales: scaling factor for the final image and bounding boxes.
  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
def single_batch_fn(element):
return nms(params, element[0], element[1], element[2], True)
nms_boxes, nms_scores, nms_classes, nms_valid_len = batch_map_fn(
single_batch_fn, [boxes, scores, classes])
nms_boxes = clip_boxes(nms_boxes, params['image_size'])
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
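# Illustrative usage sketch (not part of the original file): `params` is assumed
# to be a config dict with 'min_level', 'max_level', 'num_classes', 'image_size'
# and 'nms_configs'; cls_outputs/box_outputs are the per-level head outputs.
#
#   boxes, scores, classes, valid_len = postprocess_global(
#       params, cls_outputs, box_outputs, image_scales)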
def per_class_nms(params, boxes, scores, classes, image_scales=None):
"""Per-class nms, a utility for postprocess_per_class.
Args:
params: a dict of parameters.
boxes: A tensor with shape [N, K, 4], where N is batch_size, K is num_boxes.
Box format is [y_min, x_min, y_max, x_max].
scores: A tensor with shape [N, K].
classes: A tensor with shape [N, K].
    image_scales: scaling factor for the final image and bounding boxes.
  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
def single_batch_fn(element):
"""A mapping function for a single batch."""
boxes_i, scores_i, classes_i = element[0], element[1], element[2]
nms_boxes_cls, nms_scores_cls, nms_classes_cls = [], [], []
nms_valid_len_cls = []
for cid in range(params['num_classes']):
indices = tf.where(tf.equal(classes_i, cid))
if indices.shape[0] == 0:
continue
classes_cls = tf.gather_nd(classes_i, indices)
boxes_cls = tf.gather_nd(boxes_i, indices)
scores_cls = tf.gather_nd(scores_i, indices)
nms_boxes, nms_scores, nms_classes, nms_valid_len = nms(
params, boxes_cls, scores_cls, classes_cls, False)
nms_boxes_cls.append(nms_boxes)
nms_scores_cls.append(nms_scores)
nms_classes_cls.append(nms_classes)
nms_valid_len_cls.append(nms_valid_len)
# Pad zeros and select topk.
max_output_size = params['nms_configs'].get('max_output_size', 100)
nms_boxes_cls = tf.pad(
tf.concat(nms_boxes_cls, 0), [[0, max_output_size], [0, 0]])
nms_scores_cls = tf.pad(
tf.concat(nms_scores_cls, 0), [[0, max_output_size]])
nms_classes_cls = tf.pad(
tf.concat(nms_classes_cls, 0), [[0, max_output_size]])
nms_valid_len_cls = tf.stack(nms_valid_len_cls)
_, indices = tf.math.top_k(nms_scores_cls, k=max_output_size, sorted=True)
return tuple((
tf.gather(nms_boxes_cls, indices),
tf.gather(nms_scores_cls, indices),
tf.gather(nms_classes_cls, indices),
tf.minimum(max_output_size, tf.reduce_sum(nms_valid_len_cls))))
# end of single_batch_fn
nms_boxes, nms_scores, nms_classes, nms_valid_len = batch_map_fn(
single_batch_fn, [boxes, scores, classes])
if image_scales is not None:
scales = tf.expand_dims(tf.expand_dims(image_scales, -1), -1)
nms_boxes = nms_boxes * tf.cast(scales, nms_boxes.dtype)
return nms_boxes, nms_scores, nms_classes, nms_valid_len
def postprocess_per_class(params, cls_outputs, box_outputs, image_scales=None):
"""Post processing with per class NMS.
An accurate but relatively slow version of NMS. The idea is to perform NMS for
each class, and then combine them.
Args:
params: a dict of parameters.
cls_outputs: a list of tensors for classes, each tensor denotes a level of
logits with shape [N, H, W, num_class * num_anchors].
    box_outputs: a list of tensors for boxes, each tensor denotes a level of
      boxes with shape [N, H, W, 4 * num_anchors]. Each box format is [y_min,
      x_min, y_max, x_max].
    image_scales: scaling factor for the final image and bounding boxes.
  Returns:
    A tuple of batch level (boxes, scores, classes, valid_len) after nms.
"""
cls_outputs = to_list(cls_outputs)
box_outputs = to_list(box_outputs)
boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
return per_class_nms(params, boxes, scores, classes, image_scales)
def generate_detections(params,
cls_outputs,
box_outputs,
image_scales,
image_ids,
flip=False):
"""A legacy interface for generating [id, x, y, w, h, score, class]."""
_, width = model_utils.parse_image_size(params['image_size'])
original_image_widths = tf.expand_dims(image_scales, -1) * width
if params['nms_configs'].get('pyfunc', True):
# numpy based soft-nms gives better accuracy than the tensorflow builtin
# the reason why is unknown
detections_bs = []
boxes, scores, classes = pre_nms(params, cls_outputs, box_outputs)
for index in range(boxes.shape[0]):
nms_configs = params['nms_configs']
detections = tf.numpy_function(
functools.partial(nms_np.per_class_nms, nms_configs=nms_configs), [
boxes[index],
scores[index],
classes[index],
tf.slice(image_ids, [index], [1]),
tf.slice(image_scales, [index], [1]),
params['num_classes'],
nms_configs['max_output_size'],
], tf.float32)
if flip:
detections = tf.stack([
detections[:, 0],
# the mirrored location of the left edge is the image width
# minus the position of the right edge
original_image_widths[index] - detections[:, 3],
detections[:, 2],
# the mirrored location of the right edge is the image width
# minus the position of the left edge
original_image_widths[index] - detections[:, 1],
detections[:, 4],
detections[:, 5],
detections[:, 6],
], axis=-1)
detections_bs.append(detections)
    return tf.stack(detections_bs, axis=0, name='detections')
nms_boxes_bs, nms_scores_bs, nms_classes_bs, _ = postprocess_per_class(
params, cls_outputs, box_outputs, image_scales)
image_ids_bs = tf.cast(tf.expand_dims(image_ids, -1), nms_scores_bs.dtype)
if flip:
detections_bs = [
image_ids_bs * tf.ones_like(nms_scores_bs),
# the mirrored location of the left edge is the image width
# minus the position of the right edge
original_image_widths - nms_boxes_bs[:, :, 3],
nms_boxes_bs[:, :, 0],
# the mirrored location of the right edge is the image width
# minus the position of the left edge
original_image_widths - nms_boxes_bs[:, :, 1],
nms_boxes_bs[:, :, 2],
nms_scores_bs,
nms_classes_bs,
]
else:
detections_bs = [
image_ids_bs * tf.ones_like(nms_scores_bs),
nms_boxes_bs[:, :, 1],
nms_boxes_bs[:, :, 0],
nms_boxes_bs[:, :, 3],
nms_boxes_bs[:, :, 2],
nms_scores_bs,
nms_classes_bs,
]
  return tf.stack(detections_bs, axis=-1, name='detections')
def transform_detections(detections):
"""A transforms detections in [id, x1, y1, x2, y2, score, class] form to [id, x, y, w, h, score, class]."""
return tf.stack([
detections[:, :, 0],
detections[:, :, 1],
detections[:, :, 2],
detections[:, :, 3] - detections[:, :, 1],
detections[:, :, 4] - detections[:, :, 2],
detections[:, :, 5],
detections[:, :, 6],
],
axis=-1)
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/postprocess.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Learning rate related utils."""
import math
from absl import logging
from typing import Any, Mapping
import tensorflow as tf
def update_learning_rate_schedule_parameters(params):
"""Updates params that are related to the learning rate schedule."""
steps_per_epoch = params['steps_per_epoch']
params['lr_warmup_step'] = int(params['lr_warmup_epoch'] * steps_per_epoch)
params['total_steps'] = int(params['num_epochs'] * steps_per_epoch)
@tf.keras.utils.register_keras_serializable(package='Custom')
class CosineLrSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Cosine learning rate schedule."""
def __init__(self, base_lr: float, lr_warmup_init: float,
lr_warmup_step: int, total_steps: int):
"""Build a CosineLrSchedule.
Args:
base_lr: `float`, The initial learning rate.
lr_warmup_init: `float`, The warm up learning rate.
lr_warmup_step: `int`, The warm up step.
total_steps: `int`, Total train steps.
"""
super(CosineLrSchedule, self).__init__()
logging.info('LR schedule method: cosine')
self.base_lr = base_lr
self.lr_warmup_init = lr_warmup_init
self.lr_warmup_step = lr_warmup_step
self.decay_steps = tf.cast(total_steps - lr_warmup_step, tf.float32)
def __call__(self, step):
linear_warmup = (
self.lr_warmup_init +
(tf.cast(step, dtype=tf.float32) / self.lr_warmup_step *
(self.base_lr - self.lr_warmup_init)))
cosine_lr = 0.5 * self.base_lr * (
1 + tf.cos(math.pi * (tf.cast(step, tf.float32) - self.lr_warmup_step) / self.decay_steps))
return tf.where(step < self.lr_warmup_step, linear_warmup, cosine_lr)
def get_config(self) -> Mapping[str, Any]:
    return {
        "base_lr": self.base_lr,
        "lr_warmup_init": self.lr_warmup_init,
        "lr_warmup_step": self.lr_warmup_step,
        # total_steps is required by __init__ when deserializing.
        "total_steps": self.lr_warmup_step + int(self.decay_steps),
    }
def learning_rate_schedule(params):
"""Learning rate schedule based on global step."""
update_learning_rate_schedule_parameters(params)
lr_decay_method = params['lr_decay_method']
if lr_decay_method == 'cosine':
return CosineLrSchedule(params['learning_rate'],
params['lr_warmup_init'], params['lr_warmup_step'],
params['total_steps'])
raise ValueError('unknown lr_decay_method: {}'.format(lr_decay_method))
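# Illustrative usage sketch (hyperparameter values are assumed, not part of the
# original file):
#
#   params = {'steps_per_epoch': 1000, 'lr_warmup_epoch': 1.0, 'num_epochs': 300,
#             'lr_decay_method': 'cosine', 'learning_rate': 0.08,
#             'lr_warmup_init': 0.008}
#   lr_schedule = learning_rate_schedule(params)
#   lr_schedule(tf.constant(500))  # warmup phase: between 0.008 and 0.08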
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/learning_rate.py |
DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/__init__.py |
|
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Callback related utils."""
from concurrent import futures
import os
from mpi4py import MPI
import time
import numpy as np
import tensorflow as tf
import horovod.tensorflow.keras.callbacks as hvd_callbacks
from tensorflow_addons.optimizers import MovingAverage
from typeguard import typechecked
from typing import Any, List, MutableMapping, Text
from model import inference, optimizer_builder
from utils import model_utils
from model import efficientdet_keras, coco_metric, label_util, postprocess
from utils.horovod_utils import get_world_size, is_main_process
class DisplayCallback(tf.keras.callbacks.Callback):
"""Display inference result callback."""
def __init__(self, sample_image, output_dir, update_freq=1):
super().__init__()
image_file = tf.io.read_file(sample_image)
self.sample_image = tf.expand_dims(
tf.image.decode_jpeg(image_file, channels=3), axis=0)
self.executor = futures.ThreadPoolExecutor(max_workers=1)
self.update_freq = update_freq
self.output_dir = output_dir
def set_model(self, model: tf.keras.Model):
self.train_model = model
with tf.device('/cpu:0'):
self.model = efficientdet_keras.EfficientDetModel(config=model.config)
height, width = model_utils.parse_image_size(model.config.image_size)
self.model.build((1, height, width, 3))
self.file_writer = tf.summary.create_file_writer(self.output_dir)
self.min_score_thresh = self.model.config.nms_configs['score_thresh'] or 0.4
self.max_boxes_to_draw = (
self.model.config.nms_configs['max_output_size'] or 100)
def on_epoch_end(self, epoch, logs=None):
if epoch % self.update_freq == 0:
self.executor.submit(self.draw_inference, epoch)
@tf.function
def inference(self):
return self.model(self.sample_image, training=False)
def draw_inference(self, epoch):
self.model.set_weights(self.train_model.get_weights())
boxes, scores, classes, valid_len = self.inference()
length = valid_len[0]
image = inference.visualize_image(
self.sample_image[0],
boxes[0].numpy()[:length],
        classes[0].numpy().astype(int)[:length],
scores[0].numpy()[:length],
label_map=self.model.config.label_map,
min_score_thresh=self.min_score_thresh,
max_boxes_to_draw=self.max_boxes_to_draw)
with self.file_writer.as_default():
tf.summary.image('Test image', tf.expand_dims(image, axis=0), step=epoch)
class BatchTimestamp(object):
"""A structure to store batch time stamp."""
def __init__(self, batch_index, timestamp):
self.batch_index = batch_index
self.timestamp = timestamp
def __repr__(self):
return "'BatchTimestamp<batch_index: {}, timestamp: {}>'".format(
self.batch_index, self.timestamp)
class TimeHistory(tf.keras.callbacks.Callback):
"""Callback for Keras models."""
def __init__(self, batch_size, logger, log_steps=1, logdir=None):
"""Callback for logging performance.
Args:
      batch_size: Total batch size.
      logger: Logger object used to emit per-step latency and throughput stats.
      log_steps: Interval of steps between logging of batch level stats.
      logdir: Optional directory to write TensorBoard summaries.
"""
# TODO(wcromar): remove this parameter and rely on `logs` parameter of
# on_train_batch_end()
self.batch_size = batch_size
super(TimeHistory, self).__init__()
self.log_steps = log_steps
self.last_log_step = 0
self.steps_before_epoch = 0
self.steps_in_epoch = 0
self.start_time = None
self.logger = logger
self.step_per_epoch = 0
if logdir:
self.summary_writer = tf.summary.create_file_writer(logdir)
else:
self.summary_writer = None
# Logs start of step 1 then end of each step based on log_steps interval.
self.timestamp_log = []
# Records the time each epoch takes to run from start to finish of epoch.
self.epoch_runtime_log = []
self.latency = []
self.throughput = []
@property
def global_steps(self):
"""The current 1-indexed global step."""
return self.steps_before_epoch + self.steps_in_epoch
@property
def average_steps_per_second(self):
"""The average training steps per second across all epochs."""
return (self.global_steps - self.step_per_epoch) / sum(self.epoch_runtime_log[1:])
@property
def average_examples_per_second(self):
"""The average number of training examples per second across all epochs."""
# return self.average_steps_per_second * self.batch_size
ind = int(0.1*len(self.throughput))
return sum(self.throughput[ind:])/(len(self.throughput[ind:]))
@property
def average_time_per_iteration(self):
"""The average time per iteration in seconds across all epochs."""
ind = int(0.1*len(self.latency))
return sum(self.latency[ind:])/(len(self.latency[ind:]))
def on_train_end(self, logs=None):
self.train_finish_time = time.time()
if self.summary_writer:
self.summary_writer.flush()
def on_epoch_begin(self, epoch, logs=None):
self.epoch_start = time.time()
def on_batch_begin(self, batch, logs=None):
if not self.start_time:
self.start_time = time.time()
# Record the timestamp of the first global step
if not self.timestamp_log:
self.timestamp_log.append(BatchTimestamp(self.global_steps,
self.start_time))
def on_batch_end(self, batch, logs=None):
"""Records elapse time of the batch and calculates examples per second."""
self.steps_in_epoch = batch + 1
steps_since_last_log = self.global_steps - self.last_log_step
if steps_since_last_log >= self.log_steps:
now = time.time()
elapsed_time = now - self.start_time
steps_per_second = steps_since_last_log / elapsed_time
examples_per_second = steps_per_second * self.batch_size
self.timestamp_log.append(BatchTimestamp(self.global_steps, now))
elapsed_time_str='{:.2f} seconds'.format(elapsed_time)
self.logger.log(step='PARAMETER', data={'Latency': elapsed_time_str, 'fps': examples_per_second, 'steps': (self.last_log_step, self.global_steps)})
self.logger.flush()
if self.summary_writer:
with self.summary_writer.as_default():
tf.summary.scalar('global_step/sec', steps_per_second,
self.global_steps)
tf.summary.scalar('examples/sec', examples_per_second,
self.global_steps)
self.last_log_step = self.global_steps
self.start_time = None
self.latency.append(elapsed_time)
self.throughput.append(examples_per_second)
def on_epoch_end(self, epoch, logs=None):
if epoch == 0:
self.step_per_epoch = self.steps_in_epoch
epoch_run_time = time.time() - self.epoch_start
self.epoch_runtime_log.append(epoch_run_time)
self.steps_before_epoch += self.steps_in_epoch
self.steps_in_epoch = 0
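# Illustrative usage sketch (`dllogger` and `model` are assumed to exist, not
# part of the original file):
#
#   time_callback = TimeHistory(batch_size=64, logger=dllogger, log_steps=100,
#                               logdir='/tmp/logs')
#   model.fit(train_ds, epochs=300, callbacks=[time_callback])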
class LRTensorBoard(tf.keras.callbacks.Callback):
def __init__(self, log_dir, **kwargs):
super().__init__(**kwargs)
self.summary_writer = tf.summary.create_file_writer(log_dir)
self.steps_before_epoch = 0
self.steps_in_epoch = 0
@property
def global_steps(self):
"""The current 1-indexed global step."""
return self.steps_before_epoch + self.steps_in_epoch
def on_batch_end(self, batch, logs=None):
self.steps_in_epoch = batch + 1
lr = self.model.optimizer.lr(self.global_steps)
with self.summary_writer.as_default():
summary = tf.summary.scalar('learning_rate', lr, self.global_steps)
def on_epoch_end(self, epoch, logs=None):
self.steps_before_epoch += self.steps_in_epoch
self.steps_in_epoch = 0
def on_train_end(self, logs=None):
self.summary_writer.flush()
class LoggingCallback(tf.keras.callbacks.Callback):
def on_train_batch_end(self, batch, logs=None):
print("Iter: {}".format(batch))
for var in self.model.variables:
# if 'dense' in var.name:
# continue
print("Var: {} {}".format(var.name, var.value))
try:
slot = self.model.optimizer.get_slot(var, "average")
print("Avg: {}".format(slot))
except KeyError as e:
print("{} does not have ema average slot".format(var.name))
def fetch_optimizer(model, opt_type) -> tf.keras.optimizers.Optimizer:
"""Get the base optimizer used by the current model."""
# this is the case where our target optimizer is not wrapped by any other optimizer(s)
if isinstance(model.optimizer,opt_type):
return model.optimizer
# Dive into nested optimizer object until we reach the target opt
opt = model.optimizer
while hasattr(opt, '_optimizer'):
opt = opt._optimizer
if isinstance(opt,opt_type):
return opt
raise TypeError(f'Failed to find {opt_type} in the nested optimizer object')
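# Illustrative sketch (not part of the original file): if the training script
# wraps the optimizer, e.g. LossScaleOptimizer(MovingAverage(SGD(...))),
# fetch_optimizer walks the nested `_optimizer` attributes until it finds the
# requested type:
#   ema_opt = fetch_optimizer(model, MovingAverage)
#   ema_opt.swap_weights()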
class MovingAverageCallback(tf.keras.callbacks.Callback):
"""A Callback to be used with a `MovingAverage` optimizer.
Applies moving average weights to the model during validation time to test
and predict on the averaged weights rather than the current model weights.
Once training is complete, the model weights will be overwritten with the
averaged weights (by default).
Attributes:
overwrite_weights_on_train_end: Whether to overwrite the current model
weights with the averaged weights from the moving average optimizer.
**kwargs: Any additional callback arguments.
"""
def __init__(self,
overwrite_weights_on_train_end: bool = False,
**kwargs):
super(MovingAverageCallback, self).__init__(**kwargs)
self.overwrite_weights_on_train_end = overwrite_weights_on_train_end
self.ema_opt = None
def set_model(self, model: tf.keras.Model):
super(MovingAverageCallback, self).set_model(model)
self.ema_opt = fetch_optimizer(model, MovingAverage)
self.ema_opt.shadow_copy(self.model.weights)
def on_test_begin(self, logs: MutableMapping[Text, Any] = None):
self.ema_opt.swap_weights()
def on_test_end(self, logs: MutableMapping[Text, Any] = None):
self.ema_opt.swap_weights()
def on_train_end(self, logs: MutableMapping[Text, Any] = None):
if self.overwrite_weights_on_train_end:
self.ema_opt.assign_average_vars(self.model.variables)
class AverageModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
"""Saves and, optionally, assigns the averaged weights.
Taken from tfa.callbacks.AverageModelCheckpoint [original class].
NOTE1: The original class has a type check decorator, which prevents passing non-string save_freq (fix: removed)
NOTE2: The original class may not properly handle layered (nested) optimizer objects (fix: use fetch_optimizer)
Attributes:
update_weights: If True, assign the moving average weights
to the model, and save them. If False, keep the old
non-averaged weights, but the saved model uses the
average weights.
See `tf.keras.callbacks.ModelCheckpoint` for the other args.
"""
def __init__(
self,
update_weights: bool,
filepath: str,
monitor: str = 'val_loss',
verbose: int = 0,
save_best_only: bool = False,
save_weights_only: bool = False,
mode: str = 'auto',
save_freq: str = 'epoch',
**kwargs):
super().__init__(
filepath,
monitor,
verbose,
save_best_only,
save_weights_only,
mode,
save_freq,
**kwargs)
self.update_weights = update_weights
self.ema_opt = None
def set_model(self, model):
self.ema_opt = fetch_optimizer(model, MovingAverage)
return super().set_model(model)
def _save_model(self, epoch, batch, logs):
assert isinstance(self.ema_opt, MovingAverage)
if self.update_weights:
self.ema_opt.assign_average_vars(self.model.variables)
return super()._save_model(epoch, batch, logs)
else:
# Note: `model.get_weights()` gives us the weights (non-ref)
# whereas `model.variables` returns references to the variables.
non_avg_weights = self.model.get_weights()
self.ema_opt.assign_average_vars(self.model.variables)
# result is currently None, since `super._save_model` doesn't
# return anything, but this may change in the future.
result = super()._save_model(epoch, batch, logs)
self.model.set_weights(non_avg_weights)
return result
class StopEarlyCallback(tf.keras.callbacks.Callback):
def __init__(self, num_epochs, stop_75, **kwargs):
super(StopEarlyCallback, self).__init__(**kwargs)
self.num_epochs = num_epochs
self.stop_75 = stop_75
def on_epoch_end(self, epoch, logs=None):
if ((epoch + 1) > (0.75 * self.num_epochs) and self.stop_75) or ((epoch + 1) == 300):
self.model.stop_training = True
class COCOEvalCallback(tf.keras.callbacks.Callback):
def __init__(self, eval_dataset, eval_freq, start_eval_epoch, eval_params, logger, **kwargs):
super(COCOEvalCallback, self).__init__(**kwargs)
self.dataset = eval_dataset
self.eval_freq = eval_freq
self.start_eval_epoch = start_eval_epoch
self.eval_params = eval_params
self.ema_opt = None
self.logger = logger
label_map = label_util.get_label_map(eval_params['label_map'])
self.evaluator = coco_metric.EvaluationMetric(
filename=eval_params['val_json_file'], label_map=label_map)
self.pbar = tf.keras.utils.Progbar(eval_params['num_samples'])
def set_model(self, model):
self.ema_opt = fetch_optimizer(model, MovingAverage)
return super().set_model(model)
@tf.function
def eval_model_fn(self, images, labels):
cls_outputs, box_outputs = self.model(images, training=False)
detections = postprocess.generate_detections(self.eval_params, cls_outputs, box_outputs,
labels['image_scales'],
labels['source_ids'])
tf.numpy_function(self.evaluator.update_state,
[labels['groundtruth_data'],
postprocess.transform_detections(detections)], [])
def evaluate(self, epoch):
if self.eval_params['moving_average_decay'] > 0:
self.ema_opt.swap_weights() # get ema weights
self.evaluator.reset_states()
# evaluate all images.
for i, (images, labels) in enumerate(self.dataset):
self.eval_model_fn(images, labels)
if is_main_process():
self.pbar.update(i)
# gather detections from all ranks
self.evaluator.gather()
# compute the final eval results.
if is_main_process():
metrics = self.evaluator.result()
metric_dict = {}
for i, name in enumerate(self.evaluator.metric_names):
metric_dict[name] = metrics[i]
# csv format
csv_metrics = ['AP','AP50','AP75','APs','APm','APl']
csv_format = ",".join([str(epoch+1)] + [str(round(metric_dict[key] * 100, 2)) for key in csv_metrics])
print(metric_dict, "csv format:", csv_format)
self.logger.log(step=(), data={'epoch': epoch+1,
'validation_accuracy_mAP': round(metric_dict['AP'] * 100, 2)})
if self.eval_params['moving_average_decay'] > 0:
self.ema_opt.swap_weights() # get base weights
MPI.COMM_WORLD.Barrier()
def on_epoch_end(self, epoch, logs=None):
if (epoch + 1) >= self.start_eval_epoch and (epoch + 1) % self.eval_freq == 0:
self.evaluate(epoch)
def get_callbacks(
params, training_mode, eval_params, eval_dataset, logger,
time_history=True, log_steps=1, lr_tb=True, benchmark=False
):
"""Get callbacks for given params."""
callbacks = []
if is_main_process():
    if not benchmark:
tb_callback = tf.keras.callbacks.TensorBoard(
log_dir=params['model_dir'], profile_batch=0, histogram_freq = 1)
callbacks.append(tb_callback)
if params['moving_average_decay']:
emackpt_callback = AverageModelCheckpoint(
filepath=os.path.join(params['model_dir'], 'ema_weights', 'emackpt-{epoch:02d}'),
update_weights=False,
amp=params['mixed_precision'],
verbose=1,
save_freq='epoch',
save_weights_only=True,
period=params['checkpoint_period'])
callbacks.append(emackpt_callback)
ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
os.path.join(params['model_dir'], 'ckpt'),
verbose=1,
save_freq='epoch',
save_weights_only=True,
period=params['checkpoint_period'])
callbacks.append(ckpt_callback)
if time_history:
time_callback = TimeHistory(params['batch_size'] * get_world_size(),
logger=logger,
logdir=params['model_dir'],
log_steps=log_steps)
callbacks.append(time_callback)
# log LR in tensorboard
    if lr_tb and not benchmark:
callbacks.append(LRTensorBoard(log_dir=params['model_dir']))
hvd_callback = hvd_callbacks.BroadcastGlobalVariablesCallback(0)
callbacks.append(hvd_callback)
  # For large batch sizes, a training schedule of 350/400 epochs gives better mAP,
  # but the best mAP is generally reached after 75% of the training schedule.
  # So we can stop training at that point or continue to train until 300 epochs.
stop_75 = False if 'eval' in training_mode or '300' in training_mode else True
early_stopping = StopEarlyCallback(params['num_epochs'], stop_75=stop_75)
callbacks.append(early_stopping)
if 'eval' in training_mode:
cocoeval = COCOEvalCallback(eval_dataset,
eval_freq=params['checkpoint_period'],
start_eval_epoch=200,
eval_params=eval_params,
logger=logger)
callbacks.append(cocoeval)
if params['moving_average_decay']:
callbacks.append(MovingAverageCallback())
if params.get('sample_image', None):
display_callback = DisplayCallback(
params.get('sample_image', None),
os.path.join(params['model_dir'], 'train'))
callbacks.append(display_callback)
return callbacks
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/callback_builder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras implementation of efficientdet."""
import functools
from absl import logging
import numpy as np
import tensorflow as tf
from efficientnet import efficientnet_model
from model import dataloader
from model import normalization_builder
from model import activation_builder
from model import fpn_configs
from model import postprocess
from utils import hparams_config
from utils import model_utils
from utils import util_keras
# pylint: disable=arguments-differ  # for keras layers.
class FNode(tf.keras.layers.Layer):
"""A Keras Layer implementing BiFPN Node."""
def __init__(self,
feat_level,
inputs_offsets,
fpn_num_filters,
apply_bn_for_resampling,
is_training_bn,
conv_after_downsample,
conv_bn_act_pattern,
separable_conv,
act_type,
weight_method,
data_format,
name='fnode'):
super().__init__(name=name)
self.feat_level = feat_level
self.inputs_offsets = inputs_offsets
self.fpn_num_filters = fpn_num_filters
self.apply_bn_for_resampling = apply_bn_for_resampling
self.separable_conv = separable_conv
self.act_type = act_type
self.is_training_bn = is_training_bn
self.conv_after_downsample = conv_after_downsample
self.data_format = data_format
self.weight_method = weight_method
self.conv_bn_act_pattern = conv_bn_act_pattern
self.resample_layers = []
self.vars = []
def fuse_features(self, nodes):
"""Fuse features from different resolutions and return a weighted sum.
Args:
nodes: a list of tensorflow features at different levels
Returns:
A tensor denoting the fused feature.
"""
dtype = nodes[0].dtype
if self.weight_method == 'attn':
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
normalized_weights = tf.nn.softmax(tf.stack(edge_weights))
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'fastattn':
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
weights_sum = tf.add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = tf.add_n(nodes)
elif self.weight_method == 'channel_attn':
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
normalized_weights = tf.nn.softmax(tf.stack(edge_weights, -1), axis=-1)
nodes = tf.stack(nodes, axis=-1)
new_node = tf.reduce_sum(nodes * normalized_weights, -1)
elif self.weight_method == 'channel_fastattn':
edge_weights = []
for var in self.vars:
var = tf.cast(var, dtype=dtype)
edge_weights.append(var)
weights_sum = tf.add_n(edge_weights)
nodes = [
nodes[i] * edge_weights[i] / (weights_sum + 0.0001)
for i in range(len(nodes))
]
new_node = tf.add_n(nodes)
elif self.weight_method == 'sum':
new_node = sum(nodes) # tf.add_n is not supported by tflite gpu.
else:
raise ValueError('unknown weight_method %s' % self.weight_method)
return new_node
def _add_wsm(self, initializer):
for i, _ in enumerate(self.inputs_offsets):
name = 'WSM' + ('' if i == 0 else '_' + str(i))
self.vars.append(self.add_weight(initializer=initializer, name=name))
def build(self, feats_shape):
for i, input_offset in enumerate(self.inputs_offsets):
name = 'resample_{}_{}_{}'.format(i, input_offset, len(feats_shape))
self.resample_layers.append(
ResampleFeatureMap(
self.feat_level,
self.fpn_num_filters,
self.apply_bn_for_resampling,
self.is_training_bn,
self.conv_after_downsample,
data_format=self.data_format,
name=name))
if self.weight_method == 'attn':
self._add_wsm('ones')
elif self.weight_method == 'fastattn':
self._add_wsm('ones')
elif self.weight_method == 'channel_attn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(lambda: tf.ones([num_filters]))
elif self.weight_method == 'channel_fastattn':
num_filters = int(self.fpn_num_filters)
self._add_wsm(lambda: tf.ones([num_filters]))
self.op_after_combine = OpAfterCombine(
self.is_training_bn,
self.conv_bn_act_pattern,
self.separable_conv,
self.fpn_num_filters,
self.act_type,
self.data_format,
name='op_after_combine{}'.format(len(feats_shape)))
self.built = True
super().build(feats_shape)
def call(self, feats, training):
nodes = []
for i, input_offset in enumerate(self.inputs_offsets):
input_node = feats[input_offset]
input_node = self.resample_layers[i](input_node, training, feats)
nodes.append(input_node)
new_node = self.fuse_features(nodes)
new_node = self.op_after_combine(new_node)
return feats + [new_node]
class OpAfterCombine(tf.keras.layers.Layer):
"""Operation after combining input features during feature fusiong."""
def __init__(self,
is_training_bn,
conv_bn_act_pattern,
separable_conv,
fpn_num_filters,
act_type,
data_format,
name='op_after_combine'):
super().__init__(name=name)
self.conv_bn_act_pattern = conv_bn_act_pattern
self.separable_conv = separable_conv
self.fpn_num_filters = fpn_num_filters
self.act_type = act_type
self.data_format = data_format
self.is_training_bn = is_training_bn
if self.separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D, depth_multiplier=1)
else:
conv2d_layer = tf.keras.layers.Conv2D
self.conv_op = conv2d_layer(
filters=fpn_num_filters,
kernel_size=(3, 3),
padding='same',
use_bias=not self.conv_bn_act_pattern,
data_format=self.data_format,
name='conv')
self.bn = util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name='bn')
def call(self, new_node, training):
if not self.conv_bn_act_pattern:
new_node = activation_builder.activation_fn(new_node, self.act_type)
new_node = self.conv_op(new_node)
new_node = self.bn(new_node, training=training)
if self.conv_bn_act_pattern:
new_node = activation_builder.activation_fn(new_node, self.act_type)
return new_node
class ResampleFeatureMap(tf.keras.layers.Layer):
"""Resample feature map for downsampling or upsampling."""
def __init__(self,
feat_level,
target_num_channels,
apply_bn=False,
is_training_bn=None,
conv_after_downsample=False,
data_format=None,
pooling_type=None,
upsampling_type=None,
name='resample_p0'):
super().__init__(name=name)
self.apply_bn = apply_bn
self.is_training_bn = is_training_bn
self.data_format = data_format
self.target_num_channels = target_num_channels
self.feat_level = feat_level
self.conv_after_downsample = conv_after_downsample
self.pooling_type = pooling_type or 'max'
self.upsampling_type = upsampling_type or 'nearest'
self.conv2d = tf.keras.layers.Conv2D(
self.target_num_channels, (1, 1),
padding='same',
data_format=self.data_format,
name='conv2d')
self.bn = util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name='bn')
def _pool2d(self, inputs, height, width, target_height, target_width):
"""Pool the inputs to target height and width."""
height_stride_size = int((height - 1) // target_height + 1)
width_stride_size = int((width - 1) // target_width + 1)
if self.pooling_type == 'max':
return tf.keras.layers.MaxPooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(inputs)
elif self.pooling_type == 'avg':
return tf.keras.layers.AveragePooling2D(
pool_size=[height_stride_size + 1, width_stride_size + 1],
strides=[height_stride_size, width_stride_size],
padding='SAME',
data_format=self.data_format)(inputs)
else:
raise ValueError('Unsupported pooling type {}.'.format(self.pooling_type))
def _upsample2d(self, inputs, target_height, target_width):
return tf.cast(
tf.image.resize(
tf.cast(inputs, tf.float32), [target_height, target_width],
method=self.upsampling_type), inputs.dtype)
def _maybe_apply_1x1(self, feat, training, num_channels):
"""Apply 1x1 conv to change layer width if necessary."""
if num_channels != self.target_num_channels:
feat = self.conv2d(feat)
if self.apply_bn:
feat = self.bn(feat, training=training)
return feat
def call(self, feat, training, all_feats):
hwc_idx = (2, 3, 1) if self.data_format == 'channels_first' else (1, 2, 3)
height, width, num_channels = [feat.shape.as_list()[i] for i in hwc_idx]
if all_feats:
target_feat_shape = all_feats[self.feat_level].shape.as_list()
target_height, target_width, _ = [target_feat_shape[i] for i in hwc_idx]
else:
# Default to downsampling if all_feats is empty.
target_height, target_width = (height + 1) // 2, (width + 1) // 2
# If conv_after_downsample is True, when downsampling, apply 1x1 after
# downsampling for efficiency.
if height > target_height and width > target_width:
if not self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
feat = self._pool2d(feat, height, width, target_height, target_width)
if self.conv_after_downsample:
feat = self._maybe_apply_1x1(feat, training, num_channels)
elif height <= target_height and width <= target_width:
feat = self._maybe_apply_1x1(feat, training, num_channels)
if height < target_height or width < target_width:
feat = self._upsample2d(feat, target_height, target_width)
else:
raise ValueError(
'Incompatible Resampling : feat shape {}x{} target_shape: {}x{}'
.format(height, width, target_height, target_width))
return feat
class ClassNet(tf.keras.layers.Layer):
"""Object class prediction network."""
def __init__(self,
num_classes=90,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training_bn=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
data_format='channels_last',
name='class_net',
**kwargs):
"""Initialize the ClassNet.
Args:
num_classes: number of classes.
num_anchors: number of anchors.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of intermediate layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
      data_format: string of 'channels_first' or 'channels_last'.
      name: the name of this layer.
**kwargs: other parameters.
"""
super().__init__(name=name, **kwargs)
self.num_classes = num_classes
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training_bn = is_training_bn
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.conv_ops = []
self.bns = []
if separable_conv:
conv2d_layer = functools.partial(
tf.keras.layers.SeparableConv2D,
depth_multiplier=1,
data_format=data_format,
pointwise_initializer=tf.initializers.VarianceScaling(),
depthwise_initializer=tf.initializers.VarianceScaling())
else:
conv2d_layer = functools.partial(
tf.keras.layers.Conv2D,
data_format=data_format,
kernel_initializer=tf.random_normal_initializer(stddev=0.01))
for i in range(self.repeats):
# If using SeparableConv2D
self.conv_ops.append(
conv2d_layer(
self.num_filters,
kernel_size=3,
bias_initializer=tf.zeros_initializer(),
activation=None,
padding='same',
name='class-%d' % i))
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name='class-%d-bn-%d' % (i, level),
))
self.bns.append(bn_per_level)
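    # The bias of the final class-prediction layer is initialized to
    # -log((1 - 0.01) / 0.01), the focal-loss prior (pi = 0.01) used by
    # RetinaNet, so that early training does not flood the loss with
    # confident false positives.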
self.classes = conv2d_layer(
num_classes * num_anchors,
kernel_size=3,
bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
padding='same',
name='class-predict')
def call(self, inputs, training, **kwargs):
"""Call ClassNet."""
class_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
original_image = image
image = self.conv_ops[i](image)
image = self.bns[i][level_id](image, training=training)
if self.act_type:
image = activation_builder.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = model_utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
class_outputs.append(self.classes(image))
return class_outputs
class BoxNet(tf.keras.layers.Layer):
"""Box regression network."""
def __init__(self,
num_anchors=9,
num_filters=32,
min_level=3,
max_level=7,
is_training_bn=False,
act_type='swish',
repeats=4,
separable_conv=True,
survival_prob=None,
data_format='channels_last',
name='box_net',
**kwargs):
"""Initialize BoxNet.
Args:
num_anchors: number of anchors used.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
repeats: number of "intermediate" layers.
separable_conv: True to use separable_conv instead of conv2D.
survival_prob: if a value is set then drop connect will be used.
      data_format: string of 'channels_first' or 'channels_last'.
name: Name of the layer.
**kwargs: other parameters.
"""
super().__init__(name=name, **kwargs)
self.num_anchors = num_anchors
self.num_filters = num_filters
self.min_level = min_level
self.max_level = max_level
self.repeats = repeats
self.separable_conv = separable_conv
self.is_training_bn = is_training_bn
self.survival_prob = survival_prob
self.act_type = act_type
self.data_format = data_format
self.conv_ops = []
self.bns = []
for i in range(self.repeats):
# If using SeparableConv2D
if self.separable_conv:
self.conv_ops.append(
tf.keras.layers.SeparableConv2D(
filters=self.num_filters,
depth_multiplier=1,
pointwise_initializer=tf.initializers.VarianceScaling(),
depthwise_initializer=tf.initializers.VarianceScaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-%d' % i))
# If using Conv2d
else:
self.conv_ops.append(
tf.keras.layers.Conv2D(
filters=self.num_filters,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-%d' % i))
bn_per_level = []
for level in range(self.min_level, self.max_level + 1):
bn_per_level.append(
util_keras.build_batch_norm(
is_training_bn=self.is_training_bn,
data_format=self.data_format,
name='box-%d-bn-%d' % (i, level)))
self.bns.append(bn_per_level)
if self.separable_conv:
self.boxes = tf.keras.layers.SeparableConv2D(
filters=4 * self.num_anchors,
depth_multiplier=1,
pointwise_initializer=tf.initializers.VarianceScaling(),
depthwise_initializer=tf.initializers.VarianceScaling(),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-predict')
else:
self.boxes = tf.keras.layers.Conv2D(
filters=4 * self.num_anchors,
kernel_initializer=tf.random_normal_initializer(stddev=0.01),
data_format=self.data_format,
kernel_size=3,
activation=None,
bias_initializer=tf.zeros_initializer(),
padding='same',
name='box-predict')
def call(self, inputs, training):
"""Call boxnet."""
box_outputs = []
for level_id in range(0, self.max_level - self.min_level + 1):
image = inputs[level_id]
for i in range(self.repeats):
original_image = image
image = self.conv_ops[i](image)
image = self.bns[i][level_id](image, training=training)
if self.act_type:
image = activation_builder.activation_fn(image, self.act_type)
if i > 0 and self.survival_prob:
image = model_utils.drop_connect(image, training, self.survival_prob)
image = image + original_image
box_outputs.append(self.boxes(image))
return box_outputs
class SegmentationHead(tf.keras.layers.Layer):
"""Keras layer for semantic segmentation head."""
def __init__(self,
num_classes,
num_filters,
min_level,
max_level,
data_format,
is_training_bn,
act_type,
**kwargs):
"""Initialize SegmentationHead.
Args:
num_classes: number of classes.
num_filters: number of filters for "intermediate" layers.
min_level: minimum level for features.
max_level: maximum level for features.
      data_format: string of 'channels_first' or 'channels_last'.
is_training_bn: True if we train the BatchNorm.
act_type: String of the activation used.
**kwargs: other parameters.
"""
super().__init__(**kwargs)
self.act_type = act_type
self.con2d_ts = []
self.con2d_t_bns = []
for _ in range(max_level - min_level):
self.con2d_ts.append(
tf.keras.layers.Conv2DTranspose(
num_filters,
3,
strides=2,
padding='same',
data_format=data_format,
use_bias=False))
self.con2d_t_bns.append(
util_keras.build_batch_norm(
is_training_bn=is_training_bn,
data_format=data_format,
name='bn'))
self.head_transpose = tf.keras.layers.Conv2DTranspose(
num_classes, 3, strides=2, padding='same')
def call(self, feats, training):
x = feats[-1]
skips = list(reversed(feats[:-1]))
for con2d_t, con2d_t_bn, skip in zip(self.con2d_ts, self.con2d_t_bns,
skips):
x = con2d_t(x)
x = con2d_t_bn(x, training)
x = activation_builder.activation_fn(x, self.act_type)
x = tf.concat([x, skip], axis=-1)
# This is the last layer of the model
return self.head_transpose(x) # 64x64 -> 128x128
class FPNCells(tf.keras.layers.Layer):
"""FPN cells."""
def __init__(self, config, name='fpn_cells'):
super().__init__(name=name)
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method)
self.cells = [
FPNCell(self.config, name='cell_%d' % rep)
for rep in range(self.config.fpn_cell_repeats)
]
def call(self, feats, training):
for cell in self.cells:
cell_feats = cell(feats, training)
min_level = self.config.min_level
max_level = self.config.max_level
feats = []
for level in range(min_level, max_level + 1):
for i, fnode in enumerate(reversed(self.fpn_config.nodes)):
if fnode['feat_level'] == level:
feats.append(cell_feats[-1 - i])
break
return feats
class FPNCell(tf.keras.layers.Layer):
"""A single FPN cell."""
def __init__(self, config, name='fpn_cell'):
super().__init__(name=name)
self.config = config
if config.fpn_config:
self.fpn_config = config.fpn_config
else:
self.fpn_config = fpn_configs.get_fpn_config(config.fpn_name,
config.min_level,
config.max_level,
config.fpn_weight_method)
self.fnodes = []
for i, fnode_cfg in enumerate(self.fpn_config.nodes):
logging.info('fnode %d : %s', i, fnode_cfg)
fnode = FNode(
fnode_cfg['feat_level'] - self.config.min_level,
fnode_cfg['inputs_offsets'],
config.fpn_num_filters,
config.apply_bn_for_resampling,
config.is_training_bn,
config.conv_after_downsample,
config.conv_bn_act_pattern,
config.separable_conv,
config.act_type,
weight_method=self.fpn_config.weight_method,
data_format=config.data_format,
name='fnode%d' % i)
self.fnodes.append(fnode)
def call(self, feats, training):
for fnode in self.fnodes:
feats = fnode(feats, training)
return feats
class EfficientDetNet(tf.keras.Model):
"""EfficientDet keras network without pre/post-processing."""
def __init__(self, model_name=None, config=None, name=''):
"""Initialize model."""
super().__init__(name=name)
config = config or hparams_config.get_efficientdet_config(model_name)
self.config = config
# Backbone.
backbone_name = config.backbone_name
is_training_bn = config.is_training_bn
if 'efficientnet' in backbone_name:
override_params = {
'batch_norm':
normalization_builder.batch_norm_class(is_training_bn),
'relu_fn':
functools.partial(activation_builder.activation_fn, act_type=config.act_type),
'weight_decay': config.weight_decay,
'data_format': config.data_format,
'activation': config.act_type,
}
if 'b0' in backbone_name:
override_params['survival_prob'] = 0.0
override_params['data_format'] = config.data_format
self.backbone = efficientnet_model.EfficientNet().from_name(
model_name=backbone_name, features_only=True, model_weights_path=config.backbone_init,
weights_format='saved_model', overrides=override_params)
# Feature network.
self.resample_layers = [] # additional resampling layers.
for level in range(6, config.max_level + 1):
# Adds a coarser level by downsampling the last feature map.
self.resample_layers.append(
ResampleFeatureMap(
feat_level=(level - config.min_level),
target_num_channels=config.fpn_num_filters,
apply_bn=config.apply_bn_for_resampling,
is_training_bn=config.is_training_bn,
conv_after_downsample=config.conv_after_downsample,
data_format=config.data_format,
name='resample_p%d' % level,
))
self.fpn_cells = FPNCells(config)
# class/box output prediction network.
num_anchors = len(config.aspect_ratios) * config.num_scales
num_filters = config.fpn_num_filters
for head in config.heads:
if head == 'object_detection':
self.class_net = ClassNet(
num_classes=config.num_classes,
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
data_format=config.data_format)
self.box_net = BoxNet(
num_anchors=num_anchors,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
repeats=config.box_class_repeats,
separable_conv=config.separable_conv,
survival_prob=config.survival_prob,
data_format=config.data_format)
if head == 'segmentation':
self.seg_head = SegmentationHead(
num_classes=config.seg_num_classes,
num_filters=num_filters,
min_level=config.min_level,
max_level=config.max_level,
is_training_bn=config.is_training_bn,
act_type=config.act_type,
data_format=config.data_format)
def _init_set_name(self, name, zero_based=True):
"""A hack to allow empty model name for legacy checkpoint compitability."""
if name == '': # pylint: disable=g-explicit-bool-comparison
self._name = name
else:
      super()._init_set_name(name, zero_based)
def call(self, inputs, training):
config = self.config
# call backbone network.
all_feats = self.backbone(inputs, training=training)
feats = all_feats[config.min_level:config.max_level + 1]
# Build additional input features that are not from backbone.
for resample_layer in self.resample_layers:
feats.append(resample_layer(feats[-1], training, None))
# call feature network.
fpn_feats = self.fpn_cells(feats, training)
# call class/box/seg output network.
outputs = []
if 'object_detection' in config.heads:
class_outputs = self.class_net(fpn_feats, training)
box_outputs = self.box_net(fpn_feats, training)
outputs.extend([class_outputs, box_outputs])
if 'segmentation' in config.heads:
seg_outputs = self.seg_head(fpn_feats, training)
outputs.append(seg_outputs)
return tuple(outputs)
class EfficientDetModel(EfficientDetNet):
"""EfficientDet full keras model with pre and post processing."""
def _preprocessing(self, raw_images, image_size, mode=None):
"""Preprocess images before feeding to the network."""
if not mode:
return raw_images, None
image_size = model_utils.parse_image_size(image_size)
if mode != 'infer':
# We only support inference for now.
raise ValueError('preprocessing must be infer or empty')
def map_fn(image):
input_processor = dataloader.DetectionInputProcessor(
image, image_size)
input_processor.normalize_image()
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
image_scale = input_processor.image_scale_to_original
return image, image_scale
if raw_images.shape.as_list()[0]: # fixed batch size.
batch_size = raw_images.shape.as_list()[0]
outputs = [map_fn(raw_images[i]) for i in range(batch_size)]
return [tf.stack(y) for y in zip(*outputs)]
# otherwise treat it as dynamic batch size.
return tf.vectorized_map(map_fn, raw_images)
def _postprocess(self, cls_outputs, box_outputs, scales, mode='global'):
"""Postprocess class and box predictions."""
if not mode:
return cls_outputs, box_outputs
# TODO(tanmingxing): remove this cast once FP16 works postprocessing.
cls_outputs = [tf.cast(i, tf.float32) for i in cls_outputs]
box_outputs = [tf.cast(i, tf.float32) for i in box_outputs]
if mode == 'global':
return postprocess.postprocess_global(self.config.as_dict(), cls_outputs,
box_outputs, scales)
if mode == 'per_class':
return postprocess.postprocess_per_class(self.config.as_dict(),
cls_outputs, box_outputs, scales)
raise ValueError('Unsupported postprocess mode {}'.format(mode))
def call(self, inputs, training=False, pre_mode='infer', post_mode='global'):
"""Call this model.
Args:
inputs: a tensor with common shape [batch, height, width, channels].
training: If true, it is training mode. Otherwise, eval mode.
pre_mode: preprocessing mode, must be {None, 'infer'}.
      post_mode: postprocessing mode, must be {None, 'global', 'per_class'}.
Returns:
the output tensor list.
"""
config = self.config
# preprocess.
inputs, scales = self._preprocessing(inputs, config.image_size, pre_mode)
# network.
outputs = super().call(inputs, training)
if 'object_detection' in config.heads and post_mode:
# postprocess for detection
det_outputs = self._postprocess(outputs[0], outputs[1], scales, post_mode)
outputs = det_outputs + outputs[2:]
return outputs
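# Illustrative usage sketch (assumption: the exact tuple returned with
# post_mode='global' is whatever postprocess.postprocess_global produces):
#   config = hparams_config.get_efficientdet_config('efficientdet-d0')
#   model = EfficientDetModel(config=config)
#   model.build((1, *model_utils.parse_image_size(config.image_size), 3))
#   detections = model(images, training=False, pre_mode='infer',
#                      post_mode='global')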
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/efficientdet_keras.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Anchor definition."""
import collections
import numpy as np
import tensorflow as tf
from utils import model_utils
from object_detection import argmax_matcher
from object_detection import box_list
from object_detection import faster_rcnn_box_coder
from object_detection import region_similarity_calculator
from object_detection import target_assigner
MAX_DETECTION_POINTS = 5000
def decode_box_outputs(pred_boxes, anchor_boxes):
"""Transforms relative regression coordinates to absolute positions.
Network predictions are normalized and relative to a given anchor; this
reverses the transformation and outputs absolute coordinates for the input
image.
Args:
pred_boxes: predicted box regression targets.
anchor_boxes: anchors on all feature levels.
Returns:
outputs: bounding boxes.
"""
anchor_boxes = tf.cast(anchor_boxes, pred_boxes.dtype)
ycenter_a = (anchor_boxes[..., 0] + anchor_boxes[..., 2]) / 2
xcenter_a = (anchor_boxes[..., 1] + anchor_boxes[..., 3]) / 2
ha = anchor_boxes[..., 2] - anchor_boxes[..., 0]
wa = anchor_boxes[..., 3] - anchor_boxes[..., 1]
ty, tx, th, tw = tf.unstack(pred_boxes, num=4, axis=-1)
w = tf.math.exp(tw) * wa
h = tf.math.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _generate_anchor_configs(feat_sizes, min_level, max_level, num_scales,
aspect_ratios):
"""Generates mapping from output level to a list of anchor configurations.
  A configuration is a tuple of (stride, octave_scale, aspect_ratio).
Args:
feat_sizes: list of dict of integer numbers of feature map sizes.
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
      on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
      on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
Returns:
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
"""
anchor_configs = {}
for level in range(min_level, max_level + 1):
anchor_configs[level] = []
for scale_octave in range(num_scales):
for aspect in aspect_ratios:
anchor_configs[level].append(
((feat_sizes[0]['height'] / float(feat_sizes[level]['height']),
feat_sizes[0]['width'] / float(feat_sizes[level]['width'])),
scale_octave / float(num_scales), aspect))
return anchor_configs
def _generate_anchor_boxes(image_size, anchor_scale, anchor_configs):
"""Generates multiscale anchor boxes.
Args:
image_size: tuple of integer numbers of input image size.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
anchor_configs: a dictionary with keys as the levels of anchors and
values as a list of anchor configuration.
Returns:
anchor_boxes: a numpy array with shape [N, 4], which stacks anchors on all
feature levels.
Raises:
ValueError: input size must be the multiple of largest feature stride.
"""
boxes_all = []
for _, configs in anchor_configs.items():
boxes_level = []
for config in configs:
stride, octave_scale, aspect = config
base_anchor_size_x = anchor_scale * stride[1] * 2**octave_scale
base_anchor_size_y = anchor_scale * stride[0] * 2**octave_scale
anchor_size_x_2 = base_anchor_size_x * aspect[0] / 2.0
anchor_size_y_2 = base_anchor_size_y * aspect[1] / 2.0
x = np.arange(stride[1] / 2, image_size[1], stride[1])
y = np.arange(stride[0] / 2, image_size[0], stride[0])
xv, yv = np.meshgrid(x, y)
xv = xv.reshape(-1)
yv = yv.reshape(-1)
boxes = np.vstack((yv - anchor_size_y_2, xv - anchor_size_x_2,
yv + anchor_size_y_2, xv + anchor_size_x_2))
boxes = np.swapaxes(boxes, 0, 1)
boxes_level.append(np.expand_dims(boxes, axis=1))
    # Concat anchors on the same level to shape [N, A, 4], then flatten to [N*A, 4].
boxes_level = np.concatenate(boxes_level, axis=1)
boxes_all.append(boxes_level.reshape([-1, 4]))
anchor_boxes = np.vstack(boxes_all)
return anchor_boxes
class Anchors(object):
"""RetinaNet Anchors class."""
def __init__(self, min_level, max_level, num_scales, aspect_ratios,
anchor_scale, image_size):
"""Constructs multiscale RetinaNet anchors.
Args:
min_level: integer number of minimum level of the output feature pyramid.
max_level: integer number of maximum level of the output feature pyramid.
num_scales: integer number representing intermediate scales added
        on each level. For instance, num_scales=2 adds two additional
anchor scales [2^0, 2^0.5] on each level.
aspect_ratios: list of tuples representing the aspect ratio anchors added
        on each level. For instance, aspect_ratios =
[(1, 1), (1.4, 0.7), (0.7, 1.4)] adds three anchors on each level.
anchor_scale: float number representing the scale of size of the base
anchor to the feature stride 2^level.
image_size: integer number or tuple of integer number of input image size.
"""
self.min_level = min_level
self.max_level = max_level
self.num_scales = num_scales
self.aspect_ratios = aspect_ratios
self.anchor_scale = anchor_scale
self.image_size = model_utils.parse_image_size(image_size)
self.feat_sizes = model_utils.get_feat_sizes(image_size, max_level)
self.config = self._generate_configs()
self.boxes = self._generate_boxes()
def _generate_configs(self):
"""Generate configurations of anchor boxes."""
return _generate_anchor_configs(self.feat_sizes, self.min_level,
self.max_level, self.num_scales,
self.aspect_ratios)
def _generate_boxes(self):
"""Generates multiscale anchor boxes."""
boxes = _generate_anchor_boxes(self.image_size, self.anchor_scale,
self.config)
boxes = tf.convert_to_tensor(boxes, dtype=tf.float32)
return boxes
def get_anchors_per_location(self):
return self.num_scales * len(self.aspect_ratios)
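# Illustrative sketch (values are typical EfficientDet-D0 defaults, not read
# from any config in this file):
#   anchors = Anchors(min_level=3, max_level=7, num_scales=3,
#                     aspect_ratios=[(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)],
#                     anchor_scale=4.0, image_size=512)
#   anchors.get_anchors_per_location()  # -> 9
#   anchors.boxes  # one [y0, x0, y1, x1] row per anchor over levels 3..7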
class AnchorLabeler(object):
"""Labeler for multiscale anchor boxes."""
def __init__(self, anchors, num_classes, match_threshold=0.5):
"""Constructs anchor labeler to assign labels to anchors.
Args:
anchors: an instance of class Anchors.
num_classes: integer number representing number of classes in the dataset.
match_threshold: float number between 0 and 1 representing the threshold
to assign positive labels for anchors.
"""
similarity_calc = region_similarity_calculator.IouSimilarity()
matcher = argmax_matcher.ArgMaxMatcher(
match_threshold,
unmatched_threshold=match_threshold,
negatives_lower_than_unmatched=True,
force_match_for_each_row=True)
box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
self._target_assigner = target_assigner.TargetAssigner(
similarity_calc, matcher, box_coder)
self._anchors = anchors
self._match_threshold = match_threshold
self._num_classes = num_classes
def _unpack_labels(self, labels):
"""Unpacks an array of labels into multiscales labels."""
labels_unpacked = collections.OrderedDict()
anchors = self._anchors
count = 0
for level in range(anchors.min_level, anchors.max_level + 1):
feat_size = anchors.feat_sizes[level]
steps = feat_size['height'] * feat_size[
'width'] * anchors.get_anchors_per_location()
indices = tf.range(count, count + steps)
count += steps
labels_unpacked[level] = tf.reshape(
tf.gather(labels, indices),
[feat_size['height'], feat_size['width'], -1])
return labels_unpacked
def label_anchors(self, gt_boxes, gt_labels):
"""Labels anchors with ground truth inputs.
Args:
gt_boxes: A float tensor with shape [N, 4] representing groundtruth boxes.
For each row, it stores [y0, x0, y1, x1] for four corners of a box.
gt_labels: A integer tensor with shape [N, 1] representing groundtruth
classes.
Returns:
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: scalar tensor storing number of positives in an image.
"""
gt_box_list = box_list.BoxList(gt_boxes)
anchor_box_list = box_list.BoxList(self._anchors.boxes)
# cls_weights, box_weights are not used
cls_targets, _, box_targets, _, matches = self._target_assigner.assign(
anchor_box_list, gt_box_list, gt_labels)
# class labels start from 1 and the background class = -1
cls_targets -= 1
cls_targets = tf.cast(cls_targets, tf.int32)
# Unpack labels.
cls_targets_dict = self._unpack_labels(cls_targets)
box_targets_dict = self._unpack_labels(box_targets)
num_positives = tf.reduce_sum(
tf.cast(tf.not_equal(matches.match_results, -1), tf.float32))
return cls_targets_dict, box_targets_dict, num_positives
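# Illustrative sketch (hypothetical tensors): labeling one image's ground truth.
#   labeler = AnchorLabeler(anchors, num_classes=90)
#   cls_targets, box_targets, num_positives = labeler.label_anchors(
#       gt_boxes, gt_classes)
# where gt_boxes is [N, 4] in [y0, x0, y1, x1] order and gt_classes is [N, 1];
# the returned dicts are keyed by pyramid level (min_level..max_level).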
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/anchors.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Inference related utilities."""
import copy
import os
import time
from typing import Text, Dict, Any
from absl import logging
import numpy as np
import tensorflow as tf
import dllogger as DLLogger
from model import efficientdet_keras
from model import label_util
from utils import hparams_config
from utils import model_utils
from utils import util_keras
from visualize import vis_utils
def visualize_image(image,
boxes,
classes,
scores,
label_map=None,
min_score_thresh=0.01,
max_boxes_to_draw=1000,
line_thickness=2,
**kwargs):
"""Visualizes a given image.
Args:
image: a image with shape [H, W, C].
boxes: a box prediction with shape [N, 4] ordered [ymin, xmin, ymax, xmax].
classes: a class prediction with shape [N].
scores: A list of float value with shape [N].
label_map: a dictionary from class id to name.
    min_score_thresh: minimal score for showing. If class probability is below
this threshold, then the object will not show up.
max_boxes_to_draw: maximum bounding box to draw.
line_thickness: how thick is the bounding box line.
**kwargs: extra parameters.
Returns:
output_image: an output image with annotated boxes and classes.
"""
label_map = label_util.get_label_map(label_map or 'coco')
category_index = {k: {'id': k, 'name': label_map[k]} for k in label_map}
img = np.array(image)
vis_utils.visualize_boxes_and_labels_on_image_array(
img,
boxes,
classes,
scores,
category_index,
min_score_thresh=min_score_thresh,
max_boxes_to_draw=max_boxes_to_draw,
line_thickness=line_thickness,
**kwargs)
return img
class ExportModel(tf.Module):
def __init__(self, model):
super().__init__()
self.model = model
@tf.function
def __call__(self, imgs):
return self.model(imgs, training=False, post_mode='global')
class ServingDriver(object):
"""A driver for serving single or batch images.
This driver supports serving with image files or arrays, with configurable
batch size.
Example 1. Serving streaming image contents:
driver = inference.ServingDriver(
'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=1)
driver.build()
for m in image_iterator():
      predictions = driver.serve([m])
      boxes, scores, classes, _ = tf.nest.map_structure(np.array, predictions)
      driver.visualize(m, boxes[0], classes[0], scores[0])
# m is the new image with annotated boxes.
Example 2. Serving batch image contents:
imgs = []
for f in ['/tmp/1.jpg', '/tmp/2.jpg']:
imgs.append(np.array(Image.open(f)))
driver = inference.ServingDriver(
'efficientdet-d0', '/tmp/efficientdet-d0', batch_size=len(imgs))
driver.build()
predictions = driver.serve(imgs)
boxes, scores, classes, _ = tf.nest.map_structure(np.array, predictions)
for i in range(len(imgs)):
      driver.visualize(imgs[i], boxes[i], classes[i], scores[i])
Example 3: another way is to use SavedModel:
# step1: export a model.
driver = inference.ServingDriver('efficientdet-d0', '/tmp/efficientdet-d0')
driver.build()
driver.export('/tmp/saved_model_path')
# step2: Serve a model.
    driver.load('/tmp/saved_model_path')
raw_images = []
for f in tf.io.gfile.glob('/tmp/images/*.jpg'):
raw_images.append(np.array(PIL.Image.open(f)))
detections = driver.serve(raw_images)
boxes, scores, classes, _ = tf.nest.map_structure(np.array, detections)
    for i in range(len(raw_images)):
      driver.visualize(raw_images[i], boxes[i], classes[i], scores[i])
"""
def __init__(self,
model_name: Text,
ckpt_path: Text = None,
batch_size: int = 1,
min_score_thresh: float = None,
max_boxes_to_draw: float = None,
model_params: Dict[Text, Any] = None):
"""Initialize the inference driver.
Args:
model_name: target model name, such as efficientdet-d0.
ckpt_path: checkpoint path, such as /tmp/efficientdet-d0/.
batch_size: batch size for inference.
min_score_thresh: minimal score threshold for filtering predictions.
max_boxes_to_draw: the maximum number of boxes per image.
model_params: model parameters for overriding the config.
"""
super().__init__()
self.model_name = model_name
self.ckpt_path = ckpt_path
self.batch_size = batch_size
self.params = hparams_config.get_detection_config(model_name).as_dict()
if model_params:
self.params.update(model_params)
self.params.update(dict(is_training_bn=False))
self.label_map = self.params.get('label_map', None)
self.model = None
self.min_score_thresh = min_score_thresh
self.max_boxes_to_draw = max_boxes_to_draw
mixed_precision = self.params.get('mixed_precision', None)
precision = 'mixed_float16' if mixed_precision else 'float32'
policy = tf.keras.mixed_precision.experimental.Policy(precision)
tf.keras.mixed_precision.experimental.set_policy(policy)
def build(self, params_override=None):
"""Build model and restore checkpoints."""
params = copy.deepcopy(self.params)
if params_override:
params.update(params_override)
config = hparams_config.get_efficientdet_config(self.model_name)
config.override(params)
self.model = efficientdet_keras.EfficientDetModel(config=config)
image_size = model_utils.parse_image_size(params['image_size'])
self.model.build((self.batch_size, *image_size, 3))
util_keras.restore_ckpt(self.model, self.ckpt_path, 0,
params['moving_average_decay'])
def visualize(self, image, boxes, classes, scores, **kwargs):
"""Visualize prediction on image."""
return visualize_image(image, boxes, classes.astype(int), scores,
self.label_map, **kwargs)
def benchmark(self, image_arrays, bm_runs=10, trace_filename=None):
"""Benchmark inference latency/throughput.
Args:
image_arrays: a list of images in numpy array format.
bm_runs: Number of benchmark runs.
      trace_filename: If not None, specifies the filename for saving the trace.
"""
if not self.model:
self.build()
@tf.function
def test_func(image_arrays):
return self.model(image_arrays, training=False)
latency = []
for _ in range(10): # warmup 10 runs.
test_func(image_arrays)
start = time.perf_counter()
for _ in range(bm_runs):
batch_start = time.perf_counter()
test_func(image_arrays)
latency.append(time.perf_counter() - batch_start)
end = time.perf_counter()
inference_time = (end - start) / bm_runs
print('Per batch inference time: ', inference_time)
fps = self.batch_size / inference_time
print('FPS: ', fps)
latency_avg = sum(latency) / len(latency)
latency.sort()
def _latency_avg(n):
return sum(latency[:n]) / n
latency_90 = _latency_avg(int(len(latency)*0.9))
latency_95 = _latency_avg(int(len(latency)*0.95))
latency_99 = _latency_avg(int(len(latency)*0.99))
stats = {'inference_fps': fps, 'inference_latency_ms': float(inference_time * 1000),
'latency_avg' : latency_avg, 'latency_90': latency_90,
'latency_95' : latency_95, 'latency_99': latency_99,}
DLLogger.log(step=(), data=stats)
if trace_filename:
options = tf.profiler.experimental.ProfilerOptions()
tf.profiler.experimental.start(trace_filename, options)
test_func(image_arrays)
tf.profiler.experimental.stop()
def serve(self, image_arrays):
"""Serve a list of image arrays.
Args:
image_arrays: A list of image content with each image has shape [height,
width, 3] and uint8 type.
Returns:
A list of detections.
"""
if not self.model:
self.build()
return self.model(image_arrays, False)
def load(self, saved_model_dir_or_frozen_graph: Text):
"""Load the model using saved model or a frozen graph."""
# Load saved model if it is a folder.
if tf.saved_model.contains_saved_model(saved_model_dir_or_frozen_graph):
self.model = tf.saved_model.load(saved_model_dir_or_frozen_graph)
return
# Load a frozen graph.
def wrap_frozen_graph(graph_def, inputs, outputs):
# https://www.tensorflow.org/guide/migrate
imports_graph_def_fn = lambda: tf.import_graph_def(graph_def, name='')
wrapped_import = tf.compat.v1.wrap_function(imports_graph_def_fn, [])
import_graph = wrapped_import.graph
return wrapped_import.prune(
tf.nest.map_structure(import_graph.as_graph_element, inputs),
tf.nest.map_structure(import_graph.as_graph_element, outputs))
graph_def = tf.Graph().as_graph_def()
with tf.io.gfile.GFile(saved_model_dir_or_frozen_graph, 'rb') as f:
graph_def.ParseFromString(f.read())
self.model = wrap_frozen_graph(
graph_def,
inputs='images:0',
outputs=['Identity:0', 'Identity_1:0', 'Identity_2:0', 'Identity_3:0'])
def freeze(self, func):
"""Freeze the graph."""
# pylint: disable=g-import-not-at-top,disable=g-direct-tensorflow-import
from tensorflow.python.framework.convert_to_constants \
import convert_variables_to_constants_v2_as_graph
_, graphdef = convert_variables_to_constants_v2_as_graph(func)
return graphdef
def export(self,
output_dir: Text,
tflite_path: Text = None,
tensorrt: Text = None):
"""Export a saved model, frozen graph, and potential tflite/tensorrt model.
Args:
output_dir: the output folder for saved model.
tflite_path: the path for saved tflite file.
tensorrt: If not None, must be {'FP32', 'FP16', 'INT8'}.
"""
if not self.model:
self.build()
export_model = ExportModel(self.model)
tf.saved_model.save(
export_model,
output_dir,
signatures=export_model.__call__.get_concrete_function(
tf.TensorSpec(
shape=[None, None, None, 3], dtype=tf.uint8, name='images')))
logging.info('Model saved at %s', output_dir)
# also save freeze pb file.
graphdef = self.freeze(
export_model.__call__.get_concrete_function(
tf.TensorSpec(
shape=[None, None, None, 3], dtype=tf.uint8, name='images')))
proto_path = tf.io.write_graph(
graphdef, output_dir, self.model_name + '_frozen.pb', as_text=False)
logging.info('Frozen graph saved at %s', proto_path)
if tflite_path:
# Neither of the two approaches works so far.
converter = tf.lite.TFLiteConverter.from_keras_model(self.model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
# converter = tf.lite.TFLiteConverter.from_saved_model(output_dir)
# converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
tflite_model = converter.convert()
tf.io.gfile.GFile(tflite_path, 'wb').write(tflite_model)
logging.info('TFLite is saved at %s', tflite_path)
if tensorrt:
trt_path = os.path.join(output_dir, 'tensorrt_' + tensorrt.lower())
conversion_params = tf.experimental.tensorrt.ConversionParams(
max_workspace_size_bytes=(2 << 20),
maximum_cached_engines=1,
precision_mode=tensorrt.upper())
converter = tf.experimental.tensorrt.Converter(
output_dir, conversion_params=conversion_params)
converter.convert()
converter.save(trt_path)
logging.info('TensorRT model is saved at %s', trt_path)
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/inference.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data loader and processing."""
from absl import logging
import multiprocessing
import tensorflow as tf
import horovod.tensorflow as hvd
from utils import model_utils
from model import anchors
from object_detection import preprocessor
from object_detection import tf_example_decoder
from utils.horovod_utils import get_rank, get_world_size
from utils.util_keras import get_mixed_precision_policy
class InputProcessor:
"""Base class of Input processor."""
def __init__(self, image, output_size):
"""Initializes a new `InputProcessor`.
Args:
image: The input image before processing.
output_size: The output image size after calling resize_and_crop_image
function.
"""
self._image = image
if isinstance(output_size, int):
self._output_size = (output_size, output_size)
else:
self._output_size = output_size
# Parameters to control rescaling and shifting during preprocessing.
# Image scale defines scale from original image to scaled image.
self._image_scale = tf.constant(1.0)
# The integer height and width of scaled image.
self._scaled_height = tf.shape(image)[0]
self._scaled_width = tf.shape(image)[1]
# The x and y translation offset to crop scaled image to the output size.
self._crop_offset_y = tf.constant(0)
self._crop_offset_x = tf.constant(0)
def normalize_image(self, dtype=tf.float32):
"""Normalize the image to zero mean and unit variance."""
# The image normalization is identical to Cloud TPU ResNet.
self._image = tf.image.convert_image_dtype(self._image, dtype=dtype)
offset = tf.constant([0.485, 0.456, 0.406], dtype=dtype)
offset = tf.expand_dims(offset, axis=0)
offset = tf.expand_dims(offset, axis=0)
self._image -= offset
scale = tf.constant([0.229, 0.224, 0.225], dtype=dtype)
scale = tf.expand_dims(scale, axis=0)
scale = tf.expand_dims(scale, axis=0)
self._image /= scale
def get_image(self):
return self._image
def set_training_random_scale_factors(self,
scale_min,
scale_max,
target_size=None):
"""Set the parameters for multiscale training.
Notably, if train and eval use different sizes, then target_size should be
    set to the eval size to avoid a discrepancy between train and eval.
Args:
scale_min: minimal scale factor.
scale_max: maximum scale factor.
target_size: targeted size, usually same as eval. If None, use train size.
"""
if not target_size:
target_size = self._output_size
target_size = model_utils.parse_image_size(target_size)
logging.info('target_size = %s, output_size = %s', target_size,
self._output_size)
# Select a random scale factor.
random_scale_factor = tf.random.uniform([], scale_min, scale_max)
scaled_y = tf.cast(random_scale_factor * target_size[0], tf.int32)
scaled_x = tf.cast(random_scale_factor * target_size[1], tf.int32)
# Recompute the accurate scale_factor using rounded scaled image size.
height = tf.cast(tf.shape(self._image)[0], tf.float32)
width = tf.cast(tf.shape(self._image)[1], tf.float32)
image_scale_y = tf.cast(scaled_y, tf.float32) / height
image_scale_x = tf.cast(scaled_x, tf.float32) / width
image_scale = tf.minimum(image_scale_x, image_scale_y)
# Select non-zero random offset (x, y) if scaled image is larger than
# self._output_size.
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
offset_y = tf.cast(scaled_height - self._output_size[0], tf.float32)
offset_x = tf.cast(scaled_width - self._output_size[1], tf.float32)
offset_y = tf.maximum(0.0, offset_y) * tf.random.uniform([], 0, 1)
offset_x = tf.maximum(0.0, offset_x) * tf.random.uniform([], 0, 1)
offset_y = tf.cast(offset_y, tf.int32)
offset_x = tf.cast(offset_x, tf.int32)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
self._crop_offset_x = offset_x
self._crop_offset_y = offset_y
def set_scale_factors_to_output_size(self):
"""Set the parameters to resize input image to self._output_size."""
# Compute the scale_factor using rounded scaled image size.
height = tf.cast(tf.shape(self._image)[0], tf.float32)
width = tf.cast(tf.shape(self._image)[1], tf.float32)
image_scale_y = tf.cast(self._output_size[0], tf.float32) / height
image_scale_x = tf.cast(self._output_size[1], tf.float32) / width
image_scale = tf.minimum(image_scale_x, image_scale_y)
scaled_height = tf.cast(height * image_scale, tf.int32)
scaled_width = tf.cast(width * image_scale, tf.int32)
self._image_scale = image_scale
self._scaled_height = scaled_height
self._scaled_width = scaled_width
def resize_and_crop_image(self, method=tf.image.ResizeMethod.BILINEAR):
"""Resize input image and crop it to the self._output dimension."""
dtype = self._image.dtype
scaled_image = tf.compat.v1.image.resize(
self._image, [self._scaled_height, self._scaled_width], method=method)
if scaled_image.dtype != dtype:
scaled_image = tf.image.convert_image_dtype(scaled_image, dtype=dtype)
scaled_image = scaled_image[self._crop_offset_y:self._crop_offset_y +
self._output_size[0],
self._crop_offset_x:self._crop_offset_x +
self._output_size[1], :]
self._image = tf.image.pad_to_bounding_box(scaled_image, 0, 0,
self._output_size[0],
self._output_size[1])
# self._image = tf.cast(output_image, dtype)
return self._image
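# Example usage (illustrative sketch; `image` is assumed to be an RGB tensor of
# shape [height, width, 3]). A typical eval-time flow keeps the aspect ratio
# and pads to the target size:
#
#   proc = InputProcessor(image, output_size=512)
#   proc.normalize_image()
#   proc.set_scale_factors_to_output_size()
#   resized = proc.resize_and_crop_image()  # shape [512, 512, 3]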
class DetectionInputProcessor(InputProcessor):
"""Input processor for object detection."""
def __init__(self, image, output_size, boxes=None, classes=None):
InputProcessor.__init__(self, image, output_size)
self._boxes = boxes
self._classes = classes
def random_horizontal_flip(self):
"""Randomly flip input image and bounding boxes."""
self._image, self._boxes = preprocessor.random_horizontal_flip(
self._image, boxes=self._boxes)
def clip_boxes(self, boxes):
"""Clip boxes to fit in an image."""
ymin, xmin, ymax, xmax = tf.unstack(boxes, axis=1)
ymin = tf.clip_by_value(ymin, 0, self._output_size[0] - 1)
xmin = tf.clip_by_value(xmin, 0, self._output_size[1] - 1)
ymax = tf.clip_by_value(ymax, 0, self._output_size[0] - 1)
xmax = tf.clip_by_value(xmax, 0, self._output_size[1] - 1)
boxes = tf.stack([ymin, xmin, ymax, xmax], axis=1)
return boxes
def resize_and_crop_boxes(self):
"""Resize boxes and crop it to the self._output dimension."""
boxlist = preprocessor.box_list.BoxList(self._boxes)
# boxlist is in range of [0, 1], so here we pass the scale_height/width
# instead of just scale.
boxes = preprocessor.box_list_scale(boxlist, self._scaled_height,
self._scaled_width).get()
# Adjust box coordinates based on the offset.
box_offset = tf.stack([
self._crop_offset_y,
self._crop_offset_x,
self._crop_offset_y,
self._crop_offset_x,
])
boxes -= tf.cast(tf.reshape(box_offset, [1, 4]), tf.float32)
# Clip the boxes.
boxes = self.clip_boxes(boxes)
# Filter out ground truth boxes that are illegal.
indices = tf.where(
tf.not_equal((boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]),
0))
boxes = tf.gather_nd(boxes, indices)
classes = tf.gather_nd(self._classes, indices)
return boxes, classes
@property
def image_scale(self):
# Return image scale from original image to scaled image.
return self._image_scale
@property
def image_scale_to_original(self):
# Return image scale from scaled image to original image.
return 1.0 / self._image_scale
@property
def offset_x(self):
return self._crop_offset_x
@property
def offset_y(self):
return self._crop_offset_y
def pad_to_fixed_size(data, pad_value, output_shape):
"""Pad data to a fixed length at the first dimension.
Args:
data: Tensor to be padded to output_shape.
pad_value: A constant value assigned to the paddings.
output_shape: The output shape of a 2D tensor.
Returns:
The Padded tensor with output_shape [max_instances_per_image, dimension].
"""
max_instances_per_image = output_shape[0]
dimension = output_shape[1]
data = tf.reshape(data, [-1, dimension])
num_instances = tf.shape(data)[0]
msg = 'ERROR: please increase config.max_instances_per_image'
with tf.control_dependencies(
[tf.assert_less(num_instances, max_instances_per_image, message=msg)]):
pad_length = max_instances_per_image - num_instances
paddings = pad_value * tf.ones([pad_length, dimension])
padded_data = tf.concat([data, paddings], axis=0)
padded_data = tf.reshape(padded_data, output_shape)
return padded_data
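# Example (illustrative sketch): padding three boxes to a fixed budget of five
# rows, using -1 as the pad value.
#
#   boxes = tf.constant([[0., 0., 10., 10.],
#                        [5., 5., 20., 20.],
#                        [1., 2., 3., 4.]])
#   padded = pad_to_fixed_size(boxes, -1, [5, 4])
#   # padded has shape [5, 4]; the last two rows are filled with -1.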
class InputReader:
"""Input reader for dataset."""
def __init__(self,
file_pattern,
is_training,
use_fake_data=False,
max_instances_per_image=None,
enable_map_parallelization=True):
self._file_pattern = file_pattern
self._is_training = is_training
self._use_fake_data = use_fake_data
    # COCO has a 100-instance limit, but users may set different values for
    # custom datasets.
self._max_instances_per_image = max_instances_per_image or 100
self._enable_map_parallelization = enable_map_parallelization
@tf.autograph.experimental.do_not_convert
def dataset_parser(self, value, example_decoder, anchor_labeler, params):
"""Parse data to a fixed dimension input image and learning targets.
Args:
value: a single serialized tf.Example string.
example_decoder: TF example decoder.
anchor_labeler: anchor box labeler.
params: a dict of extra parameters.
Returns:
image: Image tensor that is preprocessed to have normalized value and
fixed dimension [image_height, image_width, 3]
cls_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors]. The height_l and width_l
represent the dimension of class logits at l-th level.
box_targets_dict: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensor with
shape [height_l, width_l, num_anchors * 4]. The height_l and
width_l represent the dimension of bounding box regression output at
l-th level.
num_positives: Number of positive anchors in the image.
source_id: Source image id. Default value -1 if the source id is empty
in the groundtruth annotation.
image_scale: Scale of the processed image to the original image.
boxes: Groundtruth bounding box annotations. The box is represented in
[y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed
dimension [self._max_instances_per_image, 4].
is_crowds: Groundtruth annotations to indicate if an annotation
represents a group of instances by value {0, 1}. The tensor is
padded with 0 to the fixed dimension [self._max_instances_per_image].
areas: Groundtruth areas annotations. The tensor is padded with -1
to the fixed dimension [self._max_instances_per_image].
classes: Groundtruth classes annotations. The tensor is padded with -1
to the fixed dimension [self._max_instances_per_image].
"""
with tf.name_scope('parser'):
data = example_decoder.decode(value)
source_id = data['source_id']
image = data['image']
boxes = data['groundtruth_boxes']
classes = data['groundtruth_classes']
classes = tf.reshape(tf.cast(classes, dtype=tf.float32), [-1, 1])
areas = data['groundtruth_area']
is_crowds = data['groundtruth_is_crowd']
image_masks = data.get('groundtruth_instance_masks', [])
if self._is_training:
# Training time preprocessing.
if params['skip_crowd_during_training']:
indices = tf.where(tf.logical_not(data['groundtruth_is_crowd']))
classes = tf.gather_nd(classes, indices)
boxes = tf.gather_nd(boxes, indices)
input_processor = DetectionInputProcessor(image, params['image_size'],
boxes, classes)
input_processor.normalize_image(dtype=tf.float16 if \
params['mixed_precision'] else tf.float32)
if self._is_training:
if params['input_rand_hflip']:
input_processor.random_horizontal_flip()
input_processor.set_training_random_scale_factors(
params['jitter_min'], params['jitter_max'],
params.get('target_size', None))
else:
input_processor.set_scale_factors_to_output_size()
image = input_processor.resize_and_crop_image()
boxes, classes = input_processor.resize_and_crop_boxes()
# Assign anchors.
(cls_targets, box_targets,
num_positives) = anchor_labeler.label_anchors(boxes, classes)
source_id = tf.where(
tf.equal(source_id, tf.constant('')), '-1', source_id)
source_id = tf.strings.to_number(source_id)
# Pad groundtruth data for evaluation.
image_scale = input_processor.image_scale_to_original
boxes *= image_scale
is_crowds = tf.cast(is_crowds, dtype=tf.float32)
boxes = pad_to_fixed_size(boxes, -1, [self._max_instances_per_image, 4])
is_crowds = pad_to_fixed_size(is_crowds, 0,
[self._max_instances_per_image, 1])
areas = pad_to_fixed_size(areas, -1, [self._max_instances_per_image, 1])
classes = pad_to_fixed_size(classes, -1,
[self._max_instances_per_image, 1])
if params['mixed_precision']:
dtype = get_mixed_precision_policy().compute_dtype
if image.dtype != dtype:
image = tf.cast(image, dtype=dtype)
box_targets = tf.nest.map_structure(
lambda box_target: tf.cast(box_target, dtype=dtype), box_targets)
return (image, cls_targets, box_targets, num_positives, source_id,
image_scale, boxes, is_crowds, areas, classes, image_masks)
@tf.autograph.experimental.do_not_convert
def process_example(self, params, batch_size, images, cls_targets,
box_targets, num_positives, source_ids, image_scales,
boxes, is_crowds, areas, classes, image_masks):
"""Processes one batch of data."""
labels = {}
# Count num_positives in a batch.
num_positives_batch = tf.reduce_mean(num_positives)
labels['mean_num_positives'] = tf.reshape(
tf.tile(tf.expand_dims(num_positives_batch, 0), [
batch_size,
]), [batch_size, 1])
if params['data_format'] == 'channels_first':
images = tf.transpose(images, [0, 3, 1, 2])
for level in range(params['min_level'], params['max_level'] + 1):
labels['cls_targets_%d' % level] = cls_targets[level]
labels['box_targets_%d' % level] = box_targets[level]
if params['data_format'] == 'channels_first':
labels['cls_targets_%d' % level] = tf.transpose(
labels['cls_targets_%d' % level], [0, 3, 1, 2])
labels['box_targets_%d' % level] = tf.transpose(
labels['box_targets_%d' % level], [0, 3, 1, 2])
# Concatenate groundtruth annotations to a tensor.
groundtruth_data = tf.concat([boxes, is_crowds, areas, classes], axis=2)
labels['source_ids'] = source_ids
labels['groundtruth_data'] = groundtruth_data
labels['image_scales'] = image_scales
labels['image_masks'] = image_masks
return images, labels
@property
def dataset_options(self):
options = tf.data.Options()
options.experimental_deterministic = not self._is_training
options.experimental_optimization.map_parallelization = self._enable_map_parallelization
options.experimental_optimization.parallel_batch = True
options.threading.private_threadpool_size = max(2, (multiprocessing.cpu_count() // hvd.local_size()) - 2)
return options
def __call__(self, params, input_context=None, batch_size=None):
input_anchors = anchors.Anchors(params['min_level'], params['max_level'],
params['num_scales'],
params['aspect_ratios'],
params['anchor_scale'],
params['image_size'])
anchor_labeler = anchors.AnchorLabeler(input_anchors, params['num_classes'])
example_decoder = tf_example_decoder.TfExampleDecoder(
include_mask='segmentation' in params['heads'],
regenerate_source_id=params['regenerate_source_id']
)
batch_size = batch_size or params['batch_size']
dataset = tf.data.Dataset.list_files(self._file_pattern, shuffle=False)
if self._is_training:
dataset = dataset.shard(get_world_size(), get_rank())
      dataset = dataset.shuffle(buffer_size=1024)
# Prefetch data from files.
def _prefetch_dataset(filename):
if params.get('dataset_type', None) == 'sstable':
pass
else:
dataset = tf.data.TFRecordDataset(filename).prefetch(1)
return dataset
dataset = dataset.interleave(
_prefetch_dataset, cycle_length=10, block_length=16,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.with_options(self.dataset_options)
if self._is_training:
dataset = dataset.shuffle(params['shuffle_buffer'])
# Parse the fetched records to input tensors for model function.
# pylint: disable=g-long-lambda
if params.get('dataset_type', None) == 'sstable':
map_fn = lambda key, value: self.dataset_parser(value, example_decoder,
anchor_labeler, params)
else:
map_fn = lambda value: self.dataset_parser(value, example_decoder,
anchor_labeler, params)
# pylint: enable=g-long-lambda
dataset = dataset.map(
map_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# dataset = dataset.prefetch(batch_size)
dataset = dataset.batch(batch_size, drop_remainder=params['drop_remainder'])
dataset = dataset.map(
lambda *args: self.process_example(params, batch_size, *args))
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
if self._is_training:
dataset = dataset.repeat()
if self._use_fake_data:
# Turn this dataset into a semi-fake dataset which always loop at the
# first batch. This reduces variance in performance and is useful in
# testing.
dataset = dataset.take(1).cache().repeat()
return dataset
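# Example usage (illustrative sketch; `config` stands for the project's hparams
# object and is an assumption here -- the params dict must provide keys such as
# 'image_size', 'min_level', 'max_level' and 'batch_size', and Horovod is
# assumed to have been initialized):
#
#   reader = InputReader('/data/coco/train-*.tfrecord', is_training=True,
#                        max_instances_per_image=100)
#   dataset = reader(params=config.as_dict(), batch_size=8)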
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/dataloader.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common utils."""
from typing import Text
import tensorflow as tf
def srelu_fn(x):
"""Smooth relu: a smooth version of relu."""
with tf.name_scope('srelu'):
beta = tf.Variable(20.0, name='srelu_beta', dtype=tf.float32)**2
beta = tf.cast(beta**2, x.dtype)
safe_log = tf.math.log(tf.where(x > 0., beta * x + 1., tf.ones_like(x)))
return tf.where((x > 0.), x - (1. / beta) * safe_log, tf.zeros_like(x))
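# Example (illustrative sketch): srelu_fn behaves like a smoothed relu -- it is
# exactly zero for non-positive inputs and slightly below the identity for
# positive inputs.
#
#   x = tf.constant([-2.0, 0.0, 2.0])
#   y = srelu_fn(x)  # y[0] == 0.0, y[1] == 0.0, y[2] is just below 2.0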
def activation_fn(features: tf.Tensor, act_type: Text):
"""Customized non-linear activation type."""
if act_type in ('silu', 'swish'):
return tf.nn.swish(features)
elif act_type == 'swish_native':
return features * tf.sigmoid(features)
elif act_type == 'hswish':
return features * tf.nn.relu6(features + 3) / 6
elif act_type == 'relu':
return tf.nn.relu(features)
elif act_type == 'relu6':
return tf.nn.relu6(features)
elif act_type == 'mish':
return features * tf.math.tanh(tf.math.softplus(features))
elif act_type == 'identity':
return tf.identity(features)
elif act_type == 'srelu':
return srelu_fn(features)
else:
raise ValueError('Unsupported act_type {}'.format(act_type)) | DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/activation_builder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""COCO-style evaluation metrics.
Implements the interface of COCO API and metric_fn in tf.TPUEstimator.
COCO API: github.com/cocodataset/cocoapi/
"""
import json
import os
from absl import logging
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import tensorflow as tf
import horovod.tensorflow.keras as hvd
from model import label_util
class EvaluationMetric():
"""COCO evaluation metric class.
This class cannot inherit from tf.keras.metrics.Metric due to numpy.
"""
def __init__(self, filename=None, testdev_dir=None, label_map=None):
"""Constructs COCO evaluation class.
The class provides the interface to metrics_fn in TPUEstimator. The
    _update_op() takes detections from each image and pushes them to
self.detections. The _evaluate() loads a JSON file in COCO annotation format
as the groundtruth and runs COCO evaluation.
Args:
filename: Ground truth JSON file name. If filename is None, use
groundtruth data passed from the dataloader for evaluation. filename is
ignored if testdev_dir is not None.
      testdev_dir: folder name for testdev data. If not None, run eval without
groundtruth, and filename will be ignored.
label_map: a dict from id to class name. Used for per-class AP.
"""
self.label_map = label_map
self.filename = filename
self.testdev_dir = testdev_dir
self.metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1',
'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl']
self.reset_states()
def reset_states(self):
"""Reset COCO API object."""
self.detections = []
self.dataset = {
'images': [],
'annotations': [],
'categories': []
}
self.image_id = 1
self.annotation_id = 1
self.category_ids = []
self.metric_values = None
def evaluate(self):
"""Evaluates with detections from all images with COCO API.
Returns:
coco_metric: float numpy array with shape [12] representing the
coco-style evaluation metrics.
"""
if self.filename:
coco_gt = COCO(self.filename)
else:
coco_gt = COCO()
coco_gt.dataset = self.dataset
coco_gt.createIndex()
if self.testdev_dir:
# Run on test-dev dataset.
box_result_list = []
for det in self.detections:
box_result_list.append({
'image_id': int(det[0]),
'category_id': int(det[6]),
'bbox': np.around(
det[1:5].astype(np.float64), decimals=2).tolist(),
'score': float(np.around(det[5], decimals=3)),
})
json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')
      # Must be in the format of 'detections_test-dev2017_xxx_results'.
fname = 'detections_test-dev2017_test_results'
output_path = os.path.join(self.testdev_dir, fname + '.json')
logging.info('Writing output json file to: %s', output_path)
with tf.io.gfile.GFile(output_path, 'w') as fid:
json.dump(box_result_list, fid)
return np.array([-1.], dtype=np.float32)
else:
# Run on validation dataset.
detections = np.array(self.detections)
image_ids = list(set(detections[:, 0]))
coco_dt = coco_gt.loadRes(detections)
coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
coco_eval.params.imgIds = image_ids
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
coco_metrics = coco_eval.stats
if self.label_map:
# Get per_class AP, see pycocotools/cocoeval.py:334
# TxRxKxAxM: iouThrs x recThrs x catIds x areaRng x maxDets
        # Use areaRng_id=0 ('all') and maxDets_id=-1 (200) by default.
precision = coco_eval.eval['precision'][:, :, :, 0, -1]
# Ideally, label_map should match the eval set, but it is possible that
        # some classes have no data in the eval set.
ap_perclass = [0] * max(precision.shape[-1], len(self.label_map))
for c in range(precision.shape[-1]): # iterate over all classes
precision_c = precision[:, :, c]
# Only consider values if > -1.
precision_c = precision_c[precision_c > -1]
ap_c = np.mean(precision_c) if precision_c.size else -1.
ap_perclass[c] = ap_c
coco_metrics = np.concatenate((coco_metrics, ap_perclass))
# Return the concat normal and per-class AP.
return np.array(coco_metrics, dtype=np.float32)
def result(self):
"""Return the metric values (and compute it if needed)."""
if self.metric_values is None:
self.metric_values = self.evaluate()
return self.metric_values
def update_state(self, groundtruth_data, detections):
"""Update detection results and groundtruth data.
Append detection results to self.detections to aggregate results from
all validation set. The groundtruth_data is parsed and added into a
dictionary with the same format as COCO dataset, which can be used for
evaluation.
Args:
groundtruth_data: Groundtruth annotations in a tensor with each row
representing [y1, x1, y2, x2, is_crowd, area, class].
detections: Detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class].
"""
for i, det in enumerate(detections):
# Filter out detections with predicted class label = -1.
indices = np.where(det[:, -1] > -1)[0]
det = det[indices]
if det.shape[0] == 0:
continue
# Append groundtruth annotations to create COCO dataset object.
# Add images.
image_id = det[0, 0]
if image_id == -1:
image_id = self.image_id
det[:, 0] = image_id
self.detections.extend(det)
if not self.filename and not self.testdev_dir:
        # Process groundtruth data only if filename is empty and there is no
        # testdev_dir.
self.dataset['images'].append({
'id': int(image_id),
})
# Add annotations.
indices = np.where(groundtruth_data[i, :, -1] > -1)[0]
for data in groundtruth_data[i, indices]:
box = data[0:4]
is_crowd = data[4]
area = (box[3] - box[1]) * (box[2] - box[0])
category_id = data[6]
if category_id < 0:
break
self.dataset['annotations'].append({
'id': int(self.annotation_id),
'image_id': int(image_id),
'category_id': int(category_id),
'bbox': [box[1], box[0], box[3] - box[1], box[2] - box[0]],
'area': area,
'iscrowd': int(is_crowd)
})
self.annotation_id += 1
self.category_ids.append(category_id)
self.image_id += 1
if not self.filename:
self.category_ids = list(set(self.category_ids))
self.dataset['categories'] = [
{'id': int(category_id)} for category_id in self.category_ids
]
def gather(self):
self.detections = hvd.allgather(self.detections)
def estimator_metric_fn(self, detections, groundtruth_data):
"""Constructs the metric function for tf.TPUEstimator.
For each metric, we return the evaluation op and an update op; the update op
is shared across all metrics and simply appends the set of detections to the
`self.detections` list. The metric op is invoked after all examples have
    been seen and computes the aggregate COCO metrics. Please find API details
    at: https://www.tensorflow.org/api_docs/python/tf/contrib/learn/MetricSpec
Args:
detections: Detection results in a tensor with each row representing
[image_id, x, y, width, height, score, class]
groundtruth_data: Groundtruth annotations in a tensor with each row
representing [y1, x1, y2, x2, is_crowd, area, class].
Returns:
metrics_dict: A dictionary mapping from evaluation name to a tuple of
operations (`metric_op`, `update_op`). `update_op` appends the
detections for the metric to the `self.detections` list.
"""
with tf.name_scope('coco_metric'):
if self.testdev_dir:
update_op = tf.numpy_function(self.update_state,
[groundtruth_data, detections], [])
metrics = tf.numpy_function(self.result, [], tf.float32)
metrics_dict = {'AP': (metrics, update_op)}
return metrics_dict
else:
update_op = tf.numpy_function(self.update_state,
[groundtruth_data, detections], [])
metrics = tf.numpy_function(self.result, [], tf.float32)
metrics_dict = {}
for i, name in enumerate(self.metric_names):
metrics_dict[name] = (metrics[i], update_op)
if self.label_map:
# process per-class AP.
label_map = label_util.get_label_map(self.label_map)
for i, cid in enumerate(sorted(label_map.keys())):
name = 'AP_/%s' % label_map[cid]
metrics_dict[name] = (metrics[i + len(self.metric_names)],
update_op)
return metrics_dict
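# Example usage (illustrative sketch; `val_dataset` and `model_fn` are
# placeholders for an evaluation dataset and a detection function returning
# rows of [image_id, x, y, width, height, score, class]):
#
#   metric = EvaluationMetric(filename='annotations/instances_val2017.json')
#   for images, labels in val_dataset:
#       detections = model_fn(images)
#       metric.update_state(labels['groundtruth_data'].numpy(), detections)
#   ap_values = metric.result()  # 12 COCO metrics, overall AP first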
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/model/coco_metric.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow.compat.v1 as tf
from object_detection import box_list
from object_detection import shape_utils
KEYPOINTS_FIELD_NAME = 'keypoints'
class TargetAssigner(object):
"""Target assigner to compute classification and regression targets."""
def __init__(self, similarity_calc, matcher, box_coder,
negative_class_weight=1.0, unmatched_cls_target=None):
"""Construct Object Detection Target Assigner.
Args:
similarity_calc: a RegionSimilarityCalculator
matcher: Matcher used to match groundtruth to anchors.
box_coder: BoxCoder used to encode matching groundtruth boxes with
respect to anchors.
negative_class_weight: classification weight to be associated to negative
anchors (default: 1.0). The weight must be in [0., 1.].
unmatched_cls_target: a float32 tensor with shape [d_1, d_2, ..., d_k]
which is consistent with the classification target for each
anchor (and can be empty for scalar targets). This shape must thus be
compatible with the groundtruth labels that are passed to the "assign"
function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
If set to None, unmatched_cls_target is set to be [0] for each anchor.
Raises:
ValueError: if similarity_calc is not a RegionSimilarityCalculator or
if matcher is not a Matcher or if box_coder is not a BoxCoder
"""
self._similarity_calc = similarity_calc
self._matcher = matcher
self._box_coder = box_coder
self._negative_class_weight = negative_class_weight
if unmatched_cls_target is None:
self._unmatched_cls_target = tf.constant([0], tf.float32)
else:
self._unmatched_cls_target = unmatched_cls_target
@property
def box_coder(self):
return self._box_coder
def assign(self, anchors, groundtruth_boxes, groundtruth_labels=None,
groundtruth_weights=None, **params):
"""Assign classification and regression targets to each anchor.
For a given set of anchors and groundtruth detections, match anchors
to groundtruth_boxes and assign classification and regression targets to
each anchor as well as weights based on the resulting match (specifying,
e.g., which anchors should not contribute to training loss).
Anchors that are not matched to anything are given a classification target
of self._unmatched_cls_target which can be specified via the constructor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth boxes
groundtruth_labels: a tensor of shape [M, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar inputs). When set
to None, groundtruth_labels assumes a binary problem where all
ground_truth boxes get a positive label (of 1).
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box. The weights
must be in [0., 1.]. If None, all weights are set to 1.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
which has shape [num_gt_boxes, d_1, d_2, ... d_k].
cls_weights: a float32 tensor with shape [num_anchors]
reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
reg_weights: a float32 tensor with shape [num_anchors]
match: a matcher.Match object encoding the match between anchors and
groundtruth boxes, with rows corresponding to groundtruth boxes
and columns corresponding to anchors.
Raises:
ValueError: if anchors or groundtruth_boxes are not of type
box_list.BoxList
"""
if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be a BoxList')
if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be a BoxList')
if groundtruth_labels is None:
groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
0))
groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
unmatched_shape_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
shape_utils.combined_static_and_dynamic_shape(
self._unmatched_cls_target))
labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
shape_utils.combined_static_and_dynamic_shape(
groundtruth_labels)[:1],
shape_utils.combined_static_and_dynamic_shape(
groundtruth_boxes.get())[:1])
if groundtruth_weights is None:
num_gt_boxes = groundtruth_boxes.num_boxes_static()
if not num_gt_boxes:
num_gt_boxes = groundtruth_boxes.num_boxes()
groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
with tf.control_dependencies(
[unmatched_shape_assert, labels_and_box_shapes_assert]):
match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
anchors)
match = self._matcher.match(match_quality_matrix, **params)
reg_targets = self._create_regression_targets(anchors,
groundtruth_boxes,
match)
cls_targets = self._create_classification_targets(groundtruth_labels,
match)
reg_weights = self._create_regression_weights(match, groundtruth_weights)
cls_weights = self._create_classification_weights(match,
groundtruth_weights)
num_anchors = anchors.num_boxes_static()
if num_anchors is not None:
reg_targets = self._reset_target_shape(reg_targets, num_anchors)
cls_targets = self._reset_target_shape(cls_targets, num_anchors)
reg_weights = self._reset_target_shape(reg_weights, num_anchors)
cls_weights = self._reset_target_shape(cls_weights, num_anchors)
return cls_targets, cls_weights, reg_targets, reg_weights, match
def _reset_target_shape(self, target, num_anchors):
"""Sets the static shape of the target.
Args:
target: the target tensor. Its first dimension will be overwritten.
num_anchors: the number of anchors, which is used to override the target's
first dimension.
Returns:
A tensor with the shape info filled in.
"""
target_shape = target.get_shape().as_list()
target_shape[0] = num_anchors
target.set_shape(target_shape)
return target
def _create_regression_targets(self, anchors, groundtruth_boxes, match):
"""Returns a regression target for each anchor.
Args:
anchors: a BoxList representing N anchors
groundtruth_boxes: a BoxList representing M groundtruth_boxes
match: a matcher.Match object
Returns:
reg_targets: a float32 tensor with shape [N, box_code_dimension]
"""
matched_gt_boxes = match.gather_based_on_match(
groundtruth_boxes.get(),
unmatched_value=tf.zeros(4),
ignored_value=tf.zeros(4))
matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
if groundtruth_boxes.has_field(KEYPOINTS_FIELD_NAME):
groundtruth_keypoints = groundtruth_boxes.get_field(KEYPOINTS_FIELD_NAME)
matched_keypoints = match.gather_based_on_match(
groundtruth_keypoints,
unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
matched_gt_boxlist.add_field(KEYPOINTS_FIELD_NAME, matched_keypoints)
matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
match_results_shape = shape_utils.combined_static_and_dynamic_shape(
match.match_results)
# Zero out the unmatched and ignored regression targets.
unmatched_ignored_reg_targets = tf.tile(
self._default_regression_target(), [match_results_shape[0], 1])
matched_anchors_mask = match.matched_column_indicator()
reg_targets = tf.where(matched_anchors_mask,
matched_reg_targets,
unmatched_ignored_reg_targets)
return reg_targets
def _default_regression_target(self):
"""Returns the default target for anchors to regress to.
Default regression targets are set to zero (though in
this implementation what these targets are set to should
not matter as the regression weight of any box set to
regress to the default target is zero).
Returns:
default_target: a float32 tensor with shape [1, box_code_dimension]
"""
return tf.constant([self._box_coder.code_size*[0]], tf.float32)
def _create_classification_targets(self, groundtruth_labels, match):
"""Create classification targets for each anchor.
    Assign a classification target for each anchor to the matching
groundtruth label that is provided by match. Anchors that are not matched
to anything are given the target self._unmatched_cls_target
Args:
groundtruth_labels: a tensor of shape [num_gt_boxes, d_1, ... d_k]
with labels for each of the ground_truth boxes. The subshape
[d_1, ... d_k] can be empty (corresponding to scalar labels).
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
Returns:
a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
shape [num_gt_boxes, d_1, d_2, ... d_k].
"""
return match.gather_based_on_match(
groundtruth_labels,
unmatched_value=self._unmatched_cls_target,
ignored_value=self._unmatched_cls_target)
def _create_regression_weights(self, match, groundtruth_weights):
"""Set regression weight for each anchor.
Only positive anchors are set to contribute to the regression loss, so this
method returns a weight of 1 for every positive anchor and 0 for every
negative anchor.
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing regression weights.
"""
return match.gather_based_on_match(
groundtruth_weights, ignored_value=0., unmatched_value=0.)
def _create_classification_weights(self,
match,
groundtruth_weights):
"""Create classification weights for each anchor.
Positive (matched) anchors are associated with a weight of
positive_class_weight and negative (unmatched) anchors are associated with
a weight of negative_class_weight. When anchors are ignored, weights are set
to zero. By default, both positive/negative weights are set to 1.0,
but they can be adjusted to handle class imbalance (which is almost always
the case in object detection).
Args:
match: a matcher.Match object that provides a matching between anchors
and groundtruth boxes.
groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors matched to a particular groundtruth box.
Returns:
a float32 tensor with shape [num_anchors] representing classification
weights.
"""
return match.gather_based_on_match(
groundtruth_weights,
ignored_value=0.,
unmatched_value=self._negative_class_weight)
def get_box_coder(self):
"""Get BoxCoder of this TargetAssigner.
Returns:
BoxCoder object.
"""
return self._box_coder
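# Example usage (illustrative sketch; the similarity calculator, matcher and
# box coder names below mirror a typical RetinaNet-style setup and are
# assumptions -- substitute whichever implementations the project provides):
#
#   similarity = region_similarity_calculator.IouSimilarity()
#   matcher_obj = argmax_matcher.ArgMaxMatcher(matched_threshold=0.5,
#                                              unmatched_threshold=0.4)
#   coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
#   assigner = TargetAssigner(similarity, matcher_obj, coder)
#   cls_t, cls_w, box_t, box_w, match = assigner.assign(
#       anchors_boxlist, groundtruth_boxlist, groundtruth_labels)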
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/target_assigner.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Matcher interface and Match class.
This module defines the Matcher interface and the Match object. The job of the
matcher is to match row and column indices based on the similarity matrix and
other optional parameters. Each column is matched to at most one row. There
are three possibilities for the matching:
1) match: A column matches a row.
2) no_match: A column does not match any row.
3) ignore: A column that is neither 'match' nor no_match.
The ignore case is regularly encountered in object detection: when an anchor has
a relatively small overlap with a ground-truth box, one wants to consider this
box neither a positive example (match) nor a negative example (no match).
The Match class is used to store the match results and it provides simple apis
to query the results.
"""
import abc
import tensorflow.compat.v1 as tf
class Match(object):
"""Class to store results from the matcher.
This class is used to store the results from the matcher. It provides
convenient methods to query the matching results.
"""
def __init__(self, match_results):
"""Constructs a Match object.
Args:
match_results: Integer tensor of shape [N] with (1) match_results[i]>=0,
meaning that column i is matched with row match_results[i].
(2) match_results[i]=-1, meaning that column i is not matched.
(3) match_results[i]=-2, meaning that column i is ignored.
Raises:
      ValueError: if match_results does not have rank 1 or is not an int32
        tensor.
"""
if match_results.shape.ndims != 1:
raise ValueError('match_results should have rank 1')
if match_results.dtype != tf.int32:
      raise ValueError('match_results should be an int32 tensor')
self._match_results = match_results
@property
def match_results(self):
"""The accessor for match results.
Returns:
the tensor which encodes the match results.
"""
return self._match_results
def matched_column_indices(self):
"""Returns column indices that match to some row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(self._match_results, -1)))
def matched_column_indicator(self):
"""Returns column indices that are matched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.greater_equal(self._match_results, 0)
def num_matched_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.shape(self.matched_column_indices())[0]
def unmatched_column_indices(self):
"""Returns column indices that do not match any row.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.equal(self._match_results, -1)))
def unmatched_column_indicator(self):
"""Returns column indices that are unmatched.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return tf.equal(self._match_results, -1)
def num_unmatched_columns(self):
"""Returns number (int32 scalar tensor) of unmatched columns."""
return tf.shape(self.unmatched_column_indices())[0]
def ignored_column_indices(self):
"""Returns column indices that are ignored (neither Matched nor Unmatched).
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(self.ignored_column_indicator()))
def ignored_column_indicator(self):
"""Returns boolean column indicator where True means the column is ignored.
Returns:
column_indicator: boolean vector which is True for all ignored column
indices.
"""
return tf.equal(self._match_results, -2)
def num_ignored_columns(self):
"""Returns number (int32 scalar tensor) of matched columns."""
return tf.shape(self.ignored_column_indices())[0]
def unmatched_or_ignored_column_indices(self):
"""Returns column indices that are unmatched or ignored.
The indices returned by this op are always sorted in increasing order.
Returns:
column_indices: int32 tensor of shape [K] with column indices.
"""
return self._reshape_and_cast(tf.where(tf.greater(0, self._match_results)))
def matched_row_indices(self):
"""Returns row indices that match some column.
The indices returned by this op are ordered so as to be in correspondence
with the output of matched_column_indicator(). For example if
self.matched_column_indicator() is [0,2], and self.matched_row_indices() is
[7, 3], then we know that column 0 was matched to row 7 and column 2 was
matched to row 3.
Returns:
row_indices: int32 tensor of shape [K] with row indices.
"""
return self._reshape_and_cast(
tf.gather(self._match_results, self.matched_column_indices()))
def _reshape_and_cast(self, t):
return tf.cast(tf.reshape(t, [-1]), tf.int32)
def gather_based_on_match(self, input_tensor, unmatched_value,
ignored_value):
"""Gathers elements from `input_tensor` based on match results.
For columns that are matched to a row, gathered_tensor[col] is set to
input_tensor[match_results[col]]. For columns that are unmatched,
gathered_tensor[col] is set to unmatched_value. Finally, for columns that
are ignored gathered_tensor[col] is set to ignored_value.
Note that the input_tensor.shape[1:] must match with unmatched_value.shape
and ignored_value.shape
Args:
input_tensor: Tensor to gather values from.
unmatched_value: Constant tensor value for unmatched columns.
ignored_value: Constant tensor value for ignored columns.
Returns:
gathered_tensor: A tensor containing values gathered from input_tensor.
The shape of the gathered tensor is [match_results.shape[0]] +
input_tensor.shape[1:].
"""
input_tensor = tf.concat([tf.stack([ignored_value, unmatched_value]),
input_tensor], axis=0)
gather_indices = tf.maximum(self.match_results + 2, 0)
gathered_tensor = tf.gather(input_tensor, gather_indices)
return gathered_tensor
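# Example (illustrative sketch): with match_results = [1, -1, 0, -2] and a
# per-row input tensor [[10.], [20.]], calling
# gather_based_on_match(input_tensor, unmatched_value=[0.], ignored_value=[0.])
# returns [[20.], [0.], [10.], [0.]] -- matched columns pick up their row's
# value, while unmatched and ignored columns fall back to the provided defaults.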
class Matcher(object):
"""Abstract base class for matcher.
"""
__metaclass__ = abc.ABCMeta
def match(self, similarity_matrix, scope=None, **params):
"""Computes matches among row and column indices and returns the result.
Computes matches among the row and column indices based on the similarity
matrix and optional arguments.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
scope: Op scope name. Defaults to 'Match' if None.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
A Match object with the results of matching.
"""
with tf.name_scope(scope, 'Match', [similarity_matrix, params]) as scope:
return Match(self._match(similarity_matrix, **params))
@abc.abstractmethod
def _match(self, similarity_matrix, **params):
"""Method to be overridden by implementations.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher value means more similar.
**params: Additional keyword arguments for specific implementations of
the Matcher.
Returns:
match_results: Integer tensor of shape [M]: match_results[i]>=0 means
that column i is matched to row match_results[i], match_results[i]=-1
means that the column is not matched. match_results[i]=-2 means that
the column is ignored (usually this happens when there is a very weak
match which one neither wants as positive nor negative example).
"""
pass
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/matcher.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Faster RCNN box coder.
Faster RCNN box coder follows the coding schema described below:
ty = (y - ya) / ha
tx = (x - xa) / wa
th = log(h / ha)
tw = log(w / wa)
where x, y, w, h denote the box's center coordinates, width and height
respectively. Similarly, xa, ya, wa, ha denote the anchor's center
coordinates, width and height. tx, ty, tw and th denote the anchor-encoded
center, width and height respectively.
See http://arxiv.org/abs/1506.01497 for details.
"""
import tensorflow.compat.v1 as tf
from object_detection import box_coder
from object_detection import box_list
EPSILON = 1e-8
class FasterRcnnBoxCoder(box_coder.BoxCoder):
"""Faster RCNN box coder."""
def __init__(self, scale_factors=None):
"""Constructor for FasterRcnnBoxCoder.
Args:
scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.
If set to None, does not perform scaling. For Faster RCNN,
the open-source implementation recommends using [10.0, 10.0, 5.0, 5.0].
"""
if scale_factors:
assert len(scale_factors) == 4
for scalar in scale_factors:
assert scalar > 0
self._scale_factors = scale_factors
@property
def code_size(self):
return 4
def _encode(self, boxes, anchors):
"""Encode a box collection with respect to anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded.
anchors: BoxList of anchors.
Returns:
a tensor representing N anchor-encoded boxes of the format
[ty, tx, th, tw].
"""
# Convert anchors to the center coordinate representation.
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()
# Avoid NaN in division and log below.
ha = tf.maximum(EPSILON, ha)
wa = tf.maximum(EPSILON, wa)
h = tf.maximum(EPSILON, h)
w = tf.maximum(EPSILON, w)
tx = (xcenter - xcenter_a) / wa
ty = (ycenter - ycenter_a) / ha
tw = tf.log(w / wa)
th = tf.log(h / ha)
# Scales location targets as used in paper for joint training.
if self._scale_factors:
ty *= self._scale_factors[0]
tx *= self._scale_factors[1]
th *= self._scale_factors[2]
tw *= self._scale_factors[3]
return tf.transpose(tf.stack([ty, tx, th, tw]))
def _decode(self, rel_codes, anchors):
"""Decode relative codes to boxes.
Args:
rel_codes: a tensor representing N anchor-encoded boxes.
anchors: BoxList of anchors.
Returns:
boxes: BoxList holding N bounding boxes.
"""
ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()
ty, tx, th, tw = tf.unstack(tf.transpose(rel_codes))
if self._scale_factors:
ty /= self._scale_factors[0]
tx /= self._scale_factors[1]
th /= self._scale_factors[2]
tw /= self._scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
ycenter = ty * ha + ycenter_a
xcenter = tx * wa + xcenter_a
ymin = ycenter - h / 2.
xmin = xcenter - w / 2.
ymax = ycenter + h / 2.
xmax = xcenter + w / 2.
return box_list.BoxList(tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))
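# Example usage (illustrative sketch): a single box encoded against a single
# anchor and decoded back; the decoded box matches the original up to float
# precision.
#
#   boxes = box_list.BoxList(tf.constant([[10., 10., 50., 50.]]))
#   anchors = box_list.BoxList(tf.constant([[15., 12., 45., 48.]]))
#   coder = FasterRcnnBoxCoder(scale_factors=[10.0, 10.0, 5.0, 5.0])
#   rel_codes = coder.encode(boxes, anchors)    # [1, 4] tensor of [ty, tx, th, tw]
#   decoded = coder.decode(rel_codes, anchors)  # BoxList holding the original box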
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/faster_rcnn_box_coder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
import tensorflow.compat.v1 as tf
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
else: return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
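# Example (illustrative sketch, graph mode): for a placeholder of shape
# [None, 3], the static dimension is kept and the unknown one is replaced by a
# dynamic tensor.
#
#   x = tf.placeholder(tf.float32, shape=[None, 3])
#   combined_static_and_dynamic_shape(x)  # -> [<scalar int tensor>, 3]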
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/shape_utils.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base box coder.
Box coders convert between coordinate frames, namely image-centric
(with (0,0) on the top left of image) and anchor-centric (with (0,0) being
defined by a specific anchor).
Users of a BoxCoder can call two methods:
encode: which encodes a box with respect to a given anchor
(or rather, a tensor of boxes wrt a corresponding tensor of anchors) and
decode: which inverts this encoding with a decode operation.
In both cases, the arguments are assumed to be in 1-1 correspondence already;
it is not the job of a BoxCoder to perform matching.
"""
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
import tensorflow.compat.v1 as tf
# Box coder types.
FASTER_RCNN = 'faster_rcnn'
KEYPOINT = 'keypoint'
MEAN_STDDEV = 'mean_stddev'
SQUARE = 'square'
class BoxCoder(object):
"""Abstract base class for box coder."""
__metaclass__ = ABCMeta
@abstractproperty
def code_size(self):
"""Return the size of each code.
This number is a constant and should agree with the output of the `encode`
op (e.g. if rel_codes is the output of self.encode(...), then it should have
shape [N, code_size()]). This abstractproperty should be overridden by
implementations.
Returns:
an integer constant
"""
pass
def encode(self, boxes, anchors):
"""Encode a box list relative to an anchor collection.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
with tf.name_scope('Encode'):
return self._encode(boxes, anchors)
def decode(self, rel_codes, anchors):
"""Decode boxes that are encoded relative to an anchor collection.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
with tf.name_scope('Decode'):
return self._decode(rel_codes, anchors)
@abstractmethod
def _encode(self, boxes, anchors):
"""Method to be overridden by implementations.
Args:
boxes: BoxList holding N boxes to be encoded
anchors: BoxList of N anchors
Returns:
a tensor representing N relative-encoded boxes
"""
pass
@abstractmethod
def _decode(self, rel_codes, anchors):
"""Method to be overridden by implementations.
Args:
rel_codes: a tensor representing N relative-encoded boxes
anchors: BoxList of anchors
Returns:
boxlist: BoxList holding N boxes encoded in the ordinary way (i.e.,
with corners y_min, x_min, y_max, x_max)
"""
pass
def batch_decode(encoded_boxes, box_coder, anchors):
"""Decode a batch of encoded boxes.
This op takes a batch of encoded bounding boxes and transforms
them to a batch of bounding boxes specified by their corners in
the order of [y_min, x_min, y_max, x_max].
Args:
encoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
code_size] representing the location of the objects.
box_coder: a BoxCoder object.
anchors: a BoxList of anchors used to encode `encoded_boxes`.
Returns:
decoded_boxes: a float32 tensor of shape [batch_size, num_anchors,
coder_size] representing the corners of the objects in the order
of [y_min, x_min, y_max, x_max].
Raises:
ValueError: if batch sizes of the inputs are inconsistent, or if
the number of anchors inferred from encoded_boxes and anchors are
inconsistent.
"""
encoded_boxes.get_shape().assert_has_rank(3)
if encoded_boxes.get_shape()[1].value != anchors.num_boxes_static():
raise ValueError('The number of anchors inferred from encoded_boxes'
' and anchors are inconsistent: shape[1] of encoded_boxes'
' %s should be equal to the number of anchors: %s.' %
(encoded_boxes.get_shape()[1].value,
anchors.num_boxes_static()))
decoded_boxes = tf.stack([
box_coder.decode(boxes, anchors).get()
for boxes in tf.unstack(encoded_boxes)
])
return decoded_boxes
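# Hedged usage sketch (illustration only, not part of the upstream API):
# shows how a concrete BoxCoder subclass plugs into batch_decode, assuming
# TF1 graph-mode semantics (tf.compat.v1) as used throughout this module.
# CornerOffsetCoder is a hypothetical example coder, not one of the box
# coder types listed above.
def _example_box_coder_usage():
  """Round-trips a batch of boxes through a trivial example coder."""
  from object_detection import box_list

  class CornerOffsetCoder(BoxCoder):
    """Hypothetical coder: rel_codes = box corners - anchor corners."""

    @property
    def code_size(self):
      return 4

    def _encode(self, boxes, anchors):
      return boxes.get() - anchors.get()

    def _decode(self, rel_codes, anchors):
      return box_list.BoxList(rel_codes + anchors.get())

  coder = CornerOffsetCoder()
  anchors = box_list.BoxList(
      tf.constant([[0.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 1.0]]))
  boxes = box_list.BoxList(
      tf.constant([[0.1, 0.1, 0.4, 0.4], [0.6, 0.6, 0.9, 0.9]]))
  rel_codes = coder.encode(boxes, anchors)  # shape [2, 4]
  # batch_decode expects [batch_size, num_anchors, code_size].
  decoded = batch_decode(tf.expand_dims(rel_codes, 0), coder, anchors)
  return decoded  # shape [1, 2, 4]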
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/box_coder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/__init__.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Preprocess images and bounding boxes for detection.
We perform two sets of operations in preprocessing stage:
(a) operations that are applied to both training and testing data,
(b) operations that are applied only to training data for the purpose of
data augmentation.
A preprocessing function receives a set of inputs,
e.g. an image and bounding boxes,
performs an operation on them, and returns them.
Some examples are: randomly cropping the image, randomly mirroring the image,
randomly changing the brightness, contrast, hue and
randomly jittering the bounding boxes.
The image is a rank 4 tensor: [1, height, width, channels] with
dtype=tf.float32. The groundtruth_boxes is a rank 2 tensor: [N, 4] where
in each row there is a box with [ymin xmin ymax xmax].
Boxes are in normalized coordinates meaning
their coordinate values range in [0, 1]
Important Note: In tensor_dict, images is a rank 4 tensor, but preprocessing
functions receive a rank 3 tensor for processing the image. Thus, inside the
preprocess function we squeeze the image to become a rank 3 tensor and then
we pass it to the functions. At the end of the preprocess we expand the image
back to rank 4.
"""
import tensorflow.compat.v1 as tf
from object_detection import box_list
def _flip_boxes_left_right(boxes):
"""Left-right flip the boxes.
Args:
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
Returns:
Flipped boxes.
"""
ymin, xmin, ymax, xmax = tf.split(value=boxes, num_or_size_splits=4, axis=1)
flipped_xmin = tf.subtract(1.0, xmax)
flipped_xmax = tf.subtract(1.0, xmin)
flipped_boxes = tf.concat([ymin, flipped_xmin, ymax, flipped_xmax], 1)
return flipped_boxes
def _flip_masks_left_right(masks):
"""Left-right flip masks.
Args:
masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
Returns:
flipped masks: rank 3 float32 tensor with shape
[num_instances, height, width] representing instance masks.
"""
return masks[:, :, ::-1]
def keypoint_flip_horizontal(keypoints, flip_point, flip_permutation,
scope=None):
"""Flips the keypoints horizontally around the flip_point.
This operation flips the x coordinate for each keypoint around the flip_point
and also permutes the keypoints in a manner specified by flip_permutation.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
flip_point: (float) scalar tensor representing the x coordinate to flip the
keypoints around.
flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation. This specifies the mapping from original keypoint indices
to the flipped keypoint indices. This is used primarily for keypoints
that are not reflection invariant. E.g. Suppose there are 3 keypoints
representing ['head', 'right_eye', 'left_eye'], then a logical choice for
flip_permutation might be [0, 2, 1] since we want to swap the 'left_eye'
and 'right_eye' after a horizontal flip.
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'FlipHorizontal'):
keypoints = tf.transpose(keypoints, [1, 0, 2])
keypoints = tf.gather(keypoints, flip_permutation)
v, u = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
u = flip_point * 2.0 - u
new_keypoints = tf.concat([v, u], 2)
new_keypoints = tf.transpose(new_keypoints, [1, 0, 2])
return new_keypoints
def random_horizontal_flip(image,
boxes=None,
masks=None,
keypoints=None,
keypoint_flip_permutation=None,
seed=None):
"""Randomly flips the image and detections horizontally.
The probability of flipping the image is 50%.
Args:
image: rank 3 float32 tensor with shape [height, width, channels].
boxes: (optional) rank 2 float32 tensor with shape [N, 4]
containing the bounding boxes.
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
Each row is in the form of [ymin, xmin, ymax, xmax].
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks. The masks
are of the same height, width as the input `image`.
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x
normalized coordinates.
keypoint_flip_permutation: rank 1 int32 tensor containing the keypoint flip
permutation.
seed: random seed
Returns:
image: image which is the same shape as input image.
If boxes, masks, keypoints, and keypoint_flip_permutation are not None,
the function also returns the following tensors.
boxes: rank 2 float32 tensor containing the bounding boxes -> [N, 4].
Boxes are in normalized form meaning their coordinates vary
between [0, 1].
masks: rank 3 float32 tensor with shape [num_instances, height, width]
containing instance masks.
keypoints: rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]
Raises:
ValueError: if keypoints are provided but keypoint_flip_permutation is not.
"""
def _flip_image(image):
# flip image
image_flipped = tf.image.flip_left_right(image)
return image_flipped
if keypoints is not None and keypoint_flip_permutation is None:
raise ValueError(
'keypoints are provided but keypoints_flip_permutation is not provided')
with tf.name_scope('RandomHorizontalFlip', values=[image, boxes]):
result = []
# random variable defining whether to do flip or not
do_a_flip_random = tf.greater(tf.random_uniform([], seed=seed), 0.5)
# flip image
image = tf.cond(do_a_flip_random, lambda: _flip_image(image), lambda: image)
result.append(image)
# flip boxes
if boxes is not None:
boxes = tf.cond(do_a_flip_random, lambda: _flip_boxes_left_right(boxes),
lambda: boxes)
result.append(boxes)
# flip masks
if masks is not None:
masks = tf.cond(do_a_flip_random, lambda: _flip_masks_left_right(masks),
lambda: masks)
result.append(masks)
# flip keypoints
if keypoints is not None and keypoint_flip_permutation is not None:
permutation = keypoint_flip_permutation
keypoints = tf.cond(
do_a_flip_random,
lambda: keypoint_flip_horizontal(keypoints, 0.5, permutation),
lambda: keypoints)
result.append(keypoints)
return tuple(result)
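# Hedged usage sketch (illustration only): random_horizontal_flip on a dummy
# image and a single box, assuming TF1 graph-mode semantics (tf.compat.v1) as
# used by the rest of this module.
def _example_random_horizontal_flip():
  image = tf.zeros([4, 4, 3], dtype=tf.float32)
  boxes = tf.constant([[0.0, 0.0, 0.5, 0.25]], dtype=tf.float32)
  flipped_image, flipped_boxes = random_horizontal_flip(image, boxes, seed=0)
  # With probability 0.5 the box becomes [0.0, 0.75, 0.5, 1.0]; otherwise the
  # inputs are returned unchanged.
  return flipped_image, flipped_boxes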
def _compute_new_static_size(image, min_dimension, max_dimension):
"""Compute new static shape for resize_to_range method."""
image_shape = image.get_shape().as_list()
orig_height = image_shape[0]
orig_width = image_shape[1]
num_channels = image_shape[2]
orig_min_dim = min(orig_height, orig_width)
# Calculates the larger of the possible sizes
large_scale_factor = min_dimension / float(orig_min_dim)
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = int(round(orig_height * large_scale_factor))
large_width = int(round(orig_width * large_scale_factor))
large_size = [large_height, large_width]
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = max(orig_height, orig_width)
small_scale_factor = max_dimension / float(orig_max_dim)
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = int(round(orig_height * small_scale_factor))
small_width = int(round(orig_width * small_scale_factor))
small_size = [small_height, small_width]
new_size = large_size
if max(large_size) > max_dimension:
new_size = small_size
else:
new_size = large_size
return tf.constant(new_size + [num_channels])
def _compute_new_dynamic_size(image, min_dimension, max_dimension):
"""Compute new dynamic shape for resize_to_range method."""
image_shape = tf.shape(image)
orig_height = tf.to_float(image_shape[0])
orig_width = tf.to_float(image_shape[1])
num_channels = image_shape[2]
orig_min_dim = tf.minimum(orig_height, orig_width)
# Calculates the larger of the possible sizes
min_dimension = tf.constant(min_dimension, dtype=tf.float32)
large_scale_factor = min_dimension / orig_min_dim
# Scaling orig_(height|width) by large_scale_factor will make the smaller
# dimension equal to min_dimension, save for floating point rounding errors.
# For reasonably-sized images, taking the nearest integer will reliably
# eliminate this error.
large_height = tf.to_int32(tf.round(orig_height * large_scale_factor))
large_width = tf.to_int32(tf.round(orig_width * large_scale_factor))
large_size = tf.stack([large_height, large_width])
if max_dimension:
# Calculates the smaller of the possible sizes, use that if the larger
# is too big.
orig_max_dim = tf.maximum(orig_height, orig_width)
max_dimension = tf.constant(max_dimension, dtype=tf.float32)
small_scale_factor = max_dimension / orig_max_dim
# Scaling orig_(height|width) by small_scale_factor will make the larger
# dimension equal to max_dimension, save for floating point rounding
# errors. For reasonably-sized images, taking the nearest integer will
# reliably eliminate this error.
small_height = tf.to_int32(tf.round(orig_height * small_scale_factor))
small_width = tf.to_int32(tf.round(orig_width * small_scale_factor))
small_size = tf.stack([small_height, small_width])
new_size = tf.cond(
tf.to_float(tf.reduce_max(large_size)) > max_dimension,
lambda: small_size, lambda: large_size)
else:
new_size = large_size
return tf.stack(tf.unstack(new_size) + [num_channels])
def resize_to_range(image,
masks=None,
min_dimension=None,
max_dimension=None,
method=tf.image.ResizeMethod.BILINEAR,
align_corners=False,
pad_to_max_dimension=False):
"""Resizes an image so its dimensions are within the provided value.
The output size can be described by two cases:
1. If the image can be rescaled so its minimum dimension is equal to the
provided value without the other dimension exceeding max_dimension,
then do so.
2. Otherwise, resize so the largest dimension is equal to max_dimension.
Args:
image: A 3D tensor of shape [height, width, channels]
masks: (optional) rank 3 float32 tensor with shape
[num_instances, height, width] containing instance masks.
min_dimension: (optional) (scalar) desired size of the smaller image
dimension.
max_dimension: (optional) (scalar) maximum allowed size
of the larger image dimension.
method: (optional) interpolation method used in resizing. Defaults to
BILINEAR.
align_corners: bool. If true, exactly align all 4 corners of the input
and output. Defaults to False.
pad_to_max_dimension: Whether to resize the image and pad it with zeros
so the resulting image is of the spatial size
[max_dimension, max_dimension]. If masks are included they are padded
similarly.
Returns:
Note that the position of the resized_image_shape changes based on whether
masks are present.
resized_image: A 3D tensor of shape [new_height, new_width, channels],
where the image has been resized (with bilinear interpolation) so that
min(new_height, new_width) == min_dimension or
max(new_height, new_width) == max_dimension.
resized_masks: If masks is not None, also outputs masks. A 3D tensor of
shape [num_instances, new_height, new_width].
resized_image_shape: A 1D tensor of shape [3] containing shape of the
resized image.
Raises:
ValueError: if the image is not a 3D tensor.
"""
if len(image.get_shape()) != 3:
raise ValueError('Image should be 3D tensor')
with tf.name_scope('ResizeToRange', values=[image, min_dimension]):
if image.get_shape().is_fully_defined():
new_size = _compute_new_static_size(image, min_dimension, max_dimension)
else:
new_size = _compute_new_dynamic_size(image, min_dimension, max_dimension)
new_image = tf.image.resize_images(
image, new_size[:-1], method=method, align_corners=align_corners)
if pad_to_max_dimension:
new_image = tf.image.pad_to_bounding_box(
new_image, 0, 0, max_dimension, max_dimension)
result = [new_image]
if masks is not None:
new_masks = tf.expand_dims(masks, 3)
new_masks = tf.image.resize_images(
new_masks,
new_size[:-1],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=align_corners)
new_masks = tf.squeeze(new_masks, 3)
if pad_to_max_dimension:
new_masks = tf.image.pad_to_bounding_box(
new_masks, 0, 0, max_dimension, max_dimension)
result.append(new_masks)
result.append(new_size)
return result
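# Hedged usage sketch (illustration only): a 100x200 image resized with
# min_dimension=50 and max_dimension=80 falls into case (2) above, because
# scaling the smaller side to 50 would push the larger side to 100 > 80.
def _example_resize_to_range():
  image = tf.zeros([100, 200, 3], dtype=tf.float32)
  resized_image, resized_shape = resize_to_range(
      image, min_dimension=50, max_dimension=80)
  # resized_shape evaluates to [40, 80, 3]: the larger dimension is clamped
  # to max_dimension.
  return resized_image, resized_shape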
def _copy_extra_fields(boxlist_to_copy_to, boxlist_to_copy_from):
"""Copies the extra fields of boxlist_to_copy_from to boxlist_to_copy_to.
Args:
boxlist_to_copy_to: BoxList to which extra fields are copied.
boxlist_to_copy_from: BoxList from which fields are copied.
Returns:
boxlist_to_copy_to with extra fields.
"""
for field in boxlist_to_copy_from.get_extra_fields():
boxlist_to_copy_to.add_field(field, boxlist_to_copy_from.get_field(field))
return boxlist_to_copy_to
def box_list_scale(boxlist, y_scale, x_scale, scope=None):
"""scale box coordinates in x and y dimensions.
Args:
boxlist: BoxList holding N boxes
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
boxlist: BoxList holding N boxes
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
y_min = y_scale * y_min
y_max = y_scale * y_max
x_min = x_scale * x_min
x_max = x_scale * x_max
scaled_boxlist = box_list.BoxList(
tf.concat([y_min, x_min, y_max, x_max], 1))
return _copy_extra_fields(scaled_boxlist, boxlist)
def keypoint_scale(keypoints, y_scale, x_scale, scope=None):
"""Scales keypoint coordinates in x and y dimensions.
Args:
keypoints: a tensor of shape [num_instances, num_keypoints, 2]
y_scale: (float) scalar tensor
x_scale: (float) scalar tensor
scope: name scope.
Returns:
new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
"""
with tf.name_scope(scope, 'Scale'):
y_scale = tf.cast(y_scale, tf.float32)
x_scale = tf.cast(x_scale, tf.float32)
new_keypoints = keypoints * [[[y_scale, x_scale]]]
return new_keypoints
def scale_boxes_to_pixel_coordinates(image, boxes, keypoints=None):
"""Scales boxes from normalized to pixel coordinates.
Args:
image: A 3D float32 tensor of shape [height, width, channels].
boxes: A 2D float32 tensor of shape [num_boxes, 4] containing the bounding
boxes in normalized coordinates. Each row is of the form
[ymin, xmin, ymax, xmax].
keypoints: (optional) rank 3 float32 tensor with shape
[num_instances, num_keypoints, 2]. The keypoints are in y-x normalized
coordinates.
Returns:
image: unchanged input image.
scaled_boxes: a 2D float32 tensor of shape [num_boxes, 4] containing the
bounding boxes in pixel coordinates.
scaled_keypoints: a 3D float32 tensor with shape
[num_instances, num_keypoints, 2] containing the keypoints in pixel
coordinates.
"""
boxlist = box_list.BoxList(boxes)
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
scaled_boxes = box_list_scale(boxlist, image_height, image_width).get()
result = [image, scaled_boxes]
if keypoints is not None:
scaled_keypoints = keypoint_scale(keypoints, image_height, image_width)
result.append(scaled_keypoints)
return tuple(result)
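# Hedged usage sketch (illustration only): converting normalized boxes to
# pixel coordinates for a 100x200 image.
def _example_scale_boxes_to_pixel_coordinates():
  image = tf.zeros([100, 200, 3], dtype=tf.float32)
  boxes = tf.constant([[0.1, 0.1, 0.5, 0.5]], dtype=tf.float32)
  image, scaled_boxes = scale_boxes_to_pixel_coordinates(image, boxes)
  # scaled_boxes evaluates to [[10., 20., 50., 100.]], i.e.
  # [ymin, xmin, ymax, xmax] in pixels.
  return image, scaled_boxes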
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/preprocessor.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Bounding Box List definition.
BoxList represents a list of bounding boxes as tensorflow
tensors, where each bounding box is represented as a row of 4 numbers,
[y_min, x_min, y_max, x_max]. It is assumed that all bounding boxes
within a given list correspond to a single image. See also
box_list_ops.py for common box related operations (such as area, iou, etc).
Optionally, users can add additional related fields (such as weights).
We assume the following things to be true about fields:
* they correspond to boxes in the box_list along the 0th dimension
* they have inferable rank at graph construction time
* all dimensions except for possibly the 0th can be inferred
(i.e., not None) at graph construction time.
Some other notes:
* Following tensorflow conventions, we use height, width ordering,
and correspondingly, y,x (or ymin, xmin, ymax, xmax) ordering
* Tensors are always provided as (flat) [N, 4] tensors.
"""
import tensorflow.compat.v1 as tf
class BoxList(object):
"""Box collection."""
def __init__(self, boxes):
"""Constructs box collection.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data or if bbox data is not in
float32 format.
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
if boxes.dtype != tf.float32:
raise ValueError('Invalid tensor type: should be tf.float32')
self.data = {'boxes': boxes}
def num_boxes(self):
"""Returns number of boxes held in collection.
Returns:
a tensor representing the number of boxes held in the collection.
"""
return tf.shape(self.data['boxes'])[0]
def num_boxes_static(self):
"""Returns number of boxes held in collection.
This number is inferred at graph construction time rather than run-time.
Returns:
Number of boxes held in collection (integer) or None if this is not
inferable at graph construction time.
"""
return self.data['boxes'].get_shape().as_list()[0]
def get_all_fields(self):
"""Returns all fields."""
return self.data.keys()
def get_extra_fields(self):
"""Returns all non-box fields (i.e., everything not named 'boxes')."""
return [k for k in self.data.keys() if k != 'boxes']
def add_field(self, field, field_data):
"""Add field to box list.
This method can be used to add related box data such as
weights/labels, etc.
Args:
field: a string key to access the data via `get`
field_data: a tensor containing the data to store in the BoxList
"""
self.data[field] = field_data
def has_field(self, field):
return field in self.data
def get(self):
"""Convenience function for accessing box coordinates.
Returns:
a tensor with shape [N, 4] representing box coordinates.
"""
return self.get_field('boxes')
def set(self, boxes):
"""Convenience function for setting box coordinates.
Args:
boxes: a tensor of shape [N, 4] representing box corners
Raises:
ValueError: if invalid dimensions for bbox data
"""
if len(boxes.get_shape()) != 2 or boxes.get_shape()[-1] != 4:
raise ValueError('Invalid dimensions for box data.')
self.data['boxes'] = boxes
def get_field(self, field):
"""Accesses a box collection and associated fields.
    This function returns the data stored under the specified field of the
    box collection; the box coordinates themselves are accessed via get().
Args:
field: this optional string parameter can be used to specify
a related field to be accessed.
Returns:
a tensor representing the box collection or an associated field.
Raises:
ValueError: if invalid field
"""
if not self.has_field(field):
raise ValueError('field ' + str(field) + ' does not exist')
return self.data[field]
def set_field(self, field, value):
"""Sets the value of a field.
Updates the field of a box_list with a given value.
Args:
field: (string) name of the field to set value.
value: the value to assign to the field.
Raises:
ValueError: if the box_list does not have specified field.
"""
if not self.has_field(field):
raise ValueError('field %s does not exist' % field)
self.data[field] = value
def get_center_coordinates_and_sizes(self, scope=None):
"""Computes the center coordinates, height and width of the boxes.
Args:
scope: name scope of the function.
Returns:
a list of 4 1-D tensors [ycenter, xcenter, height, width].
"""
with tf.name_scope(scope, 'get_center_coordinates_and_sizes'):
box_corners = self.get()
ymin, xmin, ymax, xmax = tf.unstack(tf.transpose(box_corners))
width = xmax - xmin
height = ymax - ymin
ycenter = ymin + height / 2.
xcenter = xmin + width / 2.
return [ycenter, xcenter, height, width]
def transpose_coordinates(self, scope=None):
"""Transpose the coordinate representation in a boxlist.
Args:
scope: name scope of the function.
"""
with tf.name_scope(scope, 'transpose_coordinates'):
y_min, x_min, y_max, x_max = tf.split(
value=self.get(), num_or_size_splits=4, axis=1)
self.set(tf.concat([x_min, y_min, x_max, y_max], 1))
def as_tensor_dict(self, fields=None):
"""Retrieves specified fields as a dictionary of tensors.
Args:
fields: (optional) list of fields to return in the dictionary.
If None (default), all fields are returned.
Returns:
tensor_dict: A dictionary of tensors specified by fields.
Raises:
ValueError: if specified field is not contained in boxlist.
"""
tensor_dict = {}
if fields is None:
fields = self.get_all_fields()
for field in fields:
if not self.has_field(field):
raise ValueError('boxlist must contain all specified fields')
tensor_dict[field] = self.get_field(field)
return tensor_dict
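# Hedged usage sketch (illustration only): building a BoxList, attaching an
# extra per-box field and reading back derived quantities, assuming TF1
# graph-mode semantics (tf.compat.v1) as used elsewhere in this package.
def _example_box_list_usage():
  boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                       [0.2, 0.2, 0.6, 0.8]], dtype=tf.float32)
  boxlist = BoxList(boxes)
  boxlist.add_field('scores', tf.constant([0.9, 0.75]))
  ycenter, xcenter, height, width = boxlist.get_center_coordinates_and_sizes()
  return boxlist.get_field('scores'), ycenter, xcenter, height, width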
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/box_list.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Region Similarity Calculators for BoxLists.
Region Similarity Calculators compare a pairwise measure of similarity
between the boxes in two BoxLists.
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow.compat.v1 as tf
def area(boxlist, scope=None):
"""Computes area of boxes.
Args:
boxlist: BoxList holding N boxes
scope: name scope.
Returns:
a tensor with shape [N] representing box areas.
"""
with tf.name_scope(scope, 'Area'):
y_min, x_min, y_max, x_max = tf.split(
value=boxlist.get(), num_or_size_splits=4, axis=1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), [1])
def intersection(boxlist1, boxlist2, scope=None):
"""Compute pairwise intersection areas between boxes.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise intersections
"""
with tf.name_scope(scope, 'Intersection'):
y_min1, x_min1, y_max1, x_max1 = tf.split(
value=boxlist1.get(), num_or_size_splits=4, axis=1)
y_min2, x_min2, y_max2, x_max2 = tf.split(
value=boxlist2.get(), num_or_size_splits=4, axis=1)
all_pairs_min_ymax = tf.minimum(y_max1, tf.transpose(y_max2))
all_pairs_max_ymin = tf.maximum(y_min1, tf.transpose(y_min2))
intersect_heights = tf.maximum(0.0, all_pairs_min_ymax - all_pairs_max_ymin)
all_pairs_min_xmax = tf.minimum(x_max1, tf.transpose(x_max2))
all_pairs_max_xmin = tf.maximum(x_min1, tf.transpose(x_min2))
intersect_widths = tf.maximum(0.0, all_pairs_min_xmax - all_pairs_max_xmin)
return intersect_heights * intersect_widths
def iou(boxlist1, boxlist2, scope=None):
"""Computes pairwise intersection-over-union between box collections.
Args:
boxlist1: BoxList holding N boxes
boxlist2: BoxList holding M boxes
scope: name scope.
Returns:
a tensor with shape [N, M] representing pairwise iou scores.
"""
with tf.name_scope(scope, 'IOU'):
intersections = intersection(boxlist1, boxlist2)
areas1 = area(boxlist1)
areas2 = area(boxlist2)
unions = (
tf.expand_dims(areas1, 1) + tf.expand_dims(areas2, 0) - intersections)
return tf.where(
tf.equal(intersections, 0.0),
tf.zeros_like(intersections), tf.truediv(intersections, unions))
class RegionSimilarityCalculator(object):
"""Abstract base class for region similarity calculator."""
__metaclass__ = ABCMeta
def compare(self, boxlist1, boxlist2, scope=None):
"""Computes matrix of pairwise similarity between BoxLists.
This op (to be overridden) computes a measure of pairwise similarity between
the boxes in the given BoxLists. Higher values indicate more similarity.
Note that this method simply measures similarity and does not explicitly
perform a matching.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
scope: Op scope name. Defaults to 'Compare' if None.
Returns:
a (float32) tensor of shape [N, M] with pairwise similarity score.
"""
with tf.name_scope(scope, 'Compare', [boxlist1, boxlist2]) as scope:
return self._compare(boxlist1, boxlist2)
@abstractmethod
def _compare(self, boxlist1, boxlist2):
pass
class IouSimilarity(RegionSimilarityCalculator):
"""Class to compute similarity based on Intersection over Union (IOU) metric.
This class computes pairwise similarity between two BoxLists based on IOU.
"""
def _compare(self, boxlist1, boxlist2):
"""Compute pairwise IOU similarity between the two BoxLists.
Args:
boxlist1: BoxList holding N boxes.
boxlist2: BoxList holding M boxes.
Returns:
A tensor with shape [N, M] representing pairwise iou scores.
"""
return iou(boxlist1, boxlist2)
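# Hedged usage sketch (illustration only): pairwise IoU between two one-box
# collections. The two unit squares below overlap on a 0.5 x 1.0 strip, so
# their IoU is 0.5 / 1.5 = 1/3.
def _example_iou_similarity():
  from object_detection import box_list
  boxlist1 = box_list.BoxList(
      tf.constant([[0.0, 0.0, 1.0, 1.0]], dtype=tf.float32))
  boxlist2 = box_list.BoxList(
      tf.constant([[0.0, 0.5, 1.0, 1.5]], dtype=tf.float32))
  similarity = IouSimilarity().compare(boxlist1, boxlist2)  # shape [1, 1]
  return similarity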
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/region_similarity_calculator.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow Example proto decoder for object detection.
A decoder to decode string tensors containing serialized tensorflow.Example
protos for object detection.
"""
import tensorflow.compat.v1 as tf
def _get_source_id_from_encoded_image(parsed_tensors):
return tf.strings.as_string(
tf.strings.to_hash_bucket_fast(parsed_tensors['image/encoded'],
2**63 - 1))
class TfExampleDecoder(object):
"""Tensorflow Example proto decoder."""
def __init__(self, include_mask=False, regenerate_source_id=False):
self._include_mask = include_mask
self._regenerate_source_id = regenerate_source_id
self._keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string),
'image/source_id': tf.FixedLenFeature((), tf.string, ''),
'image/height': tf.FixedLenFeature((), tf.int64, -1),
'image/width': tf.FixedLenFeature((), tf.int64, -1),
'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
'image/object/class/label': tf.VarLenFeature(tf.int64),
'image/object/area': tf.VarLenFeature(tf.float32),
'image/object/is_crowd': tf.VarLenFeature(tf.int64),
}
if include_mask:
self._keys_to_features.update({
'image/object/mask':
tf.VarLenFeature(tf.string),
})
def _decode_image(self, parsed_tensors):
"""Decodes the image and set its static shape."""
image = tf.io.decode_image(parsed_tensors['image/encoded'], channels=3)
image.set_shape([None, None, 3])
return image
def _decode_boxes(self, parsed_tensors):
"""Concat box coordinates in the format of [ymin, xmin, ymax, xmax]."""
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.stack([ymin, xmin, ymax, xmax], axis=-1)
def _decode_masks(self, parsed_tensors):
"""Decode a set of PNG masks to the tf.float32 tensors."""
def _decode_png_mask(png_bytes):
mask = tf.squeeze(
tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
mask = tf.cast(mask, dtype=tf.float32)
mask.set_shape([None, None])
return mask
height = parsed_tensors['image/height']
width = parsed_tensors['image/width']
masks = parsed_tensors['image/object/mask']
return tf.cond(
tf.greater(tf.shape(masks)[0], 0),
lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
lambda: tf.zeros([0, height, width], dtype=tf.float32))
def _decode_areas(self, parsed_tensors):
xmin = parsed_tensors['image/object/bbox/xmin']
xmax = parsed_tensors['image/object/bbox/xmax']
ymin = parsed_tensors['image/object/bbox/ymin']
ymax = parsed_tensors['image/object/bbox/ymax']
return tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/area'])[0], 0),
lambda: parsed_tensors['image/object/area'],
lambda: (xmax - xmin) * (ymax - ymin))
def decode(self, serialized_example):
"""Decode the serialized example.
Args:
serialized_example: a single serialized tf.Example string.
Returns:
decoded_tensors: a dictionary of tensors with the following fields:
- image: a uint8 tensor of shape [None, None, 3].
- source_id: a string scalar tensor.
- height: an integer scalar tensor.
- width: an integer scalar tensor.
- groundtruth_classes: a int64 tensor of shape [None].
- groundtruth_is_crowd: a bool tensor of shape [None].
- groundtruth_area: a float32 tensor of shape [None].
- groundtruth_boxes: a float32 tensor of shape [None, 4].
- groundtruth_instance_masks: a float32 tensor of shape
[None, None, None].
- groundtruth_instance_masks_png: a string tensor of shape [None].
"""
parsed_tensors = tf.io.parse_single_example(
serialized_example, self._keys_to_features)
for k in parsed_tensors:
if isinstance(parsed_tensors[k], tf.SparseTensor):
if parsed_tensors[k].dtype == tf.string:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value='')
else:
parsed_tensors[k] = tf.sparse_tensor_to_dense(
parsed_tensors[k], default_value=0)
image = self._decode_image(parsed_tensors)
boxes = self._decode_boxes(parsed_tensors)
areas = self._decode_areas(parsed_tensors)
decode_image_shape = tf.logical_or(
tf.equal(parsed_tensors['image/height'], -1),
tf.equal(parsed_tensors['image/width'], -1))
image_shape = tf.cast(tf.shape(image), dtype=tf.int64)
parsed_tensors['image/height'] = tf.where(decode_image_shape,
image_shape[0],
parsed_tensors['image/height'])
parsed_tensors['image/width'] = tf.where(decode_image_shape, image_shape[1],
parsed_tensors['image/width'])
is_crowds = tf.cond(
tf.greater(tf.shape(parsed_tensors['image/object/is_crowd'])[0], 0),
lambda: tf.cast(parsed_tensors['image/object/is_crowd'], dtype=tf.bool),
lambda: tf.zeros_like(parsed_tensors['image/object/class/label'], dtype=tf.bool)) # pylint: disable=line-too-long
if self._regenerate_source_id:
source_id = _get_source_id_from_encoded_image(parsed_tensors)
else:
source_id = tf.cond(
tf.greater(tf.strings.length(parsed_tensors['image/source_id']),
0), lambda: parsed_tensors['image/source_id'],
lambda: _get_source_id_from_encoded_image(parsed_tensors))
if self._include_mask:
masks = self._decode_masks(parsed_tensors)
decoded_tensors = {
'image': image,
'source_id': source_id,
'height': parsed_tensors['image/height'],
'width': parsed_tensors['image/width'],
'groundtruth_classes': parsed_tensors['image/object/class/label'],
'groundtruth_is_crowd': is_crowds,
'groundtruth_area': areas,
'groundtruth_boxes': boxes,
}
if self._include_mask:
decoded_tensors.update({
'groundtruth_instance_masks': masks,
'groundtruth_instance_masks_png': parsed_tensors['image/object/mask'],
})
return decoded_tensors
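# Hedged usage sketch (illustration only): typical use inside a tf.data input
# pipeline, mapping the decoder over a TFRecord dataset of serialized
# tf.Example protos. The file path below is a placeholder, not a real file.
def _example_tf_example_decoder_usage():
  decoder = TfExampleDecoder(include_mask=False)
  dataset = tf.data.TFRecordDataset(['/path/to/coco_train.tfrecord'])
  dataset = dataset.map(decoder.decode)
  return dataset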
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/tf_example_decoder.py |
# Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Argmax matcher implementation.
This class takes a similarity matrix and matches columns to rows based on the
maximum value per column. One can specify a matched_threshold to prevent
columns from matching to rows (generally resulting in a negative training
example) and an unmatched_threshold to ignore the match (generally
resulting in neither a positive nor a negative training example).
This matcher is used in Fast(er)-RCNN.
Note: matchers are used in TargetAssigners. There is a create_target_assigner
factory function for popular implementations.
"""
import tensorflow.compat.v1 as tf
from object_detection import matcher
from object_detection import shape_utils
class ArgMaxMatcher(matcher.Matcher):
"""Matcher based on highest value.
This class computes matches from a similarity matrix. Each column is matched
to a single row.
To support object detection target assignment this class enables setting both
matched_threshold (upper threshold) and unmatched_threshold (lower thresholds)
defining three categories of similarity which define whether examples are
positive, negative, or ignored:
(1) similarity >= matched_threshold: Highest similarity. Matched/Positive!
(2) matched_threshold > similarity >= unmatched_threshold: Medium similarity.
Depending on negatives_lower_than_unmatched, this is either
Unmatched/Negative OR Ignore.
(3) unmatched_threshold > similarity: Lowest similarity. Depending on flag
negatives_lower_than_unmatched, either Unmatched/Negative OR Ignore.
For ignored matches this class sets the values in the Match object to -2.
"""
def __init__(self,
matched_threshold,
unmatched_threshold=None,
negatives_lower_than_unmatched=True,
force_match_for_each_row=False):
"""Construct ArgMaxMatcher.
Args:
matched_threshold: Threshold for positive matches. Positive if
sim >= matched_threshold, where sim is the maximum value of the
similarity matrix for a given column. Set to None for no threshold.
unmatched_threshold: Threshold for negative matches. Negative if
sim < unmatched_threshold. Defaults to matched_threshold
when set to None.
negatives_lower_than_unmatched: Boolean which defaults to True. If True
then negative matches are the ones below the unmatched_threshold,
whereas ignored matches are in between the matched and unmatched
threshold. If False, then negative matches are in between the matched
and unmatched threshold, and everything lower than unmatched is ignored.
force_match_for_each_row: If True, ensures that each row is matched to
at least one column (which is not guaranteed otherwise if the
matched_threshold is high). Defaults to False. See
argmax_matcher_test.testMatcherForceMatch() for an example.
Raises:
ValueError: if unmatched_threshold is set but matched_threshold is not set
or if unmatched_threshold > matched_threshold.
"""
if (matched_threshold is None) and (unmatched_threshold is not None):
      raise ValueError('Need to also define matched_threshold when '
                       'unmatched_threshold is defined')
self._matched_threshold = matched_threshold
if unmatched_threshold is None:
self._unmatched_threshold = matched_threshold
else:
if unmatched_threshold > matched_threshold:
        raise ValueError('unmatched_threshold needs to be smaller than or '
                         'equal to matched_threshold')
self._unmatched_threshold = unmatched_threshold
if not negatives_lower_than_unmatched:
if self._unmatched_threshold == self._matched_threshold:
        raise ValueError('When negatives are in between matched and '
                         'unmatched thresholds, these cannot be of equal '
                         'value. matched: %s, unmatched: %s' %
                         (self._matched_threshold, self._unmatched_threshold))
self._force_match_for_each_row = force_match_for_each_row
self._negatives_lower_than_unmatched = negatives_lower_than_unmatched
def _match(self, similarity_matrix):
"""Tries to match each column of the similarity matrix to a row.
Args:
similarity_matrix: tensor of shape [N, M] representing any similarity
metric.
Returns:
Match object with corresponding matches for each of M columns.
"""
def _match_when_rows_are_empty():
"""Performs matching when the rows of similarity matrix are empty.
When the rows are empty, all detections are false positives. So we return
a tensor of -1's to indicate that the columns do not match to any rows.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
return -1 * tf.ones([similarity_matrix_shape[1]], dtype=tf.int32)
def _match_when_rows_are_non_empty():
"""Performs matching when the rows of similarity matrix are non empty.
Returns:
matches: int32 tensor indicating the row each column matches to.
"""
# Matches for each column
matches = tf.argmax(similarity_matrix, 0, output_type=tf.int32)
# Deal with matched and unmatched threshold
if self._matched_threshold is not None:
# Get logical indices of ignored and unmatched columns as tf.int64
matched_vals = tf.reduce_max(similarity_matrix, 0)
below_unmatched_threshold = tf.greater(self._unmatched_threshold,
matched_vals)
between_thresholds = tf.logical_and(
tf.greater_equal(matched_vals, self._unmatched_threshold),
tf.greater(self._matched_threshold, matched_vals))
if self._negatives_lower_than_unmatched:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-1)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-2)
else:
matches = self._set_values_using_indicator(matches,
below_unmatched_threshold,
-2)
matches = self._set_values_using_indicator(matches,
between_thresholds,
-1)
if self._force_match_for_each_row:
similarity_matrix_shape = shape_utils.combined_static_and_dynamic_shape(
similarity_matrix)
force_match_column_ids = tf.argmax(similarity_matrix, 1,
output_type=tf.int32)
force_match_column_indicators = tf.one_hot(
force_match_column_ids, depth=similarity_matrix_shape[1])
force_match_row_ids = tf.argmax(force_match_column_indicators, 0,
output_type=tf.int32)
force_match_column_mask = tf.cast(
tf.reduce_max(force_match_column_indicators, 0), tf.bool)
final_matches = tf.where(force_match_column_mask,
force_match_row_ids, matches)
return final_matches
else:
return matches
if similarity_matrix.shape.is_fully_defined():
if similarity_matrix.shape[0] == 0:
return _match_when_rows_are_empty()
else:
return _match_when_rows_are_non_empty()
else:
return tf.cond(
tf.greater(tf.shape(similarity_matrix)[0], 0),
_match_when_rows_are_non_empty, _match_when_rows_are_empty)
def _set_values_using_indicator(self, x, indicator, val):
"""Set the indicated fields of x to val.
Args:
x: tensor.
indicator: boolean with same shape as x.
val: scalar with value to set.
Returns:
modified tensor.
"""
indicator = tf.cast(indicator, x.dtype)
return x * (1 - indicator) + val * indicator
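# Hedged usage sketch (illustration only): threshold semantics on a toy
# similarity matrix. With matched_threshold=0.7 and unmatched_threshold=0.3,
# column maxima of 0.8 / 0.5 / 0.1 yield a positive match, an ignored match
# (-2) and a negative match (-1). _match is called directly here only to show
# the raw result; real callers typically go through the Matcher wrapper from
# matcher.py.
def _example_argmax_matcher():
  similarity = tf.constant([[0.8, 0.5, 0.1],
                            [0.3, 0.2, 0.0]], dtype=tf.float32)
  argmax_matcher = ArgMaxMatcher(matched_threshold=0.7,
                                 unmatched_threshold=0.3)
  return argmax_matcher._match(similarity)  # -> [0, -2, -1]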
| DeepLearningExamples-master | TensorFlow2/Detection/Efficientdet/object_detection/argmax_matcher.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
# Used to plot the Co-ordinates
def gen_plot(df):
plt.figure(figsize=(11, 11))
plt.scatter(
df["xcord"][:1],
df["ycord"][:1],
label="Depot",
color="Green",
marker="o",
s=25,
)
plt.scatter(
df["xcord"][1::],
df["ycord"][1::],
label="Locations",
color="Red",
marker="o",
s=25,
)
plt.xlabel("x - axis")
# frequency label
plt.ylabel("y - axis")
# plot title
plt.title("Simplified Map")
# showing legend
plt.legend()
for i, label in enumerate(df.index.values):
plt.annotate(
label,
(df["xcord"][i], df["ycord"][i]),
fontproperties=fm.FontProperties(size=12),
)
return plt
# Used to plot arrows
def add_arrows(df, routes, plt, color="green"):
prev_cord = ()
for i, label in enumerate(routes["route"].to_numpy()):
if i > 0:
plt.annotate(
"",
xy=(df["xcord"][label], df["ycord"][label]),
xytext=prev_cord,
arrowprops=dict(
arrowstyle="simple, head_length=0.5, head_width=0.5, tail_width=0.15", # noqa
connectionstyle="arc3",
color=color,
mutation_scale=20,
ec="black",
),
label="vehicle-1",
)
prev_cord = df["xcord"][label], df["ycord"][label]
return plt
# Prints vehicle routes
def show_vehicle_routes(routes, locations):
vehicles = routes.truck_id.unique().to_numpy()
for id in vehicles:
print("For vehicle -", id, "route is: \n")
route = routes[routes.truck_id == id]
path = ""
route_ids = route.route.to_numpy()
for index, route_id in enumerate(route_ids):
path += locations[route_id]
if index != (len(route_ids) - 1):
path += "->"
print(path + "\n\n")
def map_vehicle_routes(df, route, colors):
plt = gen_plot(df)
veh_ids = route.truck_id.unique().to_numpy()
idx = 0
vid_map = {}
for v_id in veh_ids:
vid_map[v_id] = idx
idx = idx + 1
for v_id in veh_ids:
plt = add_arrows(
df, route[route.truck_id == v_id], plt, color=colors[vid_map[v_id]]
)
return plt
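# Hedged usage sketch (illustration only): `routes` is expected to look like
# the cuOpt solver output used in the routing notebooks -- a cuDF dataframe
# with one row per stop, a `truck_id` column and a `route` column of location
# indices. The tiny frame below is made up for illustration.
def _example_show_vehicle_routes():
    import cudf

    routes = cudf.DataFrame(
        {"truck_id": [0, 0, 0, 1, 1, 1], "route": [0, 1, 0, 0, 2, 0]}
    )
    show_vehicle_routes(routes, ["Depot", "A", "B"])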
| cuOpt-Resources-branch-22.12 | notebooks/routing/python/notebook_utils/notebook_helpers.py |
# SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import pandas as pd
# Used to plot the Co-ordinates
def gen_plot(df):
plt.figure(figsize=(11, 11))
plt.scatter(
df["xcord"][:1],
df["ycord"][:1],
label="Depot",
color="Green",
marker="o",
s=100,
)
plt.scatter(
df["xcord"][1::],
df["ycord"][1::],
label="Locations",
color="Red",
marker="o",
s=100,
)
plt.xlabel("x - axis")
# frequency label
plt.ylabel("y - axis")
# plot title
plt.title("Simplified Map")
# showing legend
plt.legend()
for i, label in enumerate(df.index.values):
plt.annotate(
label,
(df["xcord"][i], df["ycord"][i]),
fontproperties=fm.FontProperties(size=16),
)
return plt
# Used to plot arrows
def add_arrows(df, route, plt, color="green"):
prev_cord = ()
for i, label in enumerate(route):
if i > 0:
plt.annotate(
"",
xy=(df["xcord"][label], df["ycord"][label]),
xytext=prev_cord,
arrowprops=dict(
arrowstyle="simple, head_length=0.5, head_width=0.5, tail_width=0.15", # noqa
connectionstyle="arc3",
color=color,
mutation_scale=20,
ec="black",
),
label="vehicle-1",
)
prev_cord = df["xcord"][label], df["ycord"][label]
return plt
# Convert the solver response from the server to a cuDF dataframe
# for waypoint graph problems
def get_solution_df(resp):
solution = resp["vehicle_data"]
df = {}
df["route"] = []
df["truck_id"] = []
df["location"] = []
types = []
for vid, route in solution.items():
df["location"] = df["location"] + route["route"]
df["truck_id"] = df["truck_id"] + [vid] * len(route["route"])
if "type" in list(route.keys()):
types = types + route["type"]
if len(types) != 0:
df["types"] = types
df["route"] = df["location"]
return pd.DataFrame(df)
# Prints vehicle routes
def show_vehicle_routes(resp, locations):
solution = resp["vehicle_data"]
for id in list(solution.keys()):
route = solution[id]["route"]
print("For vehicle -", id, "route is: \n")
path = ""
for index, route_id in enumerate(route):
path += locations[route_id]
if index != (len(route) - 1):
path += "->"
print(path + "\n\n")
# Map vehicle routes
def map_vehicle_routes(df, resp, colors):
solution = resp["vehicle_data"]
plt = gen_plot(df)
veh_ids = list(solution.keys())
idx = 0
vid_map = {}
for v_id in veh_ids:
vid_map[v_id] = idx
idx = idx + 1
for v_id in veh_ids:
plt = add_arrows(
df, solution[v_id]["route"], plt, color=colors[vid_map[v_id]]
)
return plt
def create_from_file(file_path, is_pdp=False):
node_list = []
with open(file_path, "rt") as f:
count = 1
for line in f:
if is_pdp and count == 1:
vehicle_num, vehicle_capacity, speed = line.split()
elif not is_pdp and count == 5:
vehicle_num, vehicle_capacity = line.split()
elif is_pdp:
node_list.append(line.split())
elif count >= 10:
node_list.append(line.split())
count += 1
# if count == 36:
# break
vehicle_num = int(vehicle_num)
vehicle_capacity = int(vehicle_capacity)
df = pd.DataFrame(
columns=[
"vertex",
"xcord",
"ycord",
"demand",
"earliest_time",
"latest_time",
"service_time",
"pickup_index",
"delivery_index",
]
)
for item in node_list:
row = {
"vertex": int(item[0]),
"xcord": float(item[1]),
"ycord": float(item[2]),
"demand": int(item[3]),
"earliest_time": int(item[4]),
"latest_time": int(item[5]),
"service_time": int(item[6]),
}
if is_pdp:
row["pickup_index"] = int(item[7])
row["delivery_index"] = int(item[8])
df = pd.concat([df, pd.DataFrame(row, index=[0])], ignore_index=True)
return df, vehicle_capacity, vehicle_num
def print_data(data, completed_tasks):
print("Completed tasks :", completed_tasks)
print("Pending tasks :", data["task_locations"])
print("Pickup indices :", data["pickup_indices"])
print("Delivery indices :", data["delivery_indices"])
print("Task Earliest :", data["task_earliest_time"])
print("Task Latest :", data["task_latest_time"])
print("Task Service :", data["task_service_time"])
print("Vehicle locations :", data["vehicle_locations"])
print("Vehicle earliest :", data["vehicle_earliest"])
print("Order vehicle match :", data["order_vehicle_match"])
def print_vehicle_data(response):
for veh_id, veh_data in response["vehicle_data"].items():
print("\nVehicle Id :", veh_id)
print("Route :", veh_data["route"])
print("Type :", veh_data["task_type"])
print("Task Id :", veh_data["task_id"])
print("Arrival Stamp :", veh_data["arrival_stamp"])
print("--------------------------------------------------------")
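# Hedged usage sketch (illustration only): the structure of `resp` below
# mimics the cuOpt managed-service response handled by the helpers above
# (a dict of per-vehicle routes under "vehicle_data"); the values themselves
# are made up for illustration.
def _example_solution_helpers():
    resp = {
        "vehicle_data": {
            "0": {"route": [0, 2, 1, 0]},
            "1": {"route": [0, 3, 0]},
        }
    }
    solution_df = get_solution_df(resp)
    show_vehicle_routes(resp, ["Depot", "A", "B", "C"])
    return solution_df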
| cuOpt-Resources-branch-22.12 | notebooks/routing/service/notebook_utils/notebook_helpers.py |
#!/usr/bin/env python3
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# Converts OpenRM binhex-encoded images to Nouveau-compatible binary blobs
# See nouveau_firmware_layout.ods for documentation on the file format
import sys
import os
import argparse
import shutil
import re
import gzip
import struct
class MyException(Exception):
pass
def round_up_to_base(x, base = 10):
return x + (base - x) % base
def getbytes(filename, array):
"""Extract the bytes for the given array in the given file.
:param filename: the file to parse
:param array: the name of the array to parse
:returns: byte array
This function scans the file for the array and returns a bytearray of
its contents, uncompressing the data if it is tagged as compressed.
This function assumes that each array is immediately preceded with a comment
section that specifies whether the array is compressed and how many bytes of
data there should be. Example:
#if defined(BINDATA_INCLUDE_DATA)
//
// FUNCTION: ksec2GetBinArchiveSecurescrubUcode_AD10X("header_prod")
// FILE NAME: kernel/inc/securescrub/bin/ad10x/g_securescrubuc_sec2_ad10x_boot_from_hs_prod.h
// FILE TYPE: TEXT
// VAR NAME: securescrub_ucode_header_ad10x_boot_from_hs
// COMPRESSION: YES
// COMPLEX_STRUCT: NO
// DATA SIZE (bytes): 36
// COMPRESSED SIZE (bytes): 27
//
static BINDATA_CONST NvU8 ksec2BinArchiveSecurescrubUcode_AD10X_header_prod_data[] =
{
0x63, 0x60, 0x00, 0x02, 0x46, 0x20, 0x96, 0x02, 0x62, 0x66, 0x08, 0x13, 0x4c, 0x48, 0x42, 0x69,
0x20, 0x00, 0x00, 0x30, 0x39, 0x0a, 0xfc, 0x24, 0x00, 0x00, 0x00,
};
#endif // defined(BINDATA_INCLUDE_DATA)
"""
with open(filename) as f:
for line in f:
if "COMPRESSION: NO" in line:
compressed = False
if "COMPRESSION: YES" in line:
compressed = True
            m = re.search(r"DATA SIZE \(bytes\): (\d+)", line)
if m:
data_size = int(m.group(1))
            m = re.search(r"COMPRESSED SIZE \(bytes\): (\d+)", line)
if m:
compressed_size = int(m.group(1))
if "static BINDATA_CONST NvU8 " + array in line:
break
else:
raise MyException(f"array {array} not found in {filename}")
output = b''
for line in f:
if "};" in line:
break
bytes = [int(b, 16) for b in re.findall('0x[0-9a-f][0-9a-f]', line)]
if len(bytes) > 0:
output += struct.pack(f"{len(bytes)}B", *bytes)
if len(output) == 0:
raise MyException(f"no data found for {array}")
if compressed:
if len(output) != compressed_size:
raise MyException(f"compressed array {array} in {filename} should be {compressed_size} bytes but is actually {len(output)}.")
gzipheader = struct.pack("<4BL2B", 0x1f, 0x8b, 8, 0, 0, 0, 3)
output = gzip.decompress(gzipheader + output)
if len(output) != data_size:
raise MyException(f"array {array} in {filename} decompressed to {len(output)} bytes but should have been {data_size} bytes.")
return output
else:
if len(output) != data_size:
            raise MyException(f"array {array} in {filename} should be {data_size} bytes but is actually {len(output)}.")
return output
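# Hedged usage sketch (illustration only): a direct call to getbytes() names
# one of the generated bindata C files and the array to extract. The path and
# array name below follow the naming pattern used by the helpers further down
# but are placeholders, not guaranteed to exist in every OpenRM checkout.
def example_getbytes_usage():
    filename = "src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_GA102.c"
    array = "kgspBinArchiveGspRmBoot_GA102_ucode_image_prod_data"
    data = getbytes(filename, array)
    print(f"extracted {len(data)} bytes from {array}")
    return data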
# GSP bootloader
def bootloader(gpu, type):
global outputpath
global version
GPU=gpu.upper()
filename = f"src/nvidia/generated/g_bindata_kgspGetBinArchiveGspRmBoot_{GPU}.c"
print(f"Creating nvidia/{gpu}/gsp/bootloader-{version}.bin")
os.makedirs(f"{outputpath}/nvidia/{gpu}/gsp/", exist_ok = True)
with open(f"{outputpath}/nvidia/{gpu}/gsp/bootloader-{version}.bin", "wb") as f:
# Extract the actual bootloader firmware
array = f"kgspBinArchiveGspRmBoot_{GPU}_ucode_image{type}data"
firmware = getbytes(filename, array)
firmware_size = len(firmware)
# Extract the descriptor (RM_RISCV_UCODE_DESC)
array = f"kgspBinArchiveGspRmBoot_{GPU}_ucode_desc{type}data"
descriptor = getbytes(filename, array)
descriptor_size = len(descriptor)
# First, add the nvfw_bin_hdr header
total_size = round_up_to_base(24 + firmware_size + descriptor_size, 256)
firmware_offset = 24 + descriptor_size
f.write(struct.pack("<6L", 0x10de, 1, total_size, 24, firmware_offset, firmware_size))
# Second, add the descriptor
f.write(descriptor)
# Finally, the actual bootloader image
f.write(firmware)
# GSP Booter load and unload
def booter(gpu, load, sigsize):
global outputpath
global version
GPU = gpu.upper()
LOAD = load.capitalize()
filename = f"src/nvidia/generated/g_bindata_kgspGetBinArchiveBooter{LOAD}Ucode_{GPU}.c"
print(f"Creating nvidia/{gpu}/gsp/booter_{load}-{version}.bin")
os.makedirs(f"{outputpath}/nvidia/{gpu}/gsp/", exist_ok = True)
with open(f"{outputpath}/nvidia/{gpu}/gsp/booter_{load}-{version}.bin", "wb") as f:
# Extract the actual scrubber firmware
array = f"kgspBinArchiveBooter{LOAD}Ucode_{GPU}_image_prod_data"
firmware = getbytes(filename, array)
firmware_size = len(firmware)
# Extract the signatures
array = f"kgspBinArchiveBooter{LOAD}Ucode_{GPU}_sig_prod_data"
signatures = getbytes(filename, array)
signatures_size = len(signatures)
if signatures_size % sigsize:
            raise MyException(f"signature file size for {array} is not a multiple of {sigsize}")
        num_sigs = int(signatures_size / sigsize)
if num_sigs < 1:
raise MyException(f"invalid number of signatures {num_sigs}")
# First, add the nvfw_bin_hdr header
total_size = round_up_to_base(120 + signatures_size + firmware_size, 256)
firmware_offset = 120 + signatures_size
f.write(struct.pack("<6L", 0x10de, 1, total_size, 24, firmware_offset, firmware_size))
# Second, add the nvfw_hs_header_v2 header
patch_loc_offset = 60 + signatures_size
patch_sig_offset = patch_loc_offset + 4
meta_data_offset = patch_sig_offset + 4
num_sig_offset = meta_data_offset + 12
header_offset = num_sig_offset + 4
f.write(struct.pack("<9L", 60, signatures_size, patch_loc_offset,
patch_sig_offset, meta_data_offset, 12,
num_sig_offset, header_offset, 36))
# Third, the actual signatures
f.write(signatures)
# Extract the patch location
array = f"kgspBinArchiveBooter{LOAD}Ucode_{GPU}_patch_loc_data"
bytes = getbytes(filename, array)
patchloc = struct.unpack("<L", bytes)[0]
# Extract the patch meta variables
array = f"kgspBinArchiveBooter{LOAD}Ucode_{GPU}_patch_meta_data"
bytes = getbytes(filename, array)
fuse_ver, engine_id, ucode_id = struct.unpack("<LLL", bytes)
# Fourth, patch_loc[], patch_sig[], fuse_ver, engine_id, ucode_id, and num_sigs
f.write(struct.pack("<6L", patchloc, 0, fuse_ver, engine_id, ucode_id, num_sigs))
# Extract the descriptor (nvkm_gsp_booter_fw_hdr)
array = f"kgspBinArchiveBooter{LOAD}Ucode_{GPU}_header_prod_data"
descriptor = getbytes(filename, array)
# Fifth, the descriptor
f.write(descriptor)
# And finally, the actual Booter image
f.write(firmware)
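# Resulting booter_{load}-{version}.bin layout (added summary; offsets in bytes, derived
# from the struct.pack calls above -- the 36-byte descriptor size is an assumption implied
# by the fixed 120-byte firmware offset). scrubber() below writes the same layout:
#   0           nvfw_bin_hdr                         (24 bytes, "<6L")
#   24          nvfw_hs_header_v2                    (36 bytes, "<9L")
#   60          signatures                           (signatures_size bytes)
#   60 + sigs   patch_loc, patch_sig, fuse_ver, engine_id, ucode_id, num_sigs ("<6L", 24 bytes)
#   84 + sigs   nvkm_gsp_booter_fw_hdr descriptor    (36 bytes)
#   120 + sigs  Booter firmware image (total_size in the header is rounded up to a multiple of 256)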
# GPU memory scrubber, needed for some GPUs and configurations
def scrubber(gpu, sigsize):
global outputpath
global version
# Unfortunately, RM breaks convention with the scrubber image and labels
# the files and arrays with AD10X instead of AD102.
GPUX = f"{gpu[:-1].upper()}X"
filename = f"src/nvidia/generated/g_bindata_ksec2GetBinArchiveSecurescrubUcode_{GPUX}.c"
print(f"Creating nvidia/{gpu}/gsp/scrubber-{version}.bin")
os.makedirs(f"{outputpath}/nvidia/{gpu}/gsp/", exist_ok = True)
with open(f"{outputpath}/nvidia/{gpu}/gsp/scrubber-{version}.bin", "wb") as f:
# Extract the actual scrubber firmware
array = f"ksec2BinArchiveSecurescrubUcode_{GPUX}_image_prod_data[]"
firmware = getbytes(filename, array)
firmware_size = len(firmware)
# Extract the signatures
array = f"ksec2BinArchiveSecurescrubUcode_{GPUX}_sig_prod_data"
signatures = getbytes(filename, array)
signatures_size = len(signatures)
if signatures_size % sigsize:
raise MyException(f"signature file size for {array} is uneven value of {sigsize}")
num_sigs = signatures_size // sigsize
if num_sigs < 1:
raise MyException(f"invalid number of signatures {num_sigs}")
# First, add the nvfw_bin_hdr header
total_size = round_up_to_base(120 + signatures_size + firmware_size, 256)
firmware_offset = 120 + signatures_size
f.write(struct.pack("<6L", 0x10de, 1, total_size, 24, firmware_offset, firmware_size))
# Second, add the nvfw_hs_header_v2 header
patch_loc_offset = 60 + signatures_size
patch_sig_offset = patch_loc_offset + 4
meta_data_offset = patch_sig_offset + 4
num_sig_offset = meta_data_offset + 12
header_offset = num_sig_offset + 4
f.write(struct.pack("<9L", 60, signatures_size, patch_loc_offset,
patch_sig_offset, meta_data_offset, 12,
num_sig_offset, header_offset, 36))
# Third, the actual signatures
f.write(signatures)
# Extract the patch location
array = f"ksec2BinArchiveSecurescrubUcode_{GPUX}_patch_loc_data"
bytes = getbytes(filename, array)
patchloc = struct.unpack("<L", bytes)[0]
# Extract the patch meta variables
array = f"ksec2BinArchiveSecurescrubUcode_{GPUX}_patch_meta_data"
bytes = getbytes(filename, array)
fuse_ver, engine_id, ucode_id = struct.unpack("<LLL", bytes)
# Fourth, patch_loc[], patch_sig[], fuse_ver, engine_id, ucode_id, and num_sigs
f.write(struct.pack("<6L", patchloc, 0, fuse_ver, engine_id, ucode_id, num_sigs))
# Extract the descriptor (nvkm_gsp_booter_fw_hdr)
array = f"ksec2BinArchiveSecurescrubUcode_{GPUX}_header_prod_data"
descriptor = getbytes(filename, array)
# Fifth, the descriptor
f.write(descriptor)
# And finally, the actual scrubber image
f.write(firmware)
def main():
global outputpath
global version
parser = argparse.ArgumentParser(
description = 'Extract firmware binaries from the OpenRM git repository'
' in a format expected by the Nouveau device driver.')
parser.add_argument('-i', '--input', default = os.getcwd(),
help = 'Path to source directory (where version.mk exists)')
parser.add_argument('-o', '--output', default = os.path.abspath(os.getcwd() + '/_out'),
help = 'Path to target directory (where files will be written)')
args = parser.parse_args()
os.chdir(args.input)
with open("version.mk") as f:
version = re.search(r'^NVIDIA_VERSION = ([^\s]+)', f.read(), re.MULTILINE).group(1)
print(f"Generating files for version {version}")
# Normal version strings are of the format xxx.yy.zz, which are all
# numbers. If it's a normal version string, convert it to a single number,
# as Nouveau currently expects. Otherwise, leave it alone.
if set(version) <= set('0123456789.'):
version = version.replace(".", "")
outputpath = args.output
print(f"Writing files to {outputpath}")
os.makedirs(f"{outputpath}/nvidia", exist_ok = True)
booter("tu102", "load", 16)
booter("tu102", "unload", 16)
bootloader("tu102", "_")
booter("tu116", "load", 16)
booter("tu116", "unload", 16)
# TU11x uses the same bootloader as TU10x
booter("ga100", "load", 384)
booter("ga100", "unload", 384)
bootloader("ga100", "_")
booter("ga102", "load", 384)
booter("ga102", "unload", 384)
bootloader("ga102", "_prod_")
booter("ad102", "load", 384)
booter("ad102", "unload", 384)
bootloader("ad102", "_prod_")
# scrubber("ad102", 384) # Not currently used by Nouveau
if __name__ == "__main__":
main()
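# Example invocation (added; paths are illustrative):
#   python3 extract-firmware-nouveau.py -i /path/to/open-gpu-kernel-modules -o ./_out
# which writes files such as _out/nvidia/tu102/gsp/booter_load-<version>.bin and
# _out/nvidia/tu102/gsp/bootloader-<version>.bin for Nouveau to load.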
| open-gpu-kernel-modules-main | nouveau/extract-firmware-nouveau.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import torch
import torch.nn as nn
import math
import json
from torch.utils.checkpoint import checkpoint
import pdb
class SoftmaxAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.drop_attn = torch.nn.Dropout(p=config.attention_dropout)
self.head_dim = config.head_dim
def forward(self, Q, K, V, mask):
dot = torch.matmul(Q, torch.transpose(K, -2, -1))
dot = dot / math.sqrt(self.head_dim)
dot = dot - 1e6 * (1 - mask[:, None, None, :])
attn = nn.functional.softmax(dot, dim = -1)
attn = self.drop_attn(attn)
X = torch.matmul(attn, V)
return X
class NoneAttention(nn.Module):
def __init__(self, config):
super().__init__()
def forward(self, Q, K, V, mask):
return V
class Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.grad_checkpointing = config.attention_grad_checkpointing
self.dim = config.transformer_dim
self.head_dim = config.head_dim
self.num_head = config.num_head
self.attn_type = config.attn_type
self.W_q = nn.Linear(self.dim, self.num_head * self.head_dim)
self.W_k = nn.Linear(self.dim, self.num_head * self.head_dim)
self.W_v = nn.Linear(self.dim, self.num_head * self.head_dim)
self.dconv_fc = None
if self.attn_type == "softmax":
self.attn = SoftmaxAttention(config)
elif self.attn_type == "none":
self.attn = NoneAttention(config)
elif self.attn_type.startswith("linformer"):
from attention_linformer import LinformerAttention
self.attn = LinformerAttention(config)
elif self.attn_type.startswith("reformer"):
from attention_reformer import LSHAttention
self.attn = LSHAttention(config, self.W_q, self.W_k, self.W_v)
elif self.attn_type.startswith("nystrom"):
from attention_nystrom import NystromAttention
self.attn = NystromAttention(config)
elif self.attn_type.startswith("performer"):
from attention_performer import PerformerAttention
self.attn = PerformerAttention(config)
elif self.attn_type.startswith("linear"):
from attention_linear import LinearAttention
self.attn = LinearAttention(config)
self.ff = nn.Linear(self.num_head * self.head_dim, self.dim)
def forward(self, X, mask):
if self.attn_type.startswith("longformer") or self.attn_type.startswith("reformer"):
with torch.cuda.amp.autocast(enabled = False):
attn_out = self.attn(X.float(), mask.float())
else:
Q = self.split_heads(self.W_q(X))
K = self.split_heads(self.W_k(X))
V = self.split_heads(self.W_v(X))
with torch.cuda.amp.autocast(enabled = False):
if self.grad_checkpointing:
attn_out = checkpoint(self.attn, Q.float(), K.float(), V.float(), mask.float())
else:
attn_out = self.attn(Q.float(), K.float(), V.float(), mask.float())
attn_out = self.combine_heads(attn_out)
out = self.ff(attn_out)
return out
def combine_heads(self, X):
X = X.transpose(1, 2)
X = X.reshape(X.size(0), X.size(1), self.num_head * self.head_dim)
return X
def split_heads(self, X):
X = X.reshape(X.size(0), X.size(1), self.num_head, self.head_dim)
X = X.transpose(1, 2)
return X
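if __name__ == "__main__":
    # Added smoke test, not part of the original repository: run the plain softmax
    # attention block on random inputs. All sizes below are illustrative assumptions,
    # not LRA hyper-parameters.
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        attn_type="softmax", attention_grad_checkpointing=False,
        transformer_dim=64, head_dim=32, num_head=2, attention_dropout=0.1)
    layer = Attention(cfg)
    X = torch.randn(4, 128, cfg.transformer_dim)
    mask = torch.ones(4, 128)
    print(layer(X, mask).shape)  # expected: torch.Size([4, 128, 64])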
| transformer-ls-master | lra/attention.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import torch
import torch.nn as nn
import math
class NystromAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.head_dim = config.head_dim
self.num_head = config.num_head
self.num_landmarks = config.num_landmarks
self.seq_len = config.max_seq_len
if "inv_coeff_init_option" in config:
self.init_option = config.inv_coeff_init_option
else:
self.init_option = "original"
self.use_conv = hasattr(config, "conv_kernel_size") and getattr(config, "conv_kernel_size", -1) > 0
if self.use_conv:
self.conv = nn.Conv2d(
in_channels=self.num_head, out_channels=self.num_head,
kernel_size=(config.conv_kernel_size, 1), padding=(config.conv_kernel_size // 2, 0),
bias=False,
groups=self.num_head)
def forward(self, Q, K, V, mask):
Q = Q * mask[:, None, :, None] / math.sqrt(math.sqrt(self.head_dim))
K = K * mask[:, None, :, None] / math.sqrt(math.sqrt(self.head_dim))
if self.num_landmarks == self.seq_len:
attn = torch.nn.functional.softmax(torch.matmul(Q, K.transpose(-1, -2)) - 1e9 * (1 - mask[:, None, None, :]), dim = -1)
X = torch.matmul(attn, V)
else:
Q_landmarks = Q.reshape(-1, self.num_head, self.num_landmarks, self.seq_len // self.num_landmarks, self.head_dim).mean(dim = -2)
K_landmarks = K.reshape(-1, self.num_head, self.num_landmarks, self.seq_len // self.num_landmarks, self.head_dim).mean(dim = -2)
kernel_1 = torch.nn.functional.softmax(torch.matmul(Q, K_landmarks.transpose(-1, -2)), dim = -1)
kernel_2 = torch.nn.functional.softmax(torch.matmul(Q_landmarks, K_landmarks.transpose(-1, -2)), dim = -1)
kernel_3 = torch.nn.functional.softmax(torch.matmul(Q_landmarks, K.transpose(-1, -2)) - 1e9 * (1 - mask[:, None, None, :]), dim = -1)
X = torch.matmul(torch.matmul(kernel_1, self.iterative_inv(kernel_2)), torch.matmul(kernel_3, V))
if self.use_conv:
X += self.conv(V * mask[:, None, :, None])
return X
def iterative_inv(self, mat, n_iter = 6):
I = torch.eye(mat.size(-1), device=mat.device)
K = mat
if self.init_option == "original":
V = 1 / torch.max(torch.sum(K, dim=-2)) * K.transpose(-1, -2)
else:
V = 1 / torch.max(torch.sum(K, dim=-2), dim=-1).values[:, :, None, None] * K.transpose(-1, -2)
for _ in range(n_iter):
KV = torch.matmul(K, V)
V = torch.matmul(0.25 * V, 13 * I - torch.matmul(KV, 15 * I - torch.matmul(KV, 7 * I - KV)))
return V
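    # Note (added): this is a Newton-Schulz style fixed-point iteration that approximates
    # the Moore-Penrose pseudoinverse of the landmark-to-landmark kernel, which lets the
    # Nystrom approximation avoid materialising the full seq_len x seq_len attention matrix.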
def extra_repr(self):
return f'num_landmarks={self.num_landmarks}, seq_len={self.seq_len}'
| transformer-ls-master | lra/attention_nystrom.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import torch
import torch.nn as nn
import math
from performer_pytorch import FastAttention
class PerformerAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.head_dim = config.head_dim
self.rp_dim = config.rp_dim
self.kernel_type = "relu" #config.kernel_type
if self.kernel_type == "relu":
self.attn_fn = FastAttention(dim_heads = self.head_dim, nb_features = self.rp_dim, causal = False, kernel_fn = nn.ReLU())
elif self.kernel_type == "exp":
self.attn_fn = FastAttention(dim_heads = self.head_dim, nb_features = self.rp_dim, causal = False, kernel_fn = torch.exp)
def forward(self, Q, K, V, mask):
return self.attn_fn(
Q / math.sqrt(math.sqrt(self.head_dim)),
K / math.sqrt(math.sqrt(self.head_dim)) * mask[:, None, :, None],
V * mask[:, None, :, None])
def extra_repr(self):
return f'rp_dim={self.rp_dim}, kernel_type={self.kernel_type}'
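# Note (added): FastAttention from performer_pytorch approximates softmax attention with
# random feature maps (FAVOR+-style). Dividing both Q and K by sqrt(sqrt(head_dim)) splits
# the usual 1/sqrt(head_dim) temperature evenly between the two operands, and padded
# key/value positions are zeroed out via the mask before the linear attention.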
| transformer-ls-master | lra/attention_performer.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import torch
import torch.nn as nn
import numpy as np
import math
from torch.utils.checkpoint import checkpoint
from attention import Attention
from attention_transformer_ls import AttentionLS
import pdb
class Embeddings(nn.Module):
def __init__(self, config):
super().__init__()
assert config.embedding_dim == config.transformer_dim
self.dim = config.embedding_dim
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_dim)
torch.nn.init.normal_(self.word_embeddings.weight, std=0.02)
self.position_embeddings = nn.Embedding(config.max_seq_len, config.embedding_dim)
torch.nn.init.normal_(self.position_embeddings.weight, std=0.02)
if config.debug:
self.word_embeddings.weight[-1].data[:] = 0
self.position_embeddings.weight[0].data[:] = 0
self.dropout = torch.nn.Dropout(p=config.dropout_prob)
def fixed_pos_emb(self, seq_len, device):
position = torch.arange(0, seq_len, device=device)[:, np.newaxis]
div_term = torch.exp(torch.arange(0, self.dim, 2, device=device) * -(math.log(10000.0) / self.dim))
pos_embed = torch.stack([torch.sin(position * div_term), torch.cos(position * div_term)], -1).reshape(seq_len, -1)
return pos_embed
def forward(self, input_ids):
batch_size, seq_len = input_ids.size()
X_token = self.word_embeddings(input_ids)
position_ids = torch.arange(seq_len, dtype=torch.long, device=input_ids.device)[None, :].repeat(batch_size, 1)
X_pos = self.position_embeddings(position_ids)
X = X_token + X_pos
X = self.dropout(X)
return X
class Transformer(nn.Module):
def __init__(self, config):
super().__init__()
self.norm1 = nn.LayerNorm(config.transformer_dim)
if config.attn_type == 'lsta':
self.mha = AttentionLS(config)
else:
self.mha = Attention(config)
self.dropout1 = torch.nn.Dropout(p=config.dropout_prob)
self.norm2 = nn.LayerNorm(config.transformer_dim)
self.debug = config.debug
self.mlpblock = nn.Sequential(
nn.Linear(config.transformer_dim, config.transformer_hidden_dim),
nn.GELU(),
torch.nn.Dropout(p=config.dropout_prob),
nn.Linear(config.transformer_hidden_dim, config.transformer_dim),
torch.nn.Dropout(p=config.dropout_prob)
)
def forward(self, X, mask, cls_embed=None):
if cls_embed is None:
X = self.dropout1(self.mha(self.norm1(X), mask)) + X
else:
if cls_embed.shape[0] == 1:
cls_embed = cls_embed.expand(X.shape[0], -1, -1)
X_prepend = torch.cat([cls_embed, X], dim=1)
if self.debug:
cls_embed = self.norm1(cls_embed)
X = self.dropout1(self.mha(self.norm1(X), mask, cls_embed)) + X_prepend
X = self.mlpblock(self.norm2(X)) + X
return X
class Model(nn.Module):
def __init__(self, config):
super().__init__()
self.num_layers = config.num_layers
self.tied_weights = config.tied_weights
self.cls_last_layer = config.cls_last_layer
self.embeddings = Embeddings(config)
if config.cls_token or self.cls_last_layer:
self.cls_embed = nn.Parameter(torch.zeros(1, 1, config.transformer_dim))
else:
self.cls_embed = None
if self.tied_weights:
self.transformer = Transformer(config)
else:
for idx in range(self.num_layers):
setattr(self, f"transformer_{idx}", Transformer(config))
self.norm = nn.LayerNorm(config.transformer_dim)
def forward(self, input_ids, mask=None):
X = self.embeddings(input_ids)
cls_embed = self.cls_embed if not self.cls_last_layer else None
if mask is None:
mask = torch.ones_like(input_ids)
if self.tied_weights:
for idx in range(self.num_layers):
if self.cls_last_layer and idx == self.num_layers - 1:
cls_embed = self.cls_embed
X = self.transformer(X, mask, cls_embed)
if cls_embed is not None:
# The cls token is always prepended as the first position, so split it back off here
cls_embed = X[:, :1]
X = X[:, 1:]
else:
for idx in range(self.num_layers):
if self.cls_last_layer and idx == self.num_layers - 1:
cls_embed = self.cls_embed
X = getattr(self, f"transformer_{idx}")(X, mask, cls_embed)
if cls_embed is not None:
# The cls token is always prepended as the first position, so split it back off here
cls_embed = X[:, :1]
X = X[:, 1:]
if cls_embed is not None:
cls_embed = self.norm(cls_embed)
return cls_embed
else:
X = self.norm(X) * mask[:, :, None]
return X
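if __name__ == "__main__":
    # Added smoke test, not part of the original repository: run a tiny two-layer
    # softmax-attention model on random token ids. Every value in this throwaway config
    # is an illustrative assumption, not an LRA setting.
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        num_layers=2, tied_weights=False, cls_last_layer=False, cls_token=False,
        debug=False, vocab_size=512, max_seq_len=256, embedding_dim=64,
        transformer_dim=64, transformer_hidden_dim=128, head_dim=32, num_head=2,
        dropout_prob=0.1, attention_dropout=0.1, attn_type="softmax",
        attention_grad_checkpointing=False)
    model = Model(cfg)
    input_ids = torch.randint(0, cfg.vocab_size, (4, cfg.max_seq_len))
    print(model(input_ids).shape)  # expected: torch.Size([4, 256, 64])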
| transformer-ls-master | lra/model.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import torch
import torch.nn as nn
import math
from model import Model
def pooling(inp, mode):
# pdb.set_trace()
if mode.lower() == "cls":
pooled = inp[:, 0, :]
elif mode.lower() == "mean":
pooled = inp.mean(dim=1)
else:
raise Exception()
return pooled
def append_cls(inp, mask, vocab_size):
batch_size = inp.size(0)
cls_id = ((vocab_size - 1) * torch.ones(batch_size, dtype=inp.dtype, device=inp.device))#.long()
cls_mask = torch.ones(batch_size, dtype=mask.dtype, device=mask.device)
# inp = torch.cat([cls_id[:, None], inp[:, :-1]], dim=-1)
# mask = torch.cat([cls_mask[:, None], mask[:, :-1]], dim=-1)
inp = torch.cat([cls_id[:, None], inp], dim=-1)
mask = torch.cat([cls_mask[:, None], mask], dim=-1)
return inp, mask
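# Note (added): append_cls() grows the sequence by one token, using the reserved id
# vocab_size - 1 as the CLS symbol. run_tasks.py pre-trims the last position for the
# Nystrom/Reformer variants under CLS pooling so that the length after prepending stays
# equal to max_seq_len.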
class SCHead(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pooling_mode = config.pooling_mode
if config.cls_last_layer:
self.pooling_mode = 'CLS'
self.mlpblock = nn.Sequential(
nn.Linear(config.transformer_dim, config.transformer_hidden_dim),
nn.ReLU(),
nn.Linear(config.transformer_hidden_dim, config.num_classes)
)
def forward(self, inp):
if self.config.cls_token:
seq_score = self.mlpblock(inp[:, 0, :])
else:
seq_score = self.mlpblock(pooling(inp, self.pooling_mode))
return seq_score
class ModelForSC(nn.Module):
def __init__(self, config):
super().__init__()
self.enable_amp = config.mixed_precision
self.pooling_mode = config.pooling_mode # something specific for Nystromformer?
assert not (config.pooling_mode.lower() == 'cls' and config.cls_last_layer )
self.cls_last_layer = config.cls_last_layer
self.vocab_size = config.vocab_size
self.model = Model(config)
self.seq_classifer = SCHead(config)
def forward(self, input_ids_0, mask_0, label):
with torch.cuda.amp.autocast(enabled=self.enable_amp):
if self.pooling_mode == "CLS" and not self.cls_last_layer:
input_ids_0, mask_0 = append_cls(input_ids_0, mask_0, self.vocab_size)
token_out = self.model(input_ids_0, mask_0)
seq_scores = self.seq_classifer(token_out)
seq_loss = torch.nn.CrossEntropyLoss(reduction="none")(seq_scores, label)
seq_accu = (seq_scores.argmax(dim=-1) == label).to(torch.float32)
outputs = {}
outputs["loss"] = seq_loss
outputs["accu"] = seq_accu
return outputs
class ModelForSCProbing(nn.Module):
def __init__(self, config):
super().__init__()
self.enable_amp = config.mixed_precision
self.pooling_mode = config.pooling_mode # something specific for Nystromformer?
assert not (config.pooling_mode.lower() == 'cls' and config.cls_last_layer )
self.cls_last_layer = config.cls_last_layer
self.vocab_size = config.vocab_size
self.model = Model(config)
self.seq_classifer = SCHead(config)
def forward(self, input_list):
input_ids_0, mask_0, label = input_list
with torch.cuda.amp.autocast(enabled=self.enable_amp):
if self.pooling_mode == "CLS" and not self.cls_last_layer:
input_ids_0, mask_0 = append_cls(input_ids_0, mask_0, self.vocab_size)
token_out = self.model(input_ids_0, mask_0)
seq_scores = self.seq_classifer(token_out)
seq_loss = torch.nn.CrossEntropyLoss(reduction="none")(seq_scores, label)
seq_accu = (seq_scores.argmax(dim=-1) == label).to(torch.float32)
outputs = {}
outputs["loss"] = seq_loss
outputs["accu"] = seq_accu
return outputs
class SCHeadDual(nn.Module):
def __init__(self, config):
super().__init__()
self.pooling_mode = config.pooling_mode
self.mlpblock = nn.Sequential(
nn.Linear(config.transformer_dim * 4, config.transformer_hidden_dim),
nn.ReLU(),
nn.Linear(config.transformer_hidden_dim, config.num_classes)
)
def forward(self, inp_0, inp_1):
X_0 = pooling(inp_0, self.pooling_mode)
X_1 = pooling(inp_1, self.pooling_mode)
seq_score = self.mlpblock(torch.cat([X_0, X_1, X_0 * X_1, X_0 - X_1], dim=-1))
return seq_score
class ModelForSCDual(nn.Module):
def __init__(self, config):
super().__init__()
self.enable_amp = config.mixed_precision
self.pooling_mode = config.pooling_mode
self.vocab_size = config.vocab_size
self.model = Model(config)
self.seq_classifer = SCHeadDual(config)
def forward(self, input_ids_0, input_ids_1, mask_0, mask_1, label):
with torch.cuda.amp.autocast(enabled=self.enable_amp):
if self.pooling_mode == "CLS":
input_ids_0, mask_0 = append_cls(input_ids_0, mask_0, self.vocab_size)
input_ids_1, mask_1 = append_cls(input_ids_1, mask_1, self.vocab_size)
token_out_0 = self.model(input_ids_0, mask_0)
token_out_1 = self.model(input_ids_1, mask_1)
seq_scores = self.seq_classifer(token_out_0, token_out_1)
seq_loss = torch.nn.CrossEntropyLoss(reduction="none")(seq_scores, label)
seq_accu = (seq_scores.argmax(dim=-1) == label).to(torch.float32)
outputs = {}
outputs["loss"] = seq_loss
outputs["accu"] = seq_accu
return outputs
class ModelForSCDualProbing(nn.Module):
def __init__(self, config):
super().__init__()
self.enable_amp = config.mixed_precision
self.pooling_mode = config.pooling_mode
self.vocab_size = config.vocab_size
self.model = Model(config)
self.seq_classifer = SCHeadDual(config)
def forward(self, input_list):
input_ids_0, input_ids_1, mask_0, mask_1, label = input_list
with torch.cuda.amp.autocast(enabled=self.enable_amp):
if self.pooling_mode == "CLS":
input_ids_0, mask_0 = append_cls(input_ids_0, mask_0, self.vocab_size)
input_ids_1, mask_1 = append_cls(input_ids_1, mask_1, self.vocab_size)
token_out_0 = self.model(input_ids_0, mask_0)
token_out_1 = self.model(input_ids_1, mask_1)
seq_scores = self.seq_classifer(token_out_0, token_out_1)
seq_loss = torch.nn.CrossEntropyLoss(reduction="none")(seq_scores, label)
seq_accu = (seq_scores.argmax(dim=-1) == label).to(torch.float32)
outputs = {}
outputs["loss"] = seq_loss
outputs["accu"] = seq_accu
return outputs
| transformer-ls-master | lra/model_wrapper.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import torch
import torch.nn as nn
import math
from torch.utils.data.dataset import Dataset
import sys
import os
import random
import json
import pickle
import numpy as np
import pdb
class LRADataset(Dataset):
def __init__(self, file_path, endless):
self.endless = endless
with open(file_path, "rb") as f:
self.examples = pickle.load(f)
random.shuffle(self.examples)
self.curr_idx = 0
print(f"Loaded {file_path}... size={len(self.examples)}", flush = True)
def __len__(self):
if self.endless:
return 1000000000
else:
return len(self.examples)
def create_inst(self, inst):
output = {}
output["input_ids_0"] = torch.tensor(inst["input_ids_0"], dtype = torch.long)
output["mask_0"] = (output["input_ids_0"] != 0).float()
if "input_ids_1" in inst:
output["input_ids_1"] = torch.tensor(inst["input_ids_1"], dtype = torch.long)
output["mask_1"] = (output["input_ids_1"] != 0).float()
output["label"] = torch.tensor(inst["label"], dtype = torch.long)
return output
def __getitem__(self, i):
if not self.endless:
return self.create_inst(self.examples[i])
if self.curr_idx >= len(self.examples):
random.shuffle(self.examples)
self.curr_idx = 0
inst = self.examples[self.curr_idx]
self.curr_idx += 1
return self.create_inst(inst)
| transformer-ls-master | lra/dataset.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import torch
import torch.nn as nn
from transformers.models.reformer.modeling_reformer import LSHSelfAttention, ReformerConfig
class LSHAttention(LSHSelfAttention):
def __init__(self, config, query, key, value):
self.num_hash = config.num_hash
reformer_config = ReformerConfig()
reformer_config.attention_head_size = config.head_dim
reformer_config.num_attention_heads = config.num_head
reformer_config.attn_layers = ["lsh"]
reformer_config.num_hashes = config.num_hash
reformer_config.is_decoder = False
reformer_config.max_position_embeddings = config.max_seq_len
reformer_config.hidden_size = config.transformer_dim
super().__init__(reformer_config)
self.query_key.weight = query.weight
self.value.weight = value.weight
def forward(self, X, mask):
return super().forward(hidden_states = X, attention_mask = mask).hidden_states
def extra_repr(self):
return f'num_hash={self.num_hash}'
| transformer-ls-master | lra/attention_reformer.py |
"""
Adapted from https://github.com/mlpen/Nystromformer
"""
from fvcore.nn import FlopCountAnalysis
from model_wrapper import ModelForSC, ModelForSCDual, ModelForSCProbing, ModelForSCDualProbing
from dataset import LRADataset
import torch
import random
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import torch.nn as nn
import time
import os
import json
import pickle
import numpy as np
import argparse
import math
import itertools
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, help="model", dest="model", required=True)
parser.add_argument("--task", type=str, help="task", dest="task", required = False)
parser.add_argument("--skip_train", type = int, help = "skip_train", dest = "skip_train", default = 0)
parser.add_argument("--logging", action='store_true', default=False)
parser.add_argument("--expname", type=str, default="default")
# Model configs
parser.add_argument("--attention_grad_checkpointing", default=False, action="store_true")
parser.add_argument("--num_landmarks", default=128, type=int)
parser.add_argument("--window_size", default=129, type=int)
parser.add_argument("--conv_kernel_size", default=-1, type=int)
parser.add_argument("--learn_pos_emb", default=1, type=int,
help="Use 0 or 1 to represent false and true")
parser.add_argument("--tied_weights", default=False, action="store_true")
parser.add_argument("--embedding_dim", default=64, type=int)
parser.add_argument("--transformer_dim", default=64, type=int)
parser.add_argument("--transformer_hidden_dim", default=128, type=int)
parser.add_argument("--head_dim", default=32, type=int)
parser.add_argument("--num_head", default=2, type=int)
parser.add_argument("--num_layers", default=2, type=int)
parser.add_argument("--vocab_size", default=512, type=int)
parser.add_argument("--max_seq_len", default=4096, type=int)
parser.add_argument("--dropout_prob", default=0.1, type=float)
parser.add_argument("--attention_dropout", default=0.1, type=float)
parser.add_argument("--pooling_mode", default="MEAN", type=str)
parser.add_argument("--num_classes", default=2, type=int)
parser.add_argument("--cls_token", default=False, action='store_true')
# Training configs
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--learning_rate", default=1e-4, type=float)
parser.add_argument("--warmup", default=8000, type=int)
parser.add_argument("--lr_decay", default="linear", type=str)
parser.add_argument("--fixed_lr", default=False, action='store_true')
parser.add_argument("--weight_decay", default=0, type=float)
parser.add_argument("--adam_eps", default=1e-6, type=float)
parser.add_argument("--eval_frequency", default=500, type=int)
parser.add_argument("--num_train_steps", default=20000, type=int)
parser.add_argument("--num_eval_steps", default=781, type=int)
parser.add_argument("--fp32_attn", default=False, action='store_true')
parser.add_argument("--conv_zero_init", default=False, action='store_true')
# Dataset Configs
parser.add_argument("--n_train_samples", default=25000, type=int)
parser.add_argument("--n_dev_samples", default=25000, type=int)
parser.add_argument("--n_test_samples", default=25000, type=int)
parser.add_argument("--debug", default=False, action='store_true')
parser.add_argument("--cls_last_layer", default=False, action='store_true')
parser.add_argument("--seed", default=1234, type=int)
parser.add_argument("--linformer_k", default=256, type=int)
parser.add_argument("--rp_dim", default=256, type=int)
parser.add_argument("--num_hash", default=2, type=int)
parser.add_argument("--chk_path", default="LRA_chks", type=str)
parser.add_argument("--test_flops", default=False, action='store_true')
args = parser.parse_args()
random.seed(args.seed)
torch.manual_seed(args.seed)
# cudnn.deterministic = True
args.attn_type = args.model # remove attn_type in the future
args.mixed_precision = True # bool(args.mixed_precision)
task = args.task
checkpoint_dir = args.chk_path
print(args)
device_ids = list(range(torch.cuda.device_count()))
print(f"GPU list: {device_ids}")
if task == "retrieval":
if args.test_flops:
model = ModelForSCDualProbing(args)
else:
model = ModelForSCDual(args)
else:
if args.test_flops:
model = ModelForSCProbing(args)
else:
model = ModelForSC(args)
print(model)
print(f"parameter_size: {[weight.size() for weight in model.parameters()]}", flush=True)
print(f"num_parameter: {np.sum([np.prod(weight.size()) for weight in model.parameters()])}", flush=True)
model = model.cuda()
model = nn.DataParallel(model, device_ids = device_ids)
data_path = 'datasets'
ds_iter = {
"train":enumerate(DataLoader(LRADataset(f"{data_path}/{task}.train.pickle", True), batch_size=args.batch_size, drop_last=True)),
"dev":enumerate(DataLoader(LRADataset(f"{data_path}/{task}.dev.pickle", True), batch_size=args.batch_size, drop_last=True)),
"test":enumerate(DataLoader(LRADataset(f"{data_path}/{task}.test.pickle", False), batch_size=args.batch_size, drop_last=True)),
}
optimizer = torch.optim.AdamW(
model.parameters(),
lr=args.learning_rate,
betas=(0.9, 0.999), eps=args.adam_eps, weight_decay=args.weight_decay
)
lr_scheduler = torch.optim.lr_scheduler.OneCycleLR(
optimizer=optimizer,
max_lr=args.learning_rate,
pct_start=args.warmup / args.num_train_steps,
anneal_strategy=args.lr_decay,
total_steps=args.num_train_steps
)
amp_scaler = torch.cuda.amp.GradScaler() if args.mixed_precision else None
def step(component, step_idx):
t0 = time.time()
optimizer.zero_grad()
_, batch = next(ds_iter[component])
for key in batch:
batch[key] = batch[key].cuda()
if (args.model == 'nystrom' or args.model == 'reformer') and args.pooling_mode.lower() == 'cls':
for key in batch:
if 'input_ids' in key or 'mask' in key:
batch[key] = batch[key][:, :-1].contiguous()
if component == "train":
outputs = {}
partial_inputs_list = [{} for _ in range(accumu_steps)]
for key in batch:
for idx, inp in enumerate(torch.chunk(batch[key], accumu_steps, dim = 0)):
partial_inputs_list[idx][key] = inp
for partial_inputs in partial_inputs_list:
if args.test_flops:
if 'input_ids_1' in partial_inputs:
flops = FlopCountAnalysis(
model, [partial_inputs['input_ids_0'][:1], partial_inputs['input_ids_1'][:1],
partial_inputs['mask_0'][:1], partial_inputs['mask_1'][:1], partial_inputs['label'][:1]])
else:
flops = FlopCountAnalysis(
model, [partial_inputs['input_ids_0'][:1], partial_inputs['mask_0'][:1], partial_inputs['label'][:1]])
print(f"Flops of {args.model}: {flops.total()/1e9:.2f} G")
exit()
partial_outputs = model(**partial_inputs)
for key in partial_outputs:
partial_outputs[key] = partial_outputs[key].mean() / accumu_steps
if key not in outputs:
outputs[key] = partial_outputs[key]
else:
outputs[key] += partial_outputs[key]
amp_scaler.scale(partial_outputs["loss"]).backward()
amp_scaler.step(optimizer)
amp_scaler.update()
if (not args.fixed_lr) or step_idx < args.warmup:
lr_scheduler.step()
else:
with torch.no_grad():
outputs = {}
partial_inputs_list = [{} for _ in range(accumu_steps)]
for key in batch:
for idx, inp in enumerate(torch.chunk(batch[key], accumu_steps, dim = 0)):
partial_inputs_list[idx][key] = inp
for partial_inputs in partial_inputs_list:
partial_outputs = model(**partial_inputs)
for key in partial_outputs:
partial_outputs[key] = partial_outputs[key].mean() / accumu_steps
if key not in outputs:
outputs[key] = partial_outputs[key]
else:
outputs[key] += partial_outputs[key]
t1 = time.time()
batch_size = batch[list(batch.keys())[0]].size(0)
t_escape = t1 - t0
learning_rate = optimizer.param_groups[0]["lr"]
loss = outputs["loss"].data.item()
accu = outputs["accu"].data.item()
time_since_start = time.time() - init_t
print(f"step={step_idx}, tt={time_since_start:.1f}, t={t_escape:.3f}, bs={batch_size}, lr={learning_rate:.6f}, loss={loss:.4f}, accu={accu:.4f}\t\t\t\t", end = "\r", flush = True)
summary[component]["t"] += t_escape
summary[component]["loss"].append(loss)
summary[component]["accu"].append(accu)
def print_summary(summary, save_if_improved, train_step_idx, subset):
# subset: str, the subset to report the result
summary["loss"] = np.mean(summary["loss"])
summary["accu"] = np.mean(summary["accu"])
print()
if summary["accu"] > summary["best_accu"]:
summary["best_accu"] = summary["accu"]
if save_if_improved:
best_accu = summary["best_accu"]
torch.save({"model_state_dict":model.module.state_dict()}, log_f_path.replace(".log", ".model"))
print(f"best_accu={best_accu}. Saved best model")
summary_round = {"train_step_idx":train_step_idx}
for key in summary:
if type(summary[key]) is str:
summary_round[key+f"_{subset}"] = summary[key]
else:
summary_round[key+f"_{subset}"] = round(summary[key], 4)
print(summary_round, flush=True)
log_f.write(json.dumps(summary_round, sort_keys = True) + "\n")
log_f.flush()
summary["t"] = 0
summary["loss"] = []
summary["accu"] = []
init_t = time.time()
log_f_path = os.path.join(checkpoint_dir, f"{args.expname}_output.log")
log_f = open(log_f_path, "a+")
summary = {
component:{"t":0, "loss":[], "accu":[], "best_accu":0, "component":component}
for component in ["train", "dev", "test"]
}
# accumu_steps = max(training_config["batch_size"] // len(device_ids) // gpu_memory_config[attn_type], 1)
accumu_steps = max(args.batch_size // len(device_ids) // 32, 1)
print(f"accumu_steps={accumu_steps}")
if args.skip_train == 0:
try:
model.train()
for train_step_idx in range(args.num_train_steps):
outputs = step("train", train_step_idx)
if (train_step_idx + 1) % args.eval_frequency == 0:
print_summary(summary["train"], False, train_step_idx, 'train')
model.eval()
for dev_step_idx in range(args.num_eval_steps):
outputs = step("dev", dev_step_idx)
print_summary(summary["dev"], True, train_step_idx, 'dev')
model.train()
except KeyboardInterrupt as e:
print(e)
checkpoint = torch.load(log_f_path.replace(".log", ".model"), map_location="cpu")
model.module.load_state_dict(checkpoint["model_state_dict"])
model.eval()
try:
for test_step_idx in itertools.count():
outputs = step("test", test_step_idx)
except StopIteration:
print_summary(summary["test"], False, train_step_idx, 'test')
| transformer-ls-master | lra/run_tasks.py |
# Copyright (c) 2021 NVIDIA CORPORATION. Licensed under the MIT license.
# Written by Chen Zhu during an internship at NVIDIA, [email protected]
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class AttentionLS(nn.Module):
"""The long-short term attention for bidirectional language modelling
"""
def __init__(self, config):
super().__init__()
assert not (config.pooling_mode.lower() == 'cls' and config.cls_token)
self.cls_from_seq = config.pooling_mode.lower() == 'cls'
self.num_head = config.num_head
self.head_dim = config.head_dim
self.num_landmarks = config.num_landmarks
self.seq_len = config.max_seq_len
self.dim = config.transformer_dim
self.drop_attn = torch.nn.Dropout(p=config.attention_dropout)
self.window_size = config.window_size
self.W_q = nn.Linear(self.dim, self.num_head * self.head_dim)
self.W_k = nn.Linear(self.dim, self.num_head * self.head_dim)
self.W_v = nn.Linear(self.dim, self.num_head * self.head_dim)
self.W_o = nn.Linear(self.dim, self.num_head * self.head_dim)
self.fp32 = config.fp32_attn
self.dual_ln_s = nn.LayerNorm(self.num_head * self.head_dim)
self.dual_ln_l = nn.LayerNorm(self.num_head * self.head_dim)
self.dconv_fc = nn.Linear(self.dim, self.num_head * self.num_landmarks)
self.use_conv = getattr(config, "conv_kernel_size", -1) > 0
if self.use_conv:
self.conv = nn.Conv2d(
in_channels=self.num_head, out_channels=self.num_head,
kernel_size=(config.conv_kernel_size, 1), padding=(config.conv_kernel_size // 2, 0),
bias=False,
groups=self.num_head)
nn.init.zeros_(self.conv.weight)
def get_tiles(self, x, transpose=False):
# x: bsz x n_heads x seqlen x d_head
bsz, n_heads, seqlen, d_h = x.shape
out_shape = (bsz, n_heads, seqlen//self.window_size-1, 2 * self.window_size, d_h)
in_strides = x.stride()
out_strides = (in_strides[0], in_strides[1], in_strides[2]*self.window_size, in_strides[2], 1)
x_main = x.as_strided(size=out_shape, stride=out_strides)
x_last = x[:, :, None, -2*self.window_size:, :]
x = torch.cat([x_main, x_last], dim=2)
if transpose:
return x.transpose(-1, -2)
else:
# bsz x n_heads x seqlen//wlen x 2*wlen x d_h
return x
def get_tiled_mask(self, mask):
bsz, seqlen = mask.shape
out_shape = (bsz, seqlen//self.window_size-1, 2*self.window_size)
in_stride = mask.stride()
out_stride = (in_stride[0], in_stride[1]*self.window_size, in_stride[1])
mask_main = mask.as_strided(size=out_shape, stride=out_stride)[:, None, :, :]
mask_last = mask[:, None, None, -2*self.window_size:]
return torch.cat([mask_main, mask_last], dim=2)[:, :, :, None, :]
def sliding_chunks_matmul_qk(self, Q, K, padding_mask):
# Q, K: bsz x num_heads x seqlen x d_head
# padding_mask: bsz x seqlen
bsz, num_heads, seqlen, d_h = Q.shape
mask_tiles = self.get_tiled_mask(padding_mask)
K_tiles = self.get_tiles(K, transpose=True)
Q_tiles = Q.view(bsz, num_heads, seqlen//self.window_size, self.window_size, d_h)
# bsz x num_heads x seqlen//winsize x winsize x 2winsize
qk_scores = Q_tiles.matmul(K_tiles)
qk_scores.masked_fill_(mask_tiles, float('-inf'))
return qk_scores.view(bsz, num_heads, seqlen, 2*self.window_size)
def get_tiles_v2(self, x, transpose=False):
if self.window_size <= 0:
return x
bsz, n_heads, seqlen, d_h = x.shape
n_groups = seqlen // self.window_size
ext_len = max(self.window_size//2, 1)
x = F.pad(x, (0, 0, ext_len, ext_len), value=0)
strides = x.stride()
if transpose:
out_shape = (bsz, n_heads, n_groups, d_h, 2 * ext_len + self.window_size)
out_stride = (strides[0], strides[1], self.window_size * strides[2], strides[3], strides[2])
else:
out_shape = (bsz, n_heads, n_groups, 2 * ext_len + self.window_size, d_h)
out_stride = (strides[0], strides[1], self.window_size * strides[2], strides[2], strides[3])
return torch.as_strided(x, size=out_shape, stride=out_stride)
def get_tiled_mask_v2(self, mask):
# only mask along the key dimension
bsz, seqlen = mask.shape
ext_len = max(self.window_size//2, 1)
mask = F.pad(mask, (ext_len, ext_len), value=True)
out_shape = (bsz, seqlen//self.window_size, 2*ext_len + self.window_size)
in_stride = mask.stride()
out_stride = (in_stride[0], in_stride[1]*self.window_size, in_stride[1])
return mask.as_strided(size=out_shape, stride=out_stride)[:, None, :, None, :]
def sliding_chunks_matmul_qk_v2(self, Q, K, padding_mask):
bsz, num_heads, seqlen, d_h = Q.shape
if self.window_size > 0:
# Q, K: bsz x num_heads x seqlen x d_head
# padding_mask: bsz x seqlen
mask_tiles = self.get_tiled_mask_v2(padding_mask)
K_tiles = self.get_tiles_v2(K, transpose=True)
Q_tiles = Q.view(bsz, num_heads, seqlen//self.window_size, self.window_size, d_h)
# bsz x num_heads x seqlen//winsize x winsize x 2winsize
qk_scores = Q_tiles.matmul(K_tiles)
qk_scores = qk_scores.masked_fill(mask_tiles, float('-inf'))
return qk_scores.view(bsz, num_heads, seqlen, -1)
else:
qk_scores = torch.sum(Q*K, dim=-1, keepdim=True)
return qk_scores
def forward(self, X, mask, cls_embed=None):
assert not (self.num_landmarks <= 0 and cls_embed is None and self.window_size <= 0)
if self.cls_from_seq:
cls_embed = X[:,:1].contiguous()
X = X[:,1:].contiguous()
mask = mask[:,1:].contiguous()
bsz, seqlen, d_model = X.shape
# bsz x n_head x length x head_dim
Q = self.split_heads(self.W_q(X)).mul(1./math.sqrt(self.head_dim))
K = self.split_heads(self.dual_ln_l(self.W_k(X)))
V = self.split_heads(self.dual_ln_l(self.W_v(X)))
if self.fp32:
Q, K, V = Q.float(), K.float(), V.float()
# bsz x length x num_head*num_lms
padding_mask = ~mask.bool()
K_compress = V_compress = None
if self.num_landmarks > 0:
head_scores = self.dconv_fc(X).masked_fill(padding_mask[:, :, None], float('-inf'))
head_scores = F.softmax(head_scores, dim=1, dtype=torch.float32) #.to(X)
if not self.fp32:
head_scores = head_scores.to(X)
# bsz x num_head x num_lms x length
head_scores = head_scores.view(bsz, seqlen, self.num_head, self.num_landmarks).permute(0, 2, 3, 1)
K_compress = head_scores.matmul(K)
V_compress = head_scores.matmul(V)
if cls_embed is not None:
Q_cls = self.split_heads(self.W_q(cls_embed)).mul(1. / math.sqrt(self.head_dim))
K_cls = self.split_heads(self.W_k(cls_embed))
V_cls = self.split_heads(self.W_v(cls_embed))
if self.num_landmarks > 0:
K_compress = torch.cat([K_cls, K_compress], dim=2)
V_compress = torch.cat([V_cls, V_compress], dim=2)
else:
K_compress = K_cls
V_compress = V_cls
if self.dual_ln_s is not None and K_compress is not None:
K_compress = self.dual_ln_s(K_compress.transpose(1, 2).contiguous().view(bsz, -1, d_model))
K_compress = self.split_heads(K_compress)
V_compress = self.dual_ln_s(V_compress.transpose(1, 2).contiguous().view(bsz, -1, d_model))
V_compress = self.split_heads(V_compress)
if self.num_landmarks > 0 or (cls_embed is not None):
# bsz x num_head x length x num_lms
attn_compress = Q.matmul(K_compress.transpose(-1, -2))
else:
attn_compress = None
if self.window_size > 0 or self.num_landmarks == 0:
# Then compute the short-term component: sliding-window attention over the
# diagonal blocks of the sequence
# V: bsize, self.seq_len, self.num_head, self.head_dim
# win_attn_weights = self.sliding_chunks_matmul_qk(Q, K, padding_mask)
win_attn_weights = self.sliding_chunks_matmul_qk_v2(Q, K, padding_mask)
else:
win_attn_weights = None
if attn_compress is None:
all_attn_ = win_attn_weights
elif win_attn_weights is None:
all_attn_ = attn_compress
else:
all_attn_ = torch.cat([attn_compress, win_attn_weights], dim=-1)
all_attn = all_attn_.float().softmax(dim=-1).to(all_attn_)
# If every entry in a row is -inf, the softmax yields NaN, so zero out the fully-masked rows
all_attn = all_attn.masked_fill(padding_mask[:,None,:,None], 0)
if not self.fp32:
all_attn = all_attn.to(X)
all_attn = self.drop_attn(all_attn)
C = 0
if attn_compress is not None:
C += all_attn[:,:,:,:K_compress.shape[2]].matmul(V_compress)
if win_attn_weights is not None:
win_attn_probs = all_attn[:,:,:,-win_attn_weights.shape[-1]:]
if self.window_size > 0:
win_attn_probs = win_attn_probs.view(bsz, self.num_head, seqlen // self.window_size, self.window_size,-1)
V_tiles = self.get_tiles_v2(V, transpose=False)
C += win_attn_probs.matmul(V_tiles).view(bsz, self.num_head, seqlen, self.head_dim)
else:
C += win_attn_probs * V
if cls_embed is not None:
if self.fp32:
Q_cls, K_cls, V_cls = Q_cls.float(), K_cls.float(), V_cls.float()
# bsz x n_heads x 1 x (1+seqlen)
cls_scores = torch.cat([Q_cls.matmul(K_cls.transpose(-1, -2)),
Q_cls.matmul(C.transpose(-1, -2)).masked_fill(padding_mask[:,None,None,:], float('-inf'))],
dim=-1)
cls_probs = torch.softmax(cls_scores, dim=-1, dtype=torch.float32)#.to(X)
if not self.fp32:
cls_probs = cls_probs.to(X)
out_cls_embed = V_cls * cls_probs[:,:,:,:1] + cls_probs[:,:,:,1:].matmul(C)
if self.use_conv:
V = V.masked_fill(padding_mask[:, None, :, None], 0)
C = C + self.conv(V)
if cls_embed is not None:
C = torch.cat([out_cls_embed, C], dim=2)
if self.fp32:
# Finally convert it back, same as Nystromformer
C = C.to(X)
out = self.W_o(self.combine_heads(C))
return out
def extra_repr(self):
return f'num_landmarks={self.num_landmarks}, window_size={self.window_size}'
def combine_heads(self, X):
X = X.transpose(1, 2)
X = X.reshape(X.size(0), X.size(1), self.num_head * self.head_dim)
return X
def split_heads(self, X):
X = X.reshape(X.size(0), X.size(1), self.num_head, self.head_dim)
X = X.transpose(1, 2)
return X
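if __name__ == "__main__":
    # Added smoke test, not part of the original repository: run the long-short attention
    # with 16 landmarks (the "long" dynamic projection) and a 16-token sliding window (the
    # "short" local attention). All sizes are illustrative assumptions, not LRA settings.
    from types import SimpleNamespace
    cfg = SimpleNamespace(
        pooling_mode="MEAN", cls_token=False, num_head=2, head_dim=32,
        num_landmarks=16, max_seq_len=256, transformer_dim=64,
        attention_dropout=0.1, window_size=16, fp32_attn=False, conv_kernel_size=-1)
    layer = AttentionLS(cfg)
    X = torch.randn(4, cfg.max_seq_len, cfg.transformer_dim)
    mask = torch.ones(4, cfg.max_seq_len)
    print(layer(X, mask).shape)  # expected: torch.Size([4, 256, 64])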
| transformer-ls-master | lra/attention_transformer_ls.py |
"""
Adapted from https://github.com/mlpen/Nystromformer
Add dynamic convolution which is not included in Linformer.
"""
import torch
import torch.nn as nn
import math
import pdb
class LinformerAttention(nn.Module):
projection_matrix = None
def __init__(self, config):
super().__init__()
self.num_head = config.num_head
self.head_dim = config.head_dim
self.linformer_k = config.linformer_k
self.seq_len = config.max_seq_len
self.n_sparse = getattr(config, "n_sparse", 0)
self.dynamic_conv = getattr(config, "dynamic_conv", False)
if not self.dynamic_conv:
if LinformerAttention.projection_matrix is not None:
self.E = LinformerAttention.projection_matrix
else:
LinformerAttention.projection_matrix = nn.Parameter(torch.Tensor(self.num_head, self.linformer_k, self.seq_len))
torch.nn.init.normal_(LinformerAttention.projection_matrix, std = 0.02)
self.E = LinformerAttention.projection_matrix
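        # Note (added): the k x seq_len projection lives in a class attribute, so every
        # LinformerAttention layer in the model shares a single projection matrix
        # (layerwise parameter sharing, one of the sharing schemes in the Linformer paper).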
self.use_conv = config.conv_kernel_size > 0
if self.use_conv:
self.conv = nn.Conv2d(
in_channels=self.num_head, out_channels=self.num_head,
kernel_size=(config.conv_kernel_size, 1), padding=(config.conv_kernel_size // 2, 0),
bias=False,
groups=self.num_head)
def forward(self, Q, K, V, mask, dconv_weight=None):
if self.dynamic_conv:
# V: bsize, self.num_head, self.seq_len, self.head_dim
# dconv_weight: bsize x num_head x k x seqlen
E = dconv_weight
else:
E = self.E
V_orig = V
if self.n_sparse > 0:
# sample the sparse tokens to attend to
sample_probs = mask / torch.sum(mask, dim=1, keepdim=True)
sample_probs = sample_probs.unsqueeze(1).expand(-1, self.num_head, -1).reshape(-1, self.seq_len)
sample_idxes = torch.multinomial(sample_probs, self.n_sparse, replacement=False).reshape(
-1, self.num_head, self.n_sparse)
sparse_mask = torch.zeros((Q.shape[0], self.num_head, self.seq_len), dtype=torch.bool).to(Q.device)
# sparse_mask: bsize x self.num_head x seqlen
sparse_mask.scatter_(2, sample_idxes, True)
K_samples = K.masked_select(sparse_mask.unsqueeze(-1)).reshape(
-1, self.num_head, self.n_sparse, self.head_dim)
V_samples = V.masked_select(sparse_mask.unsqueeze(-1)).reshape(
-1, self.num_head, self.n_sparse, self.head_dim)
K = torch.cat([E.matmul(K * mask[:, None, :, None]), K_samples], dim=-2)
V = torch.cat([E.matmul(V * mask[:, None, :, None]), V_samples], dim=-2)
else:
K = torch.matmul(E, K * mask[:, None, :, None])
V = torch.matmul(E, V * mask[:, None, :, None])
dot = torch.matmul(Q, torch.transpose(K, -2, -1))
dot = dot / math.sqrt(self.head_dim)
attn = nn.functional.softmax(dot, dim = -1)
X = torch.matmul(attn, V)
if self.use_conv:
X += self.conv(V_orig * mask[:, None, :, None])
return X
def extra_repr(self):
return f'linformer_k={self.linformer_k}'
| transformer-ls-master | lra/attention_linformer.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import sys
sys.path.append("./long-range-arena/lra_benchmarks/listops/")
import input_pipeline
import numpy as np
import pickle
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_datasets(
n_devices = 1, task_name = "basic", data_dir = "./lra_release/listops-1000/",
batch_size = 1, max_length = 2000)
mapping = {"train":train_ds, "dev": eval_ds, "test":test_ds}
for component in mapping:
ds_list = []
for idx, inst in enumerate(iter(mapping[component])):
ds_list.append({
"input_ids_0":np.concatenate([inst["inputs"].numpy()[0], np.zeros(48, dtype = np.int32)]),
"label":inst["targets"].numpy()[0]
})
if idx % 100 == 0:
print(f"{idx}\t\t", end = "\r")
with open(f"listops.{component}.pickle", "wb") as f:
pickle.dump(ds_list, f)
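# Note (added): each pickle produced here is a list of dicts with integer token ids under
# "input_ids_0" and the class index under "label"; LRADataset in lra/dataset.py reads
# exactly these keys and derives the padding mask from the non-zero positions at load time.
# The 48 trailing zeros pad the 2000-token ListOps inputs up to 2048.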
| transformer-ls-master | lra/datasets/listops.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import sys
sys.path.append("./long-range-arena/lra_benchmarks/text_classification/")
import input_pipeline
import numpy as np
import pickle
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_tc_datasets(
n_devices = 1, task_name = "imdb_reviews", data_dir = None,
batch_size = 1, fixed_vocab = None, max_length = 4000)
mapping = {"train":train_ds, "dev": eval_ds, "test":test_ds}
for component in mapping:
ds_list = []
for idx, inst in enumerate(iter(mapping[component])):
ds_list.append({
"input_ids_0":np.concatenate([inst["inputs"].numpy()[0], np.zeros(96, dtype = np.int32)]),
"label":inst["targets"].numpy()[0]
})
if idx % 100 == 0:
print(f"{idx}\t\t", end = "\r")
with open(f"text.{component}.pickle", "wb") as f:
pickle.dump(ds_list, f)
| transformer-ls-master | lra/datasets/text.py |
"""
This file is from https://github.com/mlpen/Nystromformer
"""
import sys
sys.path.append("./long-range-arena/lra_benchmarks/matching/")
import input_pipeline
import numpy as np
import pickle
datapath = './datasets'
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_matching_datasets(
n_devices = 1, task_name = None, data_dir = f"{datapath}/lra_release/lra_release/tsv_data/",
batch_size = 1, fixed_vocab = None, max_length = 4096, tokenizer = "char",
vocab_file_path = None)
mapping = {"train":train_ds, "dev": eval_ds, "test":test_ds}
for component in mapping:
ds_list = []
for idx, inst in enumerate(iter(mapping[component])):
ds_list.append({
"input_ids_0":np.concatenate([inst["inputs1"].numpy()[0]]),
"input_ids_1":np.concatenate([inst["inputs2"].numpy()[0]]),
"label":inst["targets"].numpy()[0]
})
if idx % 100 == 0:
print(f"{idx}\t\t", end = "\r")
with open(f"retrieval.{component}.pickle", "wb") as f:
pickle.dump(ds_list, f)
| transformer-ls-master | lra/datasets/retrieval.py |
transformer-ls-master | lra/datasets/long-range-arena/__init__.py |
|
transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/__init__.py |
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build vocab and cache it so we don't have to keep running."""
import collections
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
flags.DEFINE_string('vocab_file_path', '/tmp/lra_data/aan',
'Path for vocab file output.')
FLAGS = flags.FLAGS
DATASET_PATHS = '/tmp/dataset'
def whitespace_tokenize(text):
"""Splits an input into tokens by whitespace."""
return text.strip().split()
def build_vocab(datasets,
special_tokens=(b'<pad>', b'<unk>', b'<s>', b'</s>'),
min_freq=10,
text_keys=None):
"""Returns a vocabulary of tokens with optional minimum frequency."""
# Count the tokens in the datasets.
logging.info('Building Vocab...')
counter = collections.Counter()
num_processed = 0
for dataset in datasets:
for example in tfds.as_numpy(dataset):
# logging.info(example)
for k in text_keys[:1]:
# logging.info(example[k])
counter.update(whitespace_tokenize(example[k][:100]))
num_processed += 1
if num_processed % 100 == 0:
logging.info('Processed %d', num_processed)
# Add special tokens to the start of vocab.
vocab = collections.OrderedDict()
for token in special_tokens:
vocab[token] = len(vocab)
# Add all other tokens to the vocab if their frequency is >= min_freq.
for token in sorted(list(counter.keys())):
if counter[token] >= min_freq:
vocab[token] = len(vocab)
logging.info('Number of unfiltered tokens: %d', len(counter))
logging.info('Vocabulary size: %d', len(vocab))
return vocab
def get_tsv_dataset(file_path, batch_size):
"""Preprocess dataset."""
tf.logging.info(file_path)
# sel_cols = ['label', 'id1', 'id2']
col_defaults = [tf.string, tf.string, tf.string, tf.string, tf.string]
col_names = ['label', 'id1', 'id2', 'text1', 'text2']
ds = tf.data.experimental.make_csv_dataset([file_path],
batch_size,
column_names=col_names,
column_defaults=col_defaults,
use_quote_delim=False,
field_delim='\t',
shuffle=False,
header=False,
num_epochs=1)
ds = ds.unbatch()
return ds
def get_dataset(batch_size):
"""Get dataset from matching datasets converts into src/tgt pairs."""
train_fps = DATASET_PATHS + '.train.tsv'
train = get_tsv_dataset(train_fps, batch_size)
def adapt_example(example):
return {
'Source1': example['text1'],
'Source2': example['text2'],
'Target': example['label']
}
train = train.map(adapt_example)
train = train.prefetch(tf.data.experimental.AUTOTUNE)
return train
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
train = get_dataset(1)
logging.info('Building/loading subword tokenizer')
encoder = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(
(en['Source1'].numpy() for en in train), target_vocab_size=2**13)
encoder.save_to_file(FLAGS.vocab_file_path)
logging.info('Saved')
if __name__ == '__main__':
app.run(main)
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/matching/build_vocab.py |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.\
"""Input pipeline for the imdb dataset."""
import os
from absl import logging
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
AUTOTUNE = tf.data.experimental.AUTOTUNE
DATASET_PATHS = '/tmp/dataset'
SHUFFLE_BUFFER_SIZE = 2048
def get_tsv_dataset(file_path, batch_size):
"""Preprocess dataset."""
tf.logging.info(file_path)
# sel_cols = ['label', 'id1', 'id2']
col_defaults = [tf.float32, tf.string, tf.string, tf.string, tf.string]
col_names = ['label', 'id1', 'id2', 'text1', 'text2']
ds = tf.data.experimental.make_csv_dataset([file_path],
batch_size,
column_names=col_names,
column_defaults=col_defaults,
use_quote_delim=False,
field_delim='\t',
header=False,
shuffle=True,
shuffle_buffer_size=10000,
num_epochs=1)
ds = ds.unbatch()
return ds
def get_dataset(batch_size, data_path):
"""Get dataset from matching datasets converts into src/tgt pairs."""
train_fps = data_path + '.train.tsv'
valid_fps = data_path + '.eval.tsv'
test_fps = data_path + '.test.tsv'
train = get_tsv_dataset(train_fps, batch_size)
valid = get_tsv_dataset(valid_fps, batch_size)
test = get_tsv_dataset(test_fps, batch_size)
# Print an example.
logging.info('Data sample: %s', next(iter(tfds.as_numpy(test.skip(4)))))
def adapt_example(example):
return {
'Source1': example['text1'],
'Source2': example['text2'],
'Target': example['label']
}
train = train.map(adapt_example)
valid = valid.map(adapt_example)
test = test.map(adapt_example)
return train, valid, test
def get_matching_datasets(n_devices,
task_name,
data_dir=None,
batch_size=256,
fixed_vocab=None,
max_length=512,
tokenizer='subword',
vocab_file_path=None):
"""Get text matching classification datasets."""
if batch_size % n_devices:
raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
(batch_size, n_devices))
del task_name # not used but may be used in the future.
if data_dir is None:
data_path = DATASET_PATHS
else:
data_path = os.path.join(data_dir, 'new_aan_pairs')
train_dataset, val_dataset, test_dataset = get_dataset(batch_size, data_path)
tf.logging.info('Finished getting dataset.')
if tokenizer == 'char':
logging.info('Using char-level/byte dataset..')
encoder = tfds.deprecated.text.ByteTextEncoder()
elif tokenizer == 'subword':
logging.info('Building/loading subword tokenizer')
if vocab_file_path is None:
raise ValueError('tokenizer=subword requires vocab_file_path')
if tf.io.gfile.exists(vocab_file_path + '.subwords'):
logging.info('Found vocab..already exists. loading..')
encoder = tfds.deprecated.text.SubwordTextEncoder.load_from_file(
vocab_file_path)
logging.info('Loaded encoder')
else:
encoder = tfds.deprecated.text.SubwordTextEncoder.build_from_corpus(
(en['Source1'].numpy() for en in train_dataset),
target_vocab_size=2**13)
encoder.save_to_file(vocab_file_path)
logging.info('Saved!')
else:
if fixed_vocab is None:
tf.logging.info('Building vocab')
# build vocab
vocab_set = set()
tokenizer = tfds.deprecated.text.Tokenizer()
i = 0
for example in tfds.as_numpy(train_dataset):
# examples = data['Source1']
examples = tokenizer.tokenize(example['Source1'])
# examples = np.reshape(examples, (-1)).tolist()
vocab_set.update(examples)
if i % 1000 == 0:
tf.logging.info('Processed {}'.format(i))
i += 1
tf.logging.info(len(vocab_set))
vocab_set = list(set(vocab_set))
tf.logging.info('Finished processing vocab size={}'.format(
len(vocab_set)))
else:
vocab_set = list(set(fixed_vocab))
vocab_set = ['<pad>'] + vocab_set
encoder = tfds.deprecated.text.TokenTextEncoder(vocab_set)
def tf_encode(x):
result = tf.py_function(
lambda s: tf.constant(encoder.encode(s.numpy()[:10000])), [
x,
], tf.int32)
result.set_shape([None])
return result
def tokenize(d):
return {
'inputs1': tf_encode(d['Source1'])[:max_length],
'inputs2': tf_encode(d['Source2'])[:max_length],
'targets': tf.cast(d['Target'], tf.int32)
}
train_dataset = train_dataset.map(tokenize, num_parallel_calls=AUTOTUNE)
val_dataset = val_dataset.map(tokenize, num_parallel_calls=AUTOTUNE)
test_dataset = test_dataset.map(tokenize, num_parallel_calls=AUTOTUNE)
max_shape = {'inputs1': [max_length], 'inputs2': [max_length], 'targets': []}
train_dataset = train_dataset.padded_batch(
batch_size, padded_shapes=max_shape, drop_remainder=True)
val_dataset = val_dataset.padded_batch(
batch_size, padded_shapes=max_shape, drop_remainder=True)
test_dataset = test_dataset.padded_batch(
batch_size, padded_shapes=max_shape, drop_remainder=True)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
val_dataset = val_dataset.prefetch(tf.data.experimental.AUTOTUNE)
test_dataset = test_dataset.prefetch(tf.data.experimental.AUTOTUNE)
return train_dataset, val_dataset, test_dataset, encoder
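# Example usage (illustrative sketch, not part of the original module; the
# data_dir below is hypothetical and must contain the preprocessed
# new_aan_pairs.train/.eval/.test .tsv splits):
#
#   train_ds, val_ds, test_ds, encoder = get_matching_datasets(
#       n_devices=8,
#       task_name='aan',
#       data_dir='/tmp/lra_data',   # hypothetical path
#       batch_size=256,
#       max_length=4000,
#       tokenizer='char')
#   batch = next(iter(tfds.as_numpy(train_ds)))
#   # batch['inputs1'], batch['inputs2']: [256, 4000] int32 token ids
#   # batch['targets']: [256] int32 match labels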
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/matching/input_pipeline.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main script for document matching in dual encoder style with AAN dataset."""
import functools
import itertools
import json
import os
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.nn
import jax.numpy as jnp
from lra_benchmarks.matching import input_pipeline
from lra_benchmarks.models.transformer import transformer
from lra_benchmarks.utils import train_utils
from ml_collections import config_flags
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string(
'model_dir', default=None, help='Directory to store model data.')
flags.DEFINE_string(
    'task_name', default='aan', help='Name of the matching task.')
flags.DEFINE_string(
'data_dir', default=None, help='Directory containing datasets.')
flags.DEFINE_string(
'vocab_file_path',
default='/tmp/lra_data/aan',
help='Path for vocab file. Output of `build_vocab`.')
flags.DEFINE_bool(
'test_only', default=False, help='Run the evaluation on the test data.')
def create_model(key, flax_module, input1_shape, input2_shape, model_kwargs):
"""Creates and initializes the model."""
@functools.partial(jax.jit, backend='cpu')
def _create_model(key):
module = flax_module.partial(**model_kwargs)
with nn.stochastic(key):
_, initial_params = module.init_by_shape(key,
[(input1_shape, jnp.float32),
(input2_shape, jnp.float32)])
model = nn.Model(module, initial_params)
return model
return _create_model(key)
def create_optimizer(model, learning_rate, weight_decay):
optimizer_def = optim.Adam(
learning_rate, beta1=0.9, beta2=0.98, eps=1e-9, weight_decay=weight_decay)
optimizer = optimizer_def.create(model)
return optimizer
def compute_metrics(logits, labels, weights):
"""Compute summary metrics."""
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, labels, num_classes=2, weights=None)
acc, _ = train_utils.compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
def train_step(optimizer, batch, learning_rate_fn, dropout_rng=None):
"""Perform a single training step."""
train_keys = ['inputs1', 'inputs2', 'targets']
(inputs1, inputs2, targets) = [batch.get(k, None) for k in train_keys]
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = random.split(dropout_rng)
def loss_fn(model):
"""Loss function used for training."""
with nn.stochastic(dropout_rng):
logits = model(inputs1, inputs2, train=True)
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, targets, num_classes=2, weights=None)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, targets, None)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
def eval_step(model, batch):
eval_keys = ['inputs1', 'inputs2', 'targets']
(inputs1, inputs2, targets) = [batch.get(k, None) for k in eval_keys]
logits = model(inputs1, inputs2, train=False)
return compute_metrics(logits, targets, None)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.enable_v2_behavior()
config = FLAGS.config
logging.info('===========Config Dict============')
logging.info(config)
batch_size = config.batch_size
learning_rate = config.learning_rate
num_train_steps = config.num_train_steps
num_eval_steps = config.num_eval_steps
eval_freq = config.eval_frequency
random_seed = config.random_seed
model_type = config.model_type
max_length = config.max_length
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'summary'))
if batch_size % jax.device_count() > 0:
raise ValueError('Batch size must be divisible by the number of devices')
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_matching_datasets(
n_devices=jax.local_device_count(),
task_name=FLAGS.task_name,
data_dir=FLAGS.data_dir,
batch_size=batch_size,
fixed_vocab=None,
max_length=max_length,
tokenizer=config.tokenizer,
vocab_file_path=FLAGS.vocab_file_path)
vocab_size = encoder.vocab_size
logging.info('Vocab Size: %d', vocab_size)
train_ds = train_ds.repeat()
train_iter = iter(train_ds)
input_shape = (batch_size, max_length)
model_kwargs = {
'vocab_size': vocab_size,
'emb_dim': config.emb_dim,
'num_heads': config.num_heads,
'num_layers': config.num_layers,
'qkv_dim': config.qkv_dim,
'mlp_dim': config.mlp_dim,
'max_len': max_length,
'classifier': True,
'num_classes': 2,
'classifier_pool': config.pooling_mode
}
rng = random.PRNGKey(random_seed)
rng = jax.random.fold_in(rng, jax.host_id())
rng, init_rng = random.split(rng)
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
dropout_rngs = random.split(rng, jax.local_device_count())
if model_type == 'transformer':
model = create_model(init_rng, transformer.TransformerDualEncoder,
input_shape, input_shape, model_kwargs)
else:
raise ValueError('Model type not supported.')
optimizer = create_optimizer(
model, learning_rate, weight_decay=FLAGS.config.weight_decay)
del model # Don't keep a copy of the initial model.
start_step = 0
if config.restore_checkpoints or FLAGS.test_only:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
learning_rate_fn = train_utils.create_learning_rate_scheduler(
factors=config.factors,
base_learning_rate=learning_rate,
warmup_steps=config.warmup)
p_train_step = jax.pmap(
functools.partial(train_step, learning_rate_fn=learning_rate_fn),
axis_name='batch')
p_eval_step = jax.pmap(eval_step, axis_name='batch')
# p_pred_step = jax.pmap(predict_step, axis_name='batch')
def run_eval(eval_ds, num_eval_steps=-1):
eval_metrics = []
eval_iter = iter(eval_ds)
if num_eval_steps == -1:
num_iter = itertools.count()
else:
num_iter = range(num_eval_steps)
for _, eval_batch in zip(num_iter, eval_iter):
# pylint: disable=protected-access
eval_batch = common_utils.shard(
jax.tree_map(lambda x: x._numpy(), eval_batch))
# pylint: enable=protected-access
metrics = p_eval_step(optimizer.target, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
# Calculate (clipped) perplexity after averaging log-perplexities:
eval_summary['perplexity'] = jnp.clip(
jnp.exp(eval_summary['loss']), a_max=1.0e4)
return eval_summary
if FLAGS.test_only:
with tf.io.gfile.GFile(os.path.join(FLAGS.model_dir, 'results.json'),
'w') as f:
test_summary = run_eval(test_ds)
json.dump(jax.tree_map(lambda x: x.tolist(), test_summary), f)
return
metrics_all = []
tick = time.time()
logging.info('Starting training')
logging.info('====================')
for step, batch in zip(range(start_step, num_train_steps), train_iter):
batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access
# logging.info(batch)
optimizer, metrics, dropout_rngs = p_train_step(
optimizer, batch, dropout_rng=dropout_rngs)
metrics_all.append(metrics)
logging.info('train in step: %d', step)
# Save a Checkpoint
if ((step % config.checkpoint_freq == 0 and step > 0) or
step == num_train_steps - 1):
if jax.host_id() == 0 and config.save_checkpoints:
# Save unreplicated optimizer + model state.
checkpoints.save_checkpoint(FLAGS.model_dir,
jax_utils.unreplicate(optimizer), step)
# Periodic metric handling.
if step % eval_freq == 0 and step > 0:
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop
summary['learning_rate'] = lr
# Calculate (clipped) perplexity after averaging log-perplexities:
summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)
logging.info('train in step: %d, loss: %.4f, acc: %.4f', step,
summary['loss'], summary['accuracy'])
if jax.host_id() == 0:
tock = time.time()
steps_per_sec = eval_freq / (tock - tick)
tick = tock
summary_writer.scalar('steps per second', steps_per_sec, step)
for key, val in summary.items():
summary_writer.scalar(f'train_{key}', val, step)
summary_writer.flush()
# Reset metric accumulation for next evaluation cycle.
metrics_all = []
# Eval Metrics
eval_summary = run_eval(eval_ds, num_eval_steps)
logging.info('eval in step: %d, loss: %.4f, acc: %.4f', step,
eval_summary['loss'], eval_summary['accuracy'])
if jax.host_id() == 0:
for key, val in eval_summary.items():
summary_writer.scalar(f'eval_{key}', val, step)
summary_writer.flush()
# Test eval
# Eval Metrics
logging.info('Testing...')
test_summary = run_eval(test_ds, num_eval_steps)
logging.info('test in step: %d, loss: %.4f, acc: %.4f', step,
test_summary['loss'], test_summary['accuracy'])
if jax.host_id() == 0:
for key, val in test_summary.items():
summary_writer.scalar(f'test_{key}', val, step)
summary_writer.flush()
if __name__ == '__main__':
app.run(main)
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/matching/train.py |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Configuration."""
import ml_collections
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.batch_size = 32
config.eval_frequency = 100
config.num_train_steps = 5000
config.num_eval_steps = -1
config.learning_rate = 0.05
config.weight_decay = 1e-1
config.max_target_length = 200 # ignored
config.max_eval_target_length = 200 # ignored
config.sampling_temperature = 0.6
config.sampling_top_k = 20
config.max_predict_token_length = 50
config.save_checkpoints = True
config.restore_checkpoints = True
config.checkpoint_freq = 10000
config.random_seed = 0
config.prompt = ""
config.factors = "constant * linear_warmup * rsqrt_decay"
config.warmup = 8000
config.max_length = 4000
config.pooling_mode = "CLS"
config.tokenizer = "char"
config.emb_dim = 128
config.num_heads = 4
config.num_layers = 4
config.qkv_dim = 128
config.mlp_dim = 512
config.trial = 0 # dummy for repeated runs.
return config
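# Example launch (illustrative sketch; the exact module and file paths are
# assumptions, not taken from the original repo). The matching train script
# consumes this config through ml_collections' config_flags, usually via a
# model config such as transformer_base.py that extends this base config:
#
#   python -m lra_benchmarks.matching.train \
#     --config=lra_benchmarks/matching/configs/transformer_base.py \
#     --model_dir=/tmp/aan_model \
#     --vocab_file_path=/tmp/lra_data/aan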
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/matching/configs/base_match_config.py |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.matching.configs import base_match_config
def get_config():
"""Get the default hyperparameter configuration."""
config = base_match_config.get_config()
config.model_type = "transformer"
return config
def get_hyper(hyper):
return hyper.product([])
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/matching/configs/transformer_base.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This contains utility functions for model training and evaluation."""
import functools
from flax import nn
from flax import optim
from flax.training import common_utils
import jax
import jax.numpy as jnp
import numpy as onp
def create_learning_rate_scheduler(
factors='constant * linear_warmup * rsqrt_decay',
base_learning_rate=0.5,
warmup_steps=1000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by '*' that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
Returns:
a function learning_rate(step): float -> {'learning_rate': float}, the
step-dependent lr.
"""
factors = [n.strip() for n in factors.split('*')]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == 'constant':
ret *= base_learning_rate
elif name == 'linear_warmup':
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == 'rsqrt_decay':
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'rsqrt_normalized_decay':
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == 'decay_every':
ret *= (decay_factor**(step // steps_per_decay))
elif name == 'cosine_decay':
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError('Unknown factor %s.' % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
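# Illustrative sketch (not part of the original module): a tiny demo of how the
# factors compose. With 'constant * linear_warmup * rsqrt_decay' the rate warms
# up linearly to base_learning_rate / sqrt(warmup_steps) and then decays as
# 1 / sqrt(step).
def _example_lr_schedule():
  lr_fn = create_learning_rate_scheduler(
      factors='constant * linear_warmup * rsqrt_decay',
      base_learning_rate=0.5,
      warmup_steps=1000)
  # Roughly 1.6e-3 early in warmup, 1.6e-2 at the peak, 4.0e-3 later on.
  return [lr_fn(step) for step in (100, 1000, 16000)]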
def create_model(key, flax_module, input_shape, target_shape, model_kwargs):
"""Creates and initializes the model."""
@functools.partial(jax.jit, backend='cpu')
def _create_model(key):
model_def = flax_module.partial(**model_kwargs)
with nn.attention.Cache().mutate() as cache_def:
_, initial_params = model_def.init_by_shape(
key, [(input_shape, jnp.float32), (target_shape, jnp.float32)],
cache=cache_def)
model = nn.Model(model_def, initial_params)
return model, cache_def
return _create_model(key)
def create_optimizer(model, learning_rate, weight_decay):
optimizer_def = optim.Adam(
learning_rate, beta1=0.9, beta2=0.98, eps=1e-9, weight_decay=weight_decay)
optimizer = optimizer_def.create(model)
return optimizer
def compute_weighted_cross_entropy(logits, targets, num_classes, weights=None):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, num_classes] float array.
targets: categorical targets [batch, length] int array.
num_classes: int, num classes of problem.
weights: None or array of shape [batch x length]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
onehot_targets = common_utils.onehot(targets, num_classes)
loss = -jnp.sum(onehot_targets * nn.log_softmax(logits), axis=-1)
normalizing_factor = onehot_targets.sum()
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, num_classes] float array.
targets: categorical targets [batch] int array.
weights: None or array of shape [batch]
Returns:
Tuple of scalar accuracy and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError('Incorrect shapes. Got shape %s logits and %s targets' %
(str(logits.shape), str(targets.shape)))
loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = onp.prod(logits.shape[:-1])
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
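# Worked example (illustrative, not part of the original module) of the two
# metrics above on a tiny batch of two examples and two classes.
def _example_metrics():
  logits = jnp.array([[2.0, 0.5], [0.1, 1.0]])
  targets = jnp.array([0, 0])
  loss, norm = compute_weighted_cross_entropy(logits, targets, num_classes=2)
  # Per-example cross entropies are ~0.20 and ~1.24, so loss ~= 1.44, norm == 2.
  correct, denom = compute_weighted_accuracy(logits, targets)
  # The first prediction (argmax 0) is right, the second (argmax 1) is wrong,
  # so correct == 1 and denom == 2.
  return loss / norm, correct / denom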
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/utils/train_utils.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script contains utility functions for data preprocessing and output post-processing."""
from collections import defaultdict ## pylint: disable=g-importing-member
import tempfile
import time
from absl import logging
import jax
import nltk
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
import tensorflow_text as tftxt
from sentencepiece import SentencePieceTrainer
PAD_TOKEN = {"index": 0, "token": "<pad>"}
UNK_TOKEN = {"index": 1, "token": "<unk>"}
EOS_TOKEN = {"index": 2, "token": "<eos>"}
BOS_TOKEN = {"index": 3, "token": "<bos>"}
nltk.download("punkt")
def filter_non_ascii(s):
"""Filter non-ascii characters from a string."""
if isinstance(s, bytes):
s = s.decode("utf-8")
return s.encode("ascii", errors="ignore").decode("utf-8")
def nltk_tokenize(s: str):
"""Tokenize a string to a sequence of tokens with nltk tokenizer.
Args:
s: str: input string.
Returns:
A tokenized string.
"""
return nltk.word_tokenize(s)
def add_bos_token(s: str):
return BOS_TOKEN["token"] + " " + s
def add_eos_token(s: str):
return s + " " + EOS_TOKEN["token"]
def build_vocab(train_dataset, fields, vocab_size=None, min_freq=0):
"""Build word vocab from the train dataset.
  Assume the dataset has been preprocessed, tokenized, and lowercased properly.
Args:
train_dataset: tf.Dataset: the training dataset.
fields: List[str]: the data fields for building the vocab.
vocab_size: None or int.
min_freq: int: minimum token frequency to be kept in the vocab.
Returns:
A vocab dictionary.
"""
vocab = {
PAD_TOKEN["token"]: PAD_TOKEN["index"],
UNK_TOKEN["token"]: UNK_TOKEN["index"],
BOS_TOKEN["token"]: BOS_TOKEN["index"],
EOS_TOKEN["token"]: EOS_TOKEN["index"]
}
word_freqs = defaultdict(int)
for example in train_dataset:
for field in fields:
s = example[field].numpy().decode("utf-8")
for token in s.split():
word_freqs[token] += 1
# filter vocab by min_freq and vocab size.
sorted_word_freqs = sorted(
word_freqs.items(), key=lambda item: item[1], reverse=True)
if vocab_size:
sorted_word_freqs = sorted_word_freqs[:vocab_size]
for (token, freq) in sorted_word_freqs:
if freq >= min_freq:
if token not in vocab:
vocab[token] = len(vocab)
logging.info("Vocab size: before filtering (%d), after(%d)", len(word_freqs),
len(vocab))
# logging.info("Top 10 frequent tokens: ", sorted_word_freqs[:10])
# logging.info("Bottom 10 frequent tokens: ", sorted_word_freqs[-10:])
return vocab
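# Illustrative sketch (not part of the original module): building a vocab from
# a tiny in-memory dataset with a single "text" field. Field name and strings
# are assumptions for demonstration only.
def _example_build_vocab():
  ds = tf.data.Dataset.from_tensor_slices(
      {"text": [b"the cat sat", b"the dog"]})
  vocab = build_vocab(ds, fields=["text"])
  # -> {'<pad>': 0, '<unk>': 1, '<bos>': 3, '<eos>': 2, 'the': 4, 'cat': 5,
  #     'sat': 6, 'dog': 7}
  return vocab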
# -----------------------------------------------------------------------------
# Train and Load SentencePiece Tokenizer.
# -----------------------------------------------------------------------------
def dump_chars_to_textfile(dataset,
maxchars=1e9,
data_keys=("inputs", "targets")):
"""Write part of a TFDS sentence dataset to lines in a text file.
Args:
dataset: tf.dataset containing string-data.
maxchars: int: approximate number of characters to save from dataset.
data_keys: Tuple[str]: what keys in dataset to dump from.
Returns:
name of temp file with dataset bytes, exact number of characters dumped.
"""
char_count = 0
processed_examples = 0
ds_iter = dataset.as_numpy_iterator()
with tempfile.NamedTemporaryFile(
delete=False, prefix="/tmp/ds_chars") as outfp:
while char_count < maxchars:
example = next(ds_iter, None)
processed_examples += 1
if example is None:
break
for k in data_keys:
line = example[k] + b"\n"
char_count += len(line)
outfp.write(line)
logging.info("%d examples processed for training sentencepiece tokenizer.",
processed_examples)
return outfp.name, char_count
def train_sentencepiece(dataset,
vocab_size,
maxchars=1e9,
character_coverage=1.0,
model_path="model",
model_type="unigram",
data_keys=("inputs", "targets")):
"""Train SentencePiece tokenizer from subset of tf dataset.
Args:
dataset: tf.dataset
vocab_size: int: size of vocab tokens to train.
maxchars: int: number of characters to use for sentencepiece training.
character_coverage: amount of characters covered by the model, good defaults
are 0.9995 for languages with rich character set like Japanese or Chinese
and 1.0 for other languages with small character set.
model_path: str: path of model file to save vocab model to.
model_type: str: type of sentencepiece vocab to train.
data_keys: Tuple[str]: keys of dataset to use for training.
Returns:
path to the trained sentencepiece vocabulary model.
"""
fname, _ = dump_chars_to_textfile(
dataset, maxchars=maxchars, data_keys=data_keys)
with tempfile.NamedTemporaryFile(
delete=False, prefix="/tmp/sp_tmp") as model_fp:
pass # we just want a prefix'd tmp-filename
argstr = " ".join([
f"--input={fname}", f"--vocab_size={vocab_size}",
f"--character_coverage={character_coverage}",
f"--model_prefix={model_fp.name}", f"--model_type={model_type}"
])
SentencePieceTrainer.Train(argstr)
if jax.host_id() == 0:
# Use an intermediate filename that is renamed to the target name to address
# create and fill delays.
copy_rename_path = model_path + ".rntmp"
tf.io.gfile.copy(model_fp.name + ".model", copy_rename_path, overwrite=True)
tf.io.gfile.rename(copy_rename_path, model_path, overwrite=True)
tf.io.gfile.copy(
model_fp.name + ".vocab", copy_rename_path + ".vocab", overwrite=True)
tf.io.gfile.rename(
copy_rename_path + ".vocab", model_path + ".vocab", overwrite=True)
logging.info("copied %s to %s", model_fp.name + ".model", model_path)
else:
while not tf.io.gfile.exists(model_path):
time.sleep(1)
time.sleep(1)
return model_path
def load_sentencepiece_tokenizer(model_path,
add_bos=True,
add_eos=True,
reverse=False):
"""Load a tf-text SentencePiece tokenizer from given model filepath."""
with tf.io.gfile.GFile(model_path, "rb") as model_fp:
sp_model = model_fp.read()
sp_tokenizer = tftxt.SentencepieceTokenizer(
model=sp_model, add_bos=add_bos, add_eos=add_eos, reverse=reverse)
return sp_tokenizer
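# Example usage (illustrative sketch, not part of the original module; the
# dataset, vocab size, and model path are assumptions and training needs a
# reasonably large corpus to succeed):
#
#   vocab_path = train_sentencepiece(ds, vocab_size=8000,
#                                    model_path='/tmp/lra_spm.model',
#                                    data_keys=('inputs',))
#   sp_tokenizer = load_sentencepiece_tokenizer(vocab_path)
#   ids = sp_tokenizer.tokenize("a long input document")  # int ids w/ bos/eos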
def load_tfds_dataset(data_dir, dataset_name, split, shuffle=True):
"""Return train and evaluation datasets, feature info and supervised keys.
Args:
data_dir: directory where the data is located.
dataset_name: a string, the name of the TFDS dataset.
split: string: the split of the dataset, e.g., {train, validation, test}
shuffle: Boolean determining whether or not to shuffle the train files at
startup. Set to False if you want data determinism.
Returns:
a 3-tuple consisting of:
* the train tf.Dataset
* information about features: a python dictionary with feature names
as keys and an object as value that provides .shape and .n_classes.
* supervised_keys: information what's the input and what's the target,
ie., a pair of lists with input and target feature names.
"""
dataset_builder = tfds.builder(dataset_name, data_dir=data_dir)
info = dataset_builder.info
splits = dataset_builder.info.splits
if split not in splits:
raise ValueError(
f"{split} not exists in the dataset {data_dir}/{dataset_name}/{splits}."
)
dataset = tfds.load(
name=dataset_name, split=split, data_dir=data_dir, shuffle_files=shuffle)
keys = None
if info.supervised_keys:
keys = info.supervised_keys
return dataset, info.features, keys
def bin_and_batch(
dataset,
length_fn, # the length function of an input sample
training,
n_devices,
target_batch_size=256,
target_bucket_length=32,
buckets=None,
max_eval_length=None,
drop_remainder=False):
"""Batching function, can specify batch size directly or per-device.
Args:
dataset: tf dataset containing individual sequences.
length_fn: a function to determine the sample length.
training: bool: is this a train or eval dataset.
n_devices: number of devices this dataset will be run on.
target_batch_size: int: the target batch size for binned batches.
target_bucket_length: int: the target sequence length for binned batches.
buckets: (List[int], List[int]): manually specified length buckets and batch
sizes for bins.
    max_eval_length: int: for the eval set, allow an extra long-sequence bin.
    drop_remainder: bool: if true, drop the last batch when it is not evenly
      divisible by the batch sizes (e.g. not divisible by n_devices).
Returns:
    Dynamically binned batches of sequences that roughly keep the total
    number of tokens (target_batch_size * target_bucket_length) the same, while
    ensuring batch sizes are divisible by n_devices for distributed training.
"""
  # Create heuristic buckets if none are specified.
if buckets is None:
logging.info("Heuristically bucketing based on shapes of examples.")
bucket_boundaries = [
target_bucket_length // 4, target_bucket_length // 2,
target_bucket_length, target_bucket_length * 2,
target_bucket_length * 4, target_bucket_length * 8,
target_bucket_length * 16
]
bucket_batch_sizes = [
target_batch_size * 4, target_batch_size * 2, target_batch_size,
target_batch_size // 2, target_batch_size // 4, target_batch_size // 8,
target_batch_size // 16
]
    # allow for a different evaluation max-length bucket and batch size
if not training:
max_eval_length = max_eval_length or target_bucket_length * 32
bucket_boundaries[-1] = max_eval_length
bucket_batch_sizes[-1] = (
target_batch_size // (max_eval_length // target_bucket_length))
# We will pad to boundaries which pads to bucket_boundary-1: add 1 here.
bucket_boundaries = [b + 1 for b in bucket_boundaries]
# Make batch sizes divisible by n_devices.
bucket_batch_sizes = [
max(b // n_devices, 1) * n_devices for b in bucket_batch_sizes
]
buckets = (bucket_boundaries, bucket_batch_sizes)
logging.info("Bucketing with buckets %s.", str(buckets))
boundaries, batch_sizes = buckets
# bucket_by_sequence_length expects a final dummy 1 batch_size
batch_sizes.append(1)
dataset = dataset.apply(
tf.data.experimental.bucket_by_sequence_length(
length_fn,
boundaries,
batch_sizes,
pad_to_bucket_boundary=True,
drop_remainder=drop_remainder))
return dataset
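# Worked example of the heuristic bucketing above (illustrative, not part of
# the original module). With training=True, n_devices=8, target_batch_size=256
# and target_bucket_length=32, the buckets become:
#
#   bucket_boundaries  = [9, 17, 33, 65, 129, 257, 513]   # boundary + 1
#   bucket_batch_sizes = [1024, 512, 256, 128, 64, 32, 16]
#
# so a sequence of length 40 is padded to 64 tokens and batched 128 at a time,
# keeping the per-batch token budget near 256 * 32 = 8192.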
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/utils/data_utils.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapping tasks to data loaders."""
import functools
from lra_benchmarks.image import input_pipeline
TASK_DATA_DICT = {
'cifar10':
input_pipeline.get_cifar10_datasets,
'pathfinder32_easy':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=32,
split='easy'),
'pathfinder32_inter':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=32,
split='intermediate'),
'pathfinder32_hard':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=32,
split='hard'),
'pathfinder64_easy':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=64,
split='easy'),
'pathfinder64_inter':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=64,
split='intermediate'),
'pathfinder64_hard':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=64,
split='hard'),
'pathfinder128_easy':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=128,
split='easy'),
'pathfinder128_inter':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=128,
split='intermediate'),
'pathfinder128_hard':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=128,
split='hard'),
'pathfinder256_easy':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=256,
split='easy'),
'pathfinder256_inter':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=256,
split='intermediate'),
'pathfinder256_hard':
functools.partial(
input_pipeline.get_pathfinder_base_datasets,
resolution=256,
split='hard'),
}
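# Example lookup (illustrative sketch, not part of the original module; it
# assumes the Pathfinder TFDS files are available locally):
#
#   get_data = TASK_DATA_DICT['pathfinder32_easy']
#   (train_ds, val_ds, test_ds, n_classes,
#    vocab_size, input_shape) = get_data(n_devices=8, batch_size=256)
#   # n_classes == 2, vocab_size == 256, input_shape == (256, 32, 32, 1)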
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/task_registry.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to get dataset pipeline for the image cls tasks."""
from lra_benchmarks.data import pathfinder
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
AUTOTUNE = tf.data.experimental.AUTOTUNE
# NOTE: placeholder value (an assumption, not the original setting); this
# constant is referenced by the Pathfinder loaders below and should point at
# the directory holding the prepared Pathfinder TFDS data.
_PATHFINER_TFDS_PATH = '/tmp/lra_data/pathfinder_tfds'
def get_mnist_datasets(n_devices, batch_size=256, normalize=False):
"""Get MNIST dataset splits."""
if batch_size % n_devices:
raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
(batch_size, n_devices))
train_dataset = tfds.load('mnist', split='train[:90%]')
val_dataset = tfds.load('mnist', split='train[90%:]')
test_dataset = tfds.load('mnist', split='test')
def decode(x):
decoded = {
'inputs': tf.cast(x['image'], dtype=tf.int32),
'targets': x['label']
}
if normalize:
decoded['inputs'] = decoded['inputs'] / 255
return decoded
train_dataset = train_dataset.map(decode, num_parallel_calls=AUTOTUNE)
val_dataset = val_dataset.map(decode, num_parallel_calls=AUTOTUNE)
test_dataset = test_dataset.map(decode, num_parallel_calls=AUTOTUNE)
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
test_dataset = test_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.shuffle(
buffer_size=256, reshuffle_each_iteration=True)
return train_dataset, val_dataset, test_dataset, 10, 256, (batch_size, 28, 28,
1)
def get_cifar10_datasets(n_devices, batch_size=256, normalize=False):
"""Get CIFAR-10 dataset splits."""
if batch_size % n_devices:
raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
(batch_size, n_devices))
train_dataset = tfds.load('cifar10', split='train[:90%]')
val_dataset = tfds.load('cifar10', split='train[90%:]')
test_dataset = tfds.load('cifar10', split='test')
def decode(x):
decoded = {
'inputs':
tf.cast(tf.image.rgb_to_grayscale(x['image']), dtype=tf.int32),
'targets':
x['label']
}
if normalize:
decoded['inputs'] = decoded['inputs'] / 255
return decoded
train_dataset = train_dataset.map(decode, num_parallel_calls=AUTOTUNE)
val_dataset = val_dataset.map(decode, num_parallel_calls=AUTOTUNE)
test_dataset = test_dataset.map(decode, num_parallel_calls=AUTOTUNE)
# train_dataset = train_dataset.repeat()
# train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
# val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
# test_dataset = test_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.batch(batch_size, drop_remainder=False)
val_dataset = val_dataset.batch(batch_size, drop_remainder=False)
test_dataset = test_dataset.batch(batch_size, drop_remainder=False)
# train_dataset = train_dataset.shuffle(
# buffer_size=256, reshuffle_each_iteration=True)
return train_dataset, val_dataset, test_dataset, 10, 256, (batch_size, 32, 32,
1)
def get_pathfinder_orig_datasets(n_devices, batch_size=256, normalize=False):
"""Get Pathfinder dataset splits."""
if batch_size % n_devices:
raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
(batch_size, n_devices))
builder = pathfinder.Pathfinder(data_dir=_PATHFINER_TFDS_PATH)
def get_split(split):
ds_p = builder.as_dataset(split=f'positive{split}')
ds_n = builder.as_dataset(split=f'negetive{split}')
ds = tf.data.experimental.sample_from_datasets([ds_p, ds_n],
weights=None,
seed=None)
return ds
train_dataset = get_split('[:80%]')
val_dataset = get_split('[80%:90%]')
test_dataset = get_split('[90%:]')
def decode(x):
decoded = {
'inputs':
tf.cast(tf.image.rgb_to_grayscale(x['image']), dtype=tf.int32),
'targets':
x['label']
}
if normalize:
decoded['inputs'] = decoded['inputs'] / 255
return decoded
train_dataset = train_dataset.map(decode, num_parallel_calls=AUTOTUNE)
val_dataset = val_dataset.map(decode, num_parallel_calls=AUTOTUNE)
test_dataset = test_dataset.map(decode, num_parallel_calls=AUTOTUNE)
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
test_dataset = test_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.shuffle(
buffer_size=256, reshuffle_each_iteration=True)
return train_dataset, val_dataset, test_dataset, 2, 256, (batch_size, 300,
300, 1)
def get_pathfinder_base_datasets(n_devices,
batch_size=256,
resolution=32,
normalize=False,
split='easy'):
"""Get Pathfinder dataset splits."""
if batch_size % n_devices:
raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
(batch_size, n_devices))
if split not in ['easy', 'intermediate', 'hard']:
raise ValueError("split must be in ['easy', 'intermediate', 'hard'].")
if resolution == 32:
builder = pathfinder.Pathfinder32(data_dir=_PATHFINER_TFDS_PATH)
inputs_shape = (batch_size, 32, 32, 1)
elif resolution == 64:
builder = pathfinder.Pathfinder64(data_dir=_PATHFINER_TFDS_PATH)
inputs_shape = (batch_size, 64, 64, 1)
elif resolution == 128:
builder = pathfinder.Pathfinder128(data_dir=_PATHFINER_TFDS_PATH)
inputs_shape = (batch_size, 128, 128, 1)
elif resolution == 256:
builder = pathfinder.Pathfinder256(data_dir=_PATHFINER_TFDS_PATH)
inputs_shape = (batch_size, 256, 256, 1)
else:
raise ValueError('Resolution must be in [32, 64, 128, 256].')
def get_split(split):
ds = builder.as_dataset(
split=split, decoders={'image': tfds.decode.SkipDecoding()})
# Filter out examples with empty images:
ds = ds.filter(lambda x: tf.strings.length((x['image'])) > 0)
return ds
train_dataset = get_split(f'{split}[:80%]')
val_dataset = get_split(f'{split}[80%:90%]')
test_dataset = get_split(f'{split}[90%:]')
def decode(x):
decoded = {
'inputs': tf.cast(tf.image.decode_png(x['image']), dtype=tf.int32),
'targets': x['label']
}
if normalize:
decoded['inputs'] = decoded['inputs'] / 255
return decoded
train_dataset = train_dataset.map(decode, num_parallel_calls=AUTOTUNE)
val_dataset = val_dataset.map(decode, num_parallel_calls=AUTOTUNE)
test_dataset = test_dataset.map(decode, num_parallel_calls=AUTOTUNE)
train_dataset = train_dataset.repeat()
train_dataset = train_dataset.batch(batch_size, drop_remainder=True)
val_dataset = val_dataset.batch(batch_size, drop_remainder=True)
test_dataset = test_dataset.batch(batch_size, drop_remainder=True)
train_dataset = train_dataset.shuffle(
buffer_size=256 * 8, reshuffle_each_iteration=True)
return train_dataset, val_dataset, test_dataset, 2, 256, inputs_shape
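# Example element structure (illustrative, not part of the original module;
# assumes the Pathfinder TFDS files exist under _PATHFINER_TFDS_PATH):
#
#   train_ds, _, _, n_classes, vocab_size, shape = get_pathfinder_base_datasets(
#       n_devices=8, batch_size=256, resolution=32, split='easy')
#   batch = next(iter(train_ds))
#   # batch['inputs']:  [256, 32, 32, 1] integer pixel values in [0, 255]
#   # batch['targets']: [256] integer labels in {0, 1}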
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/input_pipeline.py |
transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/__init__.py |
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main training script for the image classification task."""
import functools
import itertools
import json
import os
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.nn
import jax.numpy as jnp
from lra_benchmarks.image import task_registry
from lra_benchmarks.models.transformer import transformer
from lra_benchmarks.utils import train_utils
from ml_collections import config_flags
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string(
'model_dir', default=None, help='Directory to store model data.')
flags.DEFINE_string('task_name', default='mnist', help='Name of the task')
flags.DEFINE_bool(
'eval_only', default=False, help='Run the evaluation on the test data.')
def create_model(key, flax_module, input_shape, model_kwargs):
"""Creates and initializes the model."""
@functools.partial(jax.jit, backend='cpu')
def _create_model(key):
module = flax_module.partial(**model_kwargs)
with nn.stateful() as init_state:
with nn.stochastic(key):
_, initial_params = module.init_by_shape(key,
[(input_shape, jnp.float32)])
model = nn.Model(module, initial_params)
return model, init_state
return _create_model(key)
def create_optimizer(model, learning_rate, weight_decay):
optimizer_def = optim.Adam(
learning_rate, beta1=0.9, beta2=0.98, eps=1e-9, weight_decay=weight_decay)
optimizer = optimizer_def.create(model)
return optimizer
def compute_metrics(logits, labels, num_classes, weights):
"""Compute summary metrics."""
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, labels, num_classes, weights=weights)
acc, _ = train_utils.compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
def get_model(init_rng, input_shape, model_type, model_kwargs):
"""Create and initialize the model.
Args:
init_rng: float; Jax PRNG key.
input_shape: tuple; Tuple indicating input shape.
model_type: str; Type of Transformer model to create.
model_kwargs: keyword argument to the model.
Returns:
Initialized model.
"""
if model_type == 'transformer':
return create_model(init_rng, transformer.TransformerEncoder, input_shape,
model_kwargs)
else:
raise ValueError('Model type not supported')
def train_step(optimizer,
state,
batch,
learning_rate_fn,
num_classes,
flatten_input=True,
grad_clip_norm=None,
dropout_rng=None):
"""Perform a single training step."""
train_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in train_keys]
if flatten_input:
inputs = inputs.reshape(inputs.shape[0], -1)
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = random.split(dropout_rng)
def loss_fn(model):
"""Loss function used for training."""
with nn.stateful(state) as new_state:
with nn.stochastic(dropout_rng):
logits = model(inputs, train=True)
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, targets, num_classes=num_classes, weights=None)
mean_loss = loss / weight_sum
return mean_loss, (new_state, logits)
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, (new_state, logits)), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
if grad_clip_norm:
# Optionally resize the global gradient to a maximum norm.
gradients, _ = jax.tree_flatten(grad)
g_l2 = jnp.sqrt(sum([jnp.vdot(p, p) for p in gradients]))
g_factor = jnp.minimum(1.0, grad_clip_norm / g_l2)
grad = jax.tree_map(lambda p: g_factor * p, grad)
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, targets, num_classes, weights=None)
metrics['learning_rate'] = lr
return new_optimizer, new_state, metrics, new_dropout_rng
def eval_step(model, state, batch, num_classes, flatten_input=True):
eval_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in eval_keys]
if flatten_input:
inputs = inputs.reshape(inputs.shape[0], -1)
if jax.tree_leaves(state):
state = jax.lax.pmean(state, 'batch')
with nn.stateful(state, mutable=False):
logits = model(inputs, train=False)
return compute_metrics(logits, targets, num_classes, weights=None)
def test(optimizer, state, p_eval_step, step, test_ds, summary_writer,
model_dir):
"""Test the flax module in optimizer on test_ds.
Args:
optimizer: flax optimizer (contains flax module).
state: model state, e.g. batch statistics.
p_eval_step: fn; Pmapped evaluation step function.
step: int; Number of training steps passed so far.
test_ds: tf.dataset; Test dataset.
summary_writer: tensorflow summary writer.
model_dir: model directory.
"""
# Test Metrics
test_metrics = []
test_iter = iter(test_ds)
for _, test_batch in zip(itertools.repeat(1), test_iter):
# pylint: disable=protected-access
test_batch = common_utils.shard(
jax.tree_map(lambda x: x._numpy(), test_batch))
# pylint: enable=protected-access
metrics = p_eval_step(optimizer.target, state, test_batch)
test_metrics.append(metrics)
test_metrics = common_utils.get_metrics(test_metrics)
test_metrics_sums = jax.tree_map(jnp.sum, test_metrics)
test_denominator = test_metrics_sums.pop('denominator')
test_summary = jax.tree_map(
lambda x: x / test_denominator, # pylint: disable=cell-var-from-loop
test_metrics_sums)
logging.info('test in step: %d, loss: %.4f, acc: %.4f', step,
test_summary['loss'], test_summary['accuracy'])
if jax.host_id() == 0:
for key, val in test_summary.items():
summary_writer.scalar(f'test_{key}', val, step)
summary_writer.flush()
with tf.io.gfile.GFile(os.path.join(model_dir, 'results.json'), 'w') as f:
json.dump(jax.tree_map(lambda x: x.tolist(), test_summary), f)
def train_loop(config, dropout_rngs, eval_ds, eval_freq, num_eval_steps,
num_train_steps, optimizer, state, p_eval_step, p_train_step,
start_step, train_iter, summary_writer):
"""Training loop.
Args:
config: experiment config.
dropout_rngs: float array; Jax PRNG key.
eval_ds: tf.dataset; Evaluation dataset.
eval_freq: int; Evaluation frequency;
num_eval_steps: int; Number of evaluation steps.
num_train_steps: int; Number of training steps.
optimizer: flax optimizer.
state: model state, e.g. batch statistics.
p_eval_step: fn; Pmapped evaluation step function.
p_train_step: fn; Pmapped train step function.
start_step: int; global training step.
train_iter: iter(tf.dataset); Training data iterator.
summary_writer: tensorflow summary writer.
Returns:
optimizer, global training step
"""
metrics_all = []
tick = time.time()
logging.info('Starting training')
logging.info('====================')
step = 0
for step, batch in zip(range(start_step, num_train_steps), train_iter):
batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access
optimizer, state, metrics, dropout_rngs = p_train_step(
optimizer, state, batch, dropout_rng=dropout_rngs)
metrics_all.append(metrics)
# Save a Checkpoint
if ((step % config.checkpoint_freq == 0 and step > 0) or
step == num_train_steps - 1):
if jax.host_id() == 0 and config.save_checkpoints:
# Save unreplicated optimizer + model state.
checkpoints.save_checkpoint(
FLAGS.model_dir,
(jax_utils.unreplicate(optimizer), jax_utils.unreplicate(state)),
step)
# Periodic metric handling.
if step % eval_freq == 0 and step > 0:
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop
summary['learning_rate'] = lr
# Calculate (clipped) perplexity after averaging log-perplexities:
logging.info('train in step: %d, loss: %.4f, acc: %.4f', step,
summary['loss'], summary['accuracy'])
if jax.host_id() == 0:
tock = time.time()
steps_per_sec = eval_freq / (tock - tick)
tick = tock
summary_writer.scalar('examples_per_second',
steps_per_sec * config.batch_size, step)
for key, val in summary.items():
summary_writer.scalar(f'train_{key}', val, step)
summary_writer.flush()
# Reset metric accumulation for next evaluation cycle.
metrics_all = []
# Eval Metrics
eval_metrics = []
eval_iter = iter(eval_ds)
if num_eval_steps == -1:
num_iter = itertools.repeat(1)
else:
num_iter = range(num_eval_steps)
for _, eval_batch in zip(num_iter, eval_iter):
# pylint: disable=protected-access
eval_batch = common_utils.shard(
jax.tree_map(lambda x: x._numpy(), eval_batch))
# pylint: enable=protected-access
metrics = p_eval_step(optimizer.target, state, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
logging.info('eval in step: %d, loss: %.4f, acc: %.4f', step,
eval_summary['loss'], eval_summary['accuracy'])
if jax.host_id() == 0:
for key, val in eval_summary.items():
summary_writer.scalar(f'val_{key}', val, step)
summary_writer.flush()
return optimizer, state, step
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.enable_v2_behavior()
config = FLAGS.config
logging.info('===========Config Dict============')
logging.info(config)
batch_size = config.batch_size
learning_rate = config.learning_rate
num_train_steps = config.num_train_steps
num_eval_steps = config.num_eval_steps
eval_freq = config.eval_frequency
random_seed = config.random_seed
model_type = config.model_type
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'summary'))
else:
summary_writer = None
if batch_size % jax.device_count() > 0:
raise ValueError('Batch size must be divisible by the number of devices')
logging.info('Training on %s', FLAGS.task_name)
if model_type in ['wideresnet', 'resnet', 'simple_cnn']:
normalize = True
else: # transformer-based models
normalize = False
(train_ds, eval_ds, test_ds, num_classes, vocab_size,
input_shape) = task_registry.TASK_DATA_DICT[FLAGS.task_name](
n_devices=jax.local_device_count(),
batch_size=batch_size,
normalize=normalize)
train_iter = iter(train_ds)
model_kwargs = {}
flatten_input = True
if model_type in ['wideresnet', 'resnet', 'simple_cnn']:
model_kwargs.update({
'num_classes': num_classes,
})
flatten_input = False
else: # transformer models
# we will flatten the input
bs, h, w, c = input_shape
assert c == 1
input_shape = (bs, h * w * c)
model_kwargs.update({
'vocab_size': vocab_size,
'max_len': input_shape[1],
'classifier': True,
'num_classes': num_classes,
})
model_kwargs.update(config.model)
rng = random.PRNGKey(random_seed)
rng = jax.random.fold_in(rng, jax.host_id())
rng, init_rng = random.split(rng)
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
dropout_rngs = random.split(rng, jax.local_device_count())
model, state = get_model(init_rng, input_shape, model_type, model_kwargs)
optimizer = create_optimizer(model, learning_rate, config.weight_decay)
del model # Don't keep a copy of the initial model.
start_step = 0
if config.restore_checkpoints:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer, state = checkpoints.restore_checkpoint(FLAGS.model_dir,
(optimizer, state))
# Grab last step.
start_step = int(optimizer.state.step)
# Replicate optimizer and state
optimizer = jax_utils.replicate(optimizer)
state = jax_utils.replicate(state)
learning_rate_fn = train_utils.create_learning_rate_scheduler(
factors=config.factors,
base_learning_rate=learning_rate,
warmup_steps=config.warmup,
steps_per_cycle=config.get('steps_per_cycle', None),
)
p_train_step = jax.pmap(
functools.partial(
train_step,
learning_rate_fn=learning_rate_fn,
num_classes=num_classes,
grad_clip_norm=config.get('grad_clip_norm', None),
flatten_input=flatten_input),
axis_name='batch')
p_eval_step = jax.pmap(
functools.partial(
eval_step, num_classes=num_classes, flatten_input=flatten_input),
axis_name='batch',
)
optimizer, state, step = train_loop(config, dropout_rngs, eval_ds, eval_freq,
num_eval_steps, num_train_steps,
optimizer, state, p_eval_step,
p_train_step, start_step, train_iter,
summary_writer)
logging.info('Starting testing')
logging.info('====================')
test(optimizer, state, p_eval_step, step, test_ds, summary_writer,
FLAGS.model_dir)
if __name__ == '__main__':
app.run(main)
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/train.py |
transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/__init__.py |
|
transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder128/__init__.py |
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.image.configs.pathfinder128 import base_pathfinder128_config
def get_config():
"""Get the hyperparameter configuration."""
config = base_pathfinder128_config.get_config()
config.model_type = "transformer"
return config
def get_hyper(hyper):
return hyper.product([])
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder128/transformer_base.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Configuration."""
import ml_collections
NUM_EPOCHS = 200
TRAIN_EXAMPLES = 160000
VALID_EXAMPLES = 20000
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.batch_size = 64
config.eval_frequency = TRAIN_EXAMPLES // config.batch_size
config.num_train_steps = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
config.num_eval_steps = VALID_EXAMPLES // config.batch_size
config.weight_decay = 0.
config.grad_clip_norm = 1.
config.save_checkpoints = True
config.restore_checkpoints = True
config.checkpoint_freq = (TRAIN_EXAMPLES //
config.batch_size) * NUM_EPOCHS // 2
config.random_seed = 0
config.learning_rate = .001
config.factors = 'constant * linear_warmup * cosine_decay'
config.warmup = (TRAIN_EXAMPLES // config.batch_size) * 1
config.steps_per_cycle = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
# model params
config.model = ml_collections.ConfigDict()
config.model.num_layers = 1
config.model.num_heads = 2
config.model.emb_dim = 32
config.model.dropout_rate = 0.1
config.model.qkv_dim = config.model.emb_dim // 2
config.model.mlp_dim = config.model.qkv_dim * 2
config.model.attention_dropout_rate = 0.1
config.model.classifier_pool = 'MEAN'
config.model.learn_pos_emb = True
config.trial = 0 # dummy for repeated runs.
return config
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder128/base_pathfinder128_config.py |
"""Base Configuration."""
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ml_collections
NUM_EPOCHS = 100
TRAIN_EXAMPLES = 160000
VALID_EXAMPLES = 20000
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.batch_size = 32
config.eval_frequency = TRAIN_EXAMPLES // config.batch_size
config.num_train_steps = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
config.num_eval_steps = VALID_EXAMPLES // config.batch_size
config.weight_decay = 0.
config.grad_clip_norm = None
config.save_checkpoints = True
config.restore_checkpoints = True
config.checkpoint_freq = (TRAIN_EXAMPLES //
config.batch_size) * NUM_EPOCHS // 2
config.random_seed = 0
config.learning_rate = .001
config.factors = 'constant * linear_warmup * cosine_decay'
config.warmup = (TRAIN_EXAMPLES // config.batch_size) * 1
config.steps_per_cycle = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
# model params
config.model = ml_collections.ConfigDict()
config.model.emb_dim = 32
config.model.num_heads = 4
config.model.num_layers = 2
config.model.qkv_dim = 32
config.model.mlp_dim = 32
config.model.dropout_rate = 0.1
config.model.attention_dropout_rate = 0.1
config.model.classifier_pool = 'CLS'
config.model.learn_pos_emb = True
config.trial = 0 # dummy for repeated runs.
return config
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder256/base_pathfinder256_config.py |
transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder256/__init__.py |
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.image.configs.pathfinder256 import base_pathfinder256_config
def get_config():
"""Get the hyperparameter configuration."""
config = base_pathfinder256_config.get_config()
config.model_type = "transformer"
return config
def get_hyper(hyper):
return hyper.product([])
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder256/transformer_base.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Configuration."""
import ml_collections
NUM_EPOCHS = 200
TRAIN_EXAMPLES = 160000
VALID_EXAMPLES = 20000
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.batch_size = 256
config.eval_frequency = TRAIN_EXAMPLES // config.batch_size
config.num_train_steps = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
config.num_eval_steps = VALID_EXAMPLES // config.batch_size
config.weight_decay = 0.
config.grad_clip_norm = None
config.save_checkpoints = True
config.restore_checkpoints = True
config.checkpoint_freq = (TRAIN_EXAMPLES //
config.batch_size) * NUM_EPOCHS // 2
config.random_seed = 0
config.learning_rate = .001
config.factors = 'constant * linear_warmup * cosine_decay'
config.warmup = (TRAIN_EXAMPLES // config.batch_size) * 1
config.steps_per_cycle = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
# model params
config.model = ml_collections.ConfigDict()
config.model.emb_dim = 32
config.model.num_heads = 4
config.model.num_layers = 2
config.model.qkv_dim = 32
config.model.mlp_dim = 32
config.model.dropout_rate = 0.1
config.model.attention_dropout_rate = 0.1
config.model.classifier_pool = 'CLS'
config.model.learn_pos_emb = True
config.trial = 0 # dummy for repeated runs.
return config
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder64/base_pathfinder64_config.py |
transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder64/__init__.py |
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.image.configs.pathfinder64 import base_pathfinder64_config
def get_config():
"""Get the hyperparameter configuration."""
config = base_pathfinder64_config.get_config()
config.model_type = "transformer"
return config
def get_hyper(hyper):
return hyper.product([])
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder64/transformer_base.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Configuration."""
import ml_collections
NUM_EPOCHS = 200
TRAIN_EXAMPLES = 160000
VALID_EXAMPLES = 20000
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.batch_size = 512
config.eval_frequency = TRAIN_EXAMPLES // config.batch_size
config.num_train_steps = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
config.num_eval_steps = VALID_EXAMPLES // config.batch_size
config.weight_decay = 0.
config.grad_clip_norm = None
config.save_checkpoints = True
config.restore_checkpoints = True
config.checkpoint_freq = (TRAIN_EXAMPLES //
config.batch_size) * NUM_EPOCHS // 2
config.random_seed = 0
config.learning_rate = .001
config.factors = 'constant * linear_warmup * cosine_decay'
config.warmup = (TRAIN_EXAMPLES // config.batch_size) * 1
config.steps_per_cycle = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
# model params
config.model = ml_collections.ConfigDict()
config.model.num_layers = 1
config.model.num_heads = 2
config.model.emb_dim = 32
config.model.dropout_rate = 0.1
config.model.qkv_dim = config.model.emb_dim // 2
config.model.mlp_dim = config.model.qkv_dim * 2
config.model.attention_dropout_rate = 0.1
config.model.classifier_pool = 'MEAN'
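  # Unlike the other pathfinder base configs, positional embeddings are kept
  # fixed here rather than learned.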
config.model.learn_pos_emb = False
config.trial = 0 # dummy for repeated runs.
return config
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder32/base_pathfinder32_config.py |
transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder32/__init__.py |
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.image.configs.pathfinder32 import base_pathfinder32_config
def get_config():
"""Get the hyperparameter configuration."""
config = base_pathfinder32_config.get_config()
config.model_type = "transformer"
config.model.num_layers = 1
config.model.num_heads = 4
config.model.emb_dim = 128
config.model.dropout_rate = 0.2
config.model.qkv_dim = config.model.emb_dim // 2
config.model.mlp_dim = config.model.qkv_dim * 2
return config
def get_hyper(hyper):
return hyper.product([])
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/pathfinder32/transformer_base.py |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Configuration."""
import ml_collections
NUM_EPOCHS = 200
TRAIN_EXAMPLES = 45000
VALID_EXAMPLES = 10000
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.batch_size = 256
config.eval_frequency = TRAIN_EXAMPLES // config.batch_size
config.num_train_steps = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
config.num_eval_steps = VALID_EXAMPLES // config.batch_size
config.weight_decay = 0.
config.grad_clip_norm = None
config.save_checkpoints = True
config.restore_checkpoints = True
config.checkpoint_freq = (TRAIN_EXAMPLES //
config.batch_size) * NUM_EPOCHS // 2
config.random_seed = 0
config.learning_rate = .0005
config.factors = 'constant * linear_warmup * cosine_decay'
config.warmup = (TRAIN_EXAMPLES // config.batch_size) * 1
config.steps_per_cycle = (TRAIN_EXAMPLES // config.batch_size) * NUM_EPOCHS
# model params
config.model = ml_collections.ConfigDict()
config.model.emb_dim = 32
config.model.num_heads = 1
config.model.num_layers = 1
config.model.qkv_dim = 32
config.model.mlp_dim = 64
config.model.dropout_rate = 0.3
config.model.attention_dropout_rate = 0.2
config.model.classifier_pool = 'CLS'
config.model.learn_pos_emb = True
config.trial = 0 # dummy for repeated runs.
return config
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/cifar10/base_cifar10_config.py |
transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/cifar10/__init__.py |
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.image.configs.cifar10 import base_cifar10_config
def get_config():
"""Get the hyperparameter configuration."""
config = base_cifar10_config.get_config()
config.model_type = "transformer"
return config
def get_hyper(hyper):
return hyper.product([])
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/image/configs/cifar10/transformer_base.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for the imdb dataset."""
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
AUTOTUNE = tf.data.experimental.AUTOTUNE
def preprocess_dataset(file_path, batch_size):
"""Preprocess dataset."""
tf.logging.info(file_path)
sel_cols = ['Source', 'Target']
col_defaults = [tf.string, tf.int32]
ds = tf.data.experimental.make_csv_dataset([file_path],
batch_size,
column_defaults=col_defaults,
select_columns=sel_cols,
field_delim=',',
header=True,
shuffle=False,
num_epochs=1)
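  # make_csv_dataset yields batched examples; unbatch so tokenization and
  # padded_batch below control the final batching.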
ds = ds.unbatch()
return ds
def get_imdb_dataset():
"""Get dataset from imdb tfds. converts into src/tgt pairs."""
data = tfds.load('imdb_reviews')
train_raw = data['train']
valid_raw = data['test']
test_raw = data['test']
# use test set for validation because IMDb doesn't have val set.
# Print an example.
logging.info('Data sample: %s', next(iter(tfds.as_numpy(train_raw.skip(4)))))
def adapt_example(example):
return {'Source': example['text'], 'Target': example['label']}
train = train_raw.map(adapt_example)
valid = valid_raw.map(adapt_example)
test = test_raw.map(adapt_example)
return train, valid, test
def get_yelp_dataset():
"""Get dataset from yelp tfds. converts into src/tgt pairs."""
data = tfds.load('yelp_polarity_reviews')
train_raw = data['train']
valid_raw = data['test']
test_raw = data['test']
# use test set for validation because yelp doesn't have val set.
# Print an example.
logging.info('Data sample: %s', next(iter(tfds.as_numpy(train_raw.skip(4)))))
def adapt_example(example):
return {'Source': example['text'], 'Target': example['label']}
train = train_raw.map(adapt_example)
valid = valid_raw.map(adapt_example)
test = test_raw.map(adapt_example)
return train, valid, test
def get_agnews_dataset():
"""Get dataset from agnews tfds. converts into src/tgt pairs."""
data = tfds.load('ag_news_subset')
train_raw = data['train']
valid_raw = data['test']
test_raw = data['test']
# use test set for validation because agnews doesn't have val set.
# Print an example.
logging.info('Data sample: %s', next(iter(tfds.as_numpy(train_raw.skip(4)))))
def adapt_example(example):
return {'Source': example['description'], 'Target': example['label']}
train = train_raw.map(adapt_example)
valid = valid_raw.map(adapt_example)
test = test_raw.map(adapt_example)
return train, valid, test
def get_tc_datasets(n_devices,
task_name,
data_dir=None,
batch_size=256,
fixed_vocab=None,
max_length=512,
tokenizer='char'):
"""Get text classification datasets."""
if batch_size % n_devices:
raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
(batch_size, n_devices))
if task_name == 'imdb_reviews':
train_dataset, val_dataset, test_dataset = get_imdb_dataset()
elif task_name == 'yelp_reviews':
train_dataset, val_dataset, test_dataset = get_yelp_dataset()
elif task_name == 'agnews':
train_dataset, val_dataset, test_dataset = get_agnews_dataset()
else:
train_path = data_dir + task_name + '_train.tsv'
val_path = data_dir + task_name + '_val.tsv'
test_path = data_dir + task_name + '_test.tsv'
train_dataset = preprocess_dataset(train_path, batch_size)
val_dataset = preprocess_dataset(val_path, batch_size)
test_dataset = preprocess_dataset(test_path, batch_size)
tf.logging.info('Finished preprocessing')
tf.logging.info(val_dataset)
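  # Tokenization: byte-level encoding by default ('char'); otherwise a
  # whitespace-token vocabulary is built from the training split (or taken
  # from fixed_vocab when provided).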
if tokenizer == 'char':
logging.info('Using char/byte level vocab')
encoder = tfds.deprecated.text.ByteTextEncoder()
else:
if fixed_vocab is None:
tf.logging.info('Building vocab')
# build vocab
vocab_set = set()
tokenizer = tfds.deprecated.text.Tokenizer()
for i, data in enumerate(train_dataset):
examples = data['Source']
examples = tokenizer.tokenize(examples.numpy())
examples = np.reshape(examples, (-1)).tolist()
vocab_set.update(examples)
if i % 1000 == 0:
tf.logging.info('Processed {}'.format(i))
tf.logging.info(len(vocab_set))
vocab_set = list(set(vocab_set))
tf.logging.info('Finished processing vocab size={}'.format(
len(vocab_set)))
else:
vocab_set = list(set(fixed_vocab))
encoder = tfds.deprecated.text.TokenTextEncoder(vocab_set)
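  # Wrap the Python-side encoder in tf.py_function so it can run inside the
  # tf.data pipeline; inputs are truncated to max_length at tokenization time.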
def tf_encode(x):
result = tf.py_function(lambda s: tf.constant(encoder.encode(s.numpy())), [
x,
], tf.int32)
result.set_shape([None])
return result
def tokenize(d):
return {
'inputs': tf_encode(d['Source'])[:max_length],
'targets': d['Target']
}
train_dataset = train_dataset.map(tokenize, num_parallel_calls=AUTOTUNE)
val_dataset = val_dataset.map(tokenize, num_parallel_calls=AUTOTUNE)
test_dataset = test_dataset.map(tokenize, num_parallel_calls=AUTOTUNE)
max_shape = {'inputs': [max_length], 'targets': []}
# train_dataset = train_dataset.shuffle(
# buffer_size=256, reshuffle_each_iteration=True).padded_batch(
# batch_size, padded_shapes=max_shape)
train_dataset = train_dataset.padded_batch(batch_size, padded_shapes=max_shape)
val_dataset = val_dataset.padded_batch(batch_size, padded_shapes=max_shape)
test_dataset = test_dataset.padded_batch(batch_size, padded_shapes=max_shape)
return train_dataset, val_dataset, test_dataset, encoder
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/text_classification/input_pipeline.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Document Classification tasks."""
import functools
import itertools
import json
import os
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.nn
import jax.numpy as jnp
from lra_benchmarks.models.transformer import transformer
from lra_benchmarks.text_classification import input_pipeline
from lra_benchmarks.utils import train_utils
from ml_collections import config_flags
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string(
'model_dir', default=None, help='Directory to store model data.')
flags.DEFINE_string(
'task_name',
default='basic_two_ptrs',
    help='Name of the task used to load training/test data.')
flags.DEFINE_string(
'data_dir', default=None, help='Directory containing datasets.')
flags.DEFINE_bool(
'test_only', default=False, help='Run the evaluation on the test data.')
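# Target class count per task; only IMDb is registered here, so additional
# tasks from the input pipeline need their class counts added before use.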
CLASS_MAP = {'imdb_reviews': 2}
def create_model(key, flax_module, input_shape, model_kwargs):
"""Creates and initializes the model."""
@functools.partial(jax.jit, backend='cpu')
def _create_model(key):
module = flax_module.partial(**model_kwargs)
with nn.stochastic(key):
_, initial_params = module.init_by_shape(key,
[(input_shape, jnp.float32)])
model = nn.Model(module, initial_params)
return model
return _create_model(key)
def create_optimizer(model, learning_rate, weight_decay):
optimizer_def = optim.Adam(
learning_rate, beta1=0.9, beta2=0.98, eps=1e-9, weight_decay=weight_decay)
optimizer = optimizer_def.create(model)
return optimizer
def compute_metrics(logits, labels, weights):
"""Compute summary metrics."""
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, labels, num_classes=CLASS_MAP[FLAGS.task_name], weights=None)
acc, _ = train_utils.compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
def train_step(optimizer, batch, learning_rate_fn, dropout_rng=None):
"""Perform a single training step."""
train_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in train_keys]
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = random.split(dropout_rng)
def loss_fn(model):
"""Loss function used for training."""
with nn.stochastic(dropout_rng):
logits = model(inputs, train=True)
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, targets, num_classes=CLASS_MAP[FLAGS.task_name], weights=None)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, targets, None)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
def eval_step(model, batch):
eval_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in eval_keys]
logits = model(inputs, train=False)
logging.info(logits)
return compute_metrics(logits, targets, None)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.enable_v2_behavior()
config = FLAGS.config
logging.info('===========Config Dict============')
logging.info(config)
batch_size = config.batch_size
learning_rate = config.learning_rate
num_train_steps = config.num_train_steps
num_eval_steps = config.num_eval_steps
eval_freq = config.eval_frequency
random_seed = config.random_seed
model_type = config.model_type
max_length = config.max_length
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'summary'))
if batch_size % jax.device_count() > 0:
raise ValueError('Batch size must be divisible by the number of devices')
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_tc_datasets(
n_devices=jax.local_device_count(),
task_name=FLAGS.task_name,
data_dir=FLAGS.data_dir,
batch_size=batch_size,
fixed_vocab=None,
max_length=max_length)
vocab_size = encoder.vocab_size
logging.info('Vocab Size: %d', vocab_size)
train_ds = train_ds.repeat()
train_iter = iter(train_ds)
input_shape = (batch_size, max_length)
model_kwargs = {
'vocab_size': vocab_size,
'emb_dim': config.emb_dim,
'num_heads': config.num_heads,
'num_layers': config.num_layers,
'qkv_dim': config.qkv_dim,
'mlp_dim': config.mlp_dim,
'max_len': max_length,
'classifier': True,
'num_classes': CLASS_MAP[FLAGS.task_name],
'classifier_pool': config.classifier_pool
}
rng = random.PRNGKey(random_seed)
rng = jax.random.fold_in(rng, jax.host_id())
rng, init_rng = random.split(rng)
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
dropout_rngs = random.split(rng, jax.local_device_count())
if model_type == 'transformer':
model = create_model(init_rng, transformer.TransformerEncoder, input_shape,
model_kwargs)
else:
raise ValueError('Model type not supported')
optimizer = create_optimizer(
model, learning_rate, weight_decay=FLAGS.config.weight_decay)
del model # Don't keep a copy of the initial model.
start_step = 0
if config.restore_checkpoints or FLAGS.test_only:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
learning_rate_fn = train_utils.create_learning_rate_scheduler(
factors=config.factors,
base_learning_rate=learning_rate,
warmup_steps=config.warmup)
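  # pmap the train/eval steps across local devices; the 'batch' axis name
  # matches the psum/pmean collectives used inside the step functions.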
p_train_step = jax.pmap(
functools.partial(train_step, learning_rate_fn=learning_rate_fn),
axis_name='batch')
p_eval_step = jax.pmap(eval_step, axis_name='batch')
# p_pred_step = jax.pmap(predict_step, axis_name='batch')
def run_eval(eval_ds, num_eval_steps=-1):
eval_metrics = []
eval_iter = iter(eval_ds)
if num_eval_steps == -1:
num_iter = itertools.count()
else:
num_iter = range(num_eval_steps)
for _, eval_batch in zip(num_iter, eval_iter):
# pylint: disable=protected-access
eval_batch = common_utils.shard(
jax.tree_map(lambda x: x._numpy(), eval_batch))
# pylint: enable=protected-access
metrics = p_eval_step(optimizer.target, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
# Calculate (clipped) perplexity after averaging log-perplexities:
eval_summary['perplexity'] = jnp.clip(
jnp.exp(eval_summary['loss']), a_max=1.0e4)
return eval_summary
if FLAGS.test_only:
with tf.io.gfile.GFile(os.path.join(FLAGS.model_dir, 'results.json'),
'w') as f:
test_summary = run_eval(test_ds)
json.dump(jax.tree_map(lambda x: x.tolist(), test_summary), f)
return
metrics_all = []
tick = time.time()
logging.info('Starting training')
logging.info('====================')
for step, batch in zip(range(start_step, num_train_steps), train_iter):
batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access
optimizer, metrics, dropout_rngs = p_train_step(
optimizer, batch, dropout_rng=dropout_rngs)
metrics_all.append(metrics)
logging.info('train in step: %d', step)
# Save a Checkpoint
if ((step % config.checkpoint_freq == 0 and step > 0) or
step == num_train_steps - 1):
if jax.host_id() == 0 and config.save_checkpoints:
# Save unreplicated optimizer + model state.
checkpoints.save_checkpoint(FLAGS.model_dir,
jax_utils.unreplicate(optimizer), step)
# Periodic metric handling.
if step % eval_freq == 0 and step > 0:
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop
summary['learning_rate'] = lr
# Calculate (clipped) perplexity after averaging log-perplexities:
summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)
logging.info('train in step: %d, loss: %.4f, acc: %.4f', step,
summary['loss'], summary['accuracy'])
if jax.host_id() == 0:
tock = time.time()
steps_per_sec = eval_freq / (tock - tick)
tick = tock
summary_writer.scalar('steps per second', steps_per_sec, step)
for key, val in summary.items():
summary_writer.scalar(f'train_{key}', val, step)
summary_writer.flush()
# Reset metric accumulation for next evaluation cycle.
metrics_all = []
# Eval Metrics
eval_summary = run_eval(eval_ds, num_eval_steps)
logging.info('eval in step: %d, loss: %.4f, acc: %.4f', step,
eval_summary['loss'], eval_summary['accuracy'])
if jax.host_id() == 0:
for key, val in eval_summary.items():
summary_writer.scalar(f'eval_{key}', val, step)
summary_writer.flush()
if __name__ == '__main__':
app.run(main)
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/text_classification/train.py |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.text_classification.configs import base_tc_config
def get_config():
"""Get the default hyperparameter configuration."""
config = base_tc_config.get_config()
config.model_type = "transformer"
return config
def get_hyper(hyper):
return hyper.product([])
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/text_classification/configs/transformer_base.py |
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Configuration."""
import ml_collections
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.batch_size = 32
config.eval_frequency = 100
config.num_train_steps = 20000
config.num_eval_steps = -1
config.learning_rate = 0.05
config.weight_decay = 1e-1
config.max_target_length = 200
config.max_eval_target_length = 200
config.sampling_temperature = 0.6
config.sampling_top_k = 20
config.max_predict_token_length = 50
config.save_checkpoints = True
config.restore_checkpoints = True
config.checkpoint_freq = 10000
config.random_seed = 0
config.prompt = ""
config.factors = "constant * linear_warmup * rsqrt_decay"
config.warmup = 8000
config.classifier_pool = "CLS"
config.max_length = 1000
config.emb_dim = 256
config.num_heads = 4
config.num_layers = 4
config.qkv_dim = 256
config.mlp_dim = 1024
config.trial = 0 # dummy for repeated runs.
return config
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/text_classification/configs/base_tc_config.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFDS builder for pathfinder challenge."""
import os
import tensorflow as tf
import tensorflow_datasets as tfds
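# Note: the builders below reference module-level ORIGINAL_DATA_DIR_{32,64,128,256}
# constants for the raw pathfinder images; those paths are not defined in this
# file and must be supplied before the builders can generate data.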
class Pathfinder32(tfds.core.BeamBasedBuilder):
"""Pathfinder TFDS builder (where the resolution is 32).
The data for this dataset was generated using the script in
https://github.com/drewlinsley/pathfinder with the default parameters, while
followings being customized:
```
args.paddle_margin_list = [1]
args.window_size = [32, 32]
args.padding= 1
args.paddle_length = 2
args.marker_radius = 1.5
args.contour_length = 14
args.paddle_thickness = 0.5
args.antialias_scale = 2
args.seed_distance= 7
args.continuity = 1.0
args.distractor_length = args.contour_length // 3
args.num_distractor_snakes = 20 // args.distractor_length
args.snake_contrast_list = [2]
args.paddle_contrast_list = [0.75]
```
"""
VERSION = tfds.core.Version('1.0.0')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
        description=('This is a builder for the pathfinder challenge dataset'),
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(),
'label': tfds.features.ClassLabel(num_classes=2)
}),
supervised_keys=('image', 'label'),
homepage='',
citation="""@inproceedings{
Kim*2020Disentangling,
title={Disentangling neural mechanisms for perceptual grouping},
author={Junkyung Kim* and Drew Linsley* and Kalpit Thakkar and Thomas Serre},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=HJxrVA4FDS}
}""",
)
def _split_generators(self, dl_manager):
"""Downloads the data and defines the splits."""
return [
tfds.core.SplitGenerator(
name='easy', gen_kwargs={'file_pattern': 'curv_baseline'}),
tfds.core.SplitGenerator(
name='intermediate',
gen_kwargs={'file_pattern': 'curv_contour_length_9'}),
tfds.core.SplitGenerator(
name='hard', gen_kwargs={'file_pattern': 'curv_contour_length_14'})
]
def _build_pcollection(self, pipeline, file_pattern):
"""Generate examples as dicts."""
beam = tfds.core.lazy_imports.apache_beam
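    # Each metadata file lists one example per line as space-separated fields;
    # fields 0 and 1 give the image's subdirectory and file name, and field 3
    # holds the binary label.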
def _generate_examples(file_path):
"""Read the input data out of the source files."""
example_id = 0
meta_examples = tf.io.read_file(file_path).numpy().decode('utf-8').split(
'\n')[:-1]
print(meta_examples)
for m_example in meta_examples:
m_example = m_example.split(' ')
image_path = os.path.join(ORIGINAL_DATA_DIR_32, file_pattern,
m_example[0], m_example[1])
example_id += 1
yield '_'.join([m_example[0], m_example[1],
str(example_id)]), {
'image': image_path,
'label': int(m_example[3]),
}
meta_file_pathes = tf.io.gfile.glob(
os.path.join(ORIGINAL_DATA_DIR_32, file_pattern, 'metadata/*.npy'))
print(len(meta_file_pathes))
return (pipeline
| 'Create' >> beam.Create(meta_file_pathes)
| 'Generate' >> beam.ParDo(_generate_examples))
class Pathfinder64(tfds.core.BeamBasedBuilder):
"""Pathfinder TFDS builder (where the resolution is 64).
The data for this dataset was generated using the script in
https://github.com/drewlinsley/pathfinder with the default parameters, while
followings being customized:
```
args.padding = 1
args.antialias_scale = 4
args.paddle_margin_list = [1]
args.seed_distance = 12
args.window_size = [64,64]
args.marker_radius = 2.5
args.contour_length = 14
args.paddle_thickness = 1
args.antialias_scale = 2
args.continuity = 1.8 # from 1.8 to 0.8, with steps of 66%
args.distractor_length = args.contour_length / 3
args.num_distractor_snakes = 22 / args.distractor_length
args.snake_contrast_list = [0.8]
```
"""
VERSION = tfds.core.Version('1.0.0')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
        description=('This is a builder for the pathfinder challenge dataset'),
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(),
'label': tfds.features.ClassLabel(num_classes=2)
}),
supervised_keys=('image', 'label'),
homepage='',
citation="""@inproceedings{
Kim*2020Disentangling,
title={Disentangling neural mechanisms for perceptual grouping},
author={Junkyung Kim* and Drew Linsley* and Kalpit Thakkar and Thomas Serre},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=HJxrVA4FDS}
}""",
)
def _split_generators(self, dl_manager):
"""Downloads the data and defines the splits."""
return [
tfds.core.SplitGenerator(
name='easy', gen_kwargs={'file_pattern': 'curv_baseline'}),
tfds.core.SplitGenerator(
name='intermediate',
gen_kwargs={'file_pattern': 'curv_contour_length_9'}),
tfds.core.SplitGenerator(
name='hard', gen_kwargs={'file_pattern': 'curv_contour_length_14'})
]
def _build_pcollection(self, pipeline, file_pattern):
"""Generate examples as dicts."""
beam = tfds.core.lazy_imports.apache_beam
def _generate_examples(file_path):
"""Read the input data out of the source files."""
example_id = 0
meta_examples = tf.io.read_file(file_path).numpy().decode('utf-8').split(
'\n')[:-1]
print(meta_examples)
for m_example in meta_examples:
m_example = m_example.split(' ')
image_path = os.path.join(ORIGINAL_DATA_DIR_64, file_pattern,
m_example[0], m_example[1])
example_id += 1
yield '_'.join([m_example[0], m_example[1],
str(example_id)]), {
'image': image_path,
'label': int(m_example[3]),
}
meta_file_pathes = tf.io.gfile.glob(
os.path.join(ORIGINAL_DATA_DIR_64, file_pattern, 'metadata/*.npy'))
print(len(meta_file_pathes))
return (pipeline
| 'Create' >> beam.Create(meta_file_pathes)
| 'Generate' >> beam.ParDo(_generate_examples))
class Pathfinder128(tfds.core.BeamBasedBuilder):
"""Pathfinder TFDS builder (where the resolution is 128).
The data for this dataset was generated using the script in
https://github.com/drewlinsley/pathfinder with the default parameters, while
followings being customized:
```
args.padding = 1
args.antialias_scale = 4
args.paddle_margin_list = [2,3]
args.seed_distance = 20
args.window_size = [128,128]
args.marker_radius = 3
args.contour_length = 14
args.paddle_thickness = 1.5
args.antialias_scale = 2
args.continuity = 1.8 # from 1.8 to 0.8, with steps of 66%
args.distractor_length = args.contour_length / 3
args.num_distractor_snakes = 35 / args.distractor_length
args.snake_contrast_list = [0.9]
```
"""
VERSION = tfds.core.Version('1.0.0')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
        description=('This is a builder for the pathfinder challenge dataset'),
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(),
'label': tfds.features.ClassLabel(num_classes=2)
}),
supervised_keys=('image', 'label'),
homepage='',
citation="""@inproceedings{
Kim*2020Disentangling,
title={Disentangling neural mechanisms for perceptual grouping},
author={Junkyung Kim* and Drew Linsley* and Kalpit Thakkar and Thomas Serre},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=HJxrVA4FDS}
}""",
)
def _split_generators(self, dl_manager):
"""Downloads the data and defines the splits."""
return [
tfds.core.SplitGenerator(
name='easy', gen_kwargs={'file_pattern': 'curv_baseline'}),
tfds.core.SplitGenerator(
name='intermediate',
gen_kwargs={'file_pattern': 'curv_contour_length_9'}),
tfds.core.SplitGenerator(
name='hard', gen_kwargs={'file_pattern': 'curv_contour_length_14'})
]
def _build_pcollection(self, pipeline, file_pattern):
"""Generate examples as dicts."""
beam = tfds.core.lazy_imports.apache_beam
def _generate_examples(file_path):
"""Read the input data out of the source files."""
example_id = 0
meta_examples = tf.io.read_file(
file_path).numpy().decode('utf-8').split('\n')[:-1]
print(meta_examples)
for m_example in meta_examples:
m_example = m_example.split(' ')
image_path = os.path.join(ORIGINAL_DATA_DIR_128, file_pattern,
m_example[0], m_example[1])
example_id += 1
yield '_'.join([m_example[0], m_example[1], str(example_id)]), {
'image': image_path,
'label': int(m_example[3]),
}
meta_file_pathes = tf.io.gfile.glob(
os.path.join(ORIGINAL_DATA_DIR_128, file_pattern, 'metadata/*.npy'))
print(len(meta_file_pathes))
return (
pipeline
| 'Create' >> beam.Create(meta_file_pathes)
| 'Generate' >> beam.ParDo(_generate_examples)
)
class Pathfinder256(tfds.core.BeamBasedBuilder):
"""Pathfinder TFDS builder (where the resolution is 256).
The data for this dataset was generated using the script in
https://github.com/drewlinsley/pathfinder with the default parameters, while
followings being customized:
```
args.antialias_scale = 4
args.paddle_margin_list = [3]
args.window_size = [256,256]
args.marker_radius = 5
args.contour_length = 14
args.paddle_thickness = 2
args.antialias_scale = 2
args.continuity = 1.8
args.distractor_length = args.contour_length / 3
args.num_distractor_snakes = 30 / args.distractor_length
args.snake_contrast_list = [1.0]
```
"""
VERSION = tfds.core.Version('1.0.0')
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
        description=('This is a builder for the pathfinder challenge dataset'),
features=tfds.features.FeaturesDict({
'image': tfds.features.Image(),
'label': tfds.features.ClassLabel(num_classes=2)
}),
supervised_keys=('image', 'label'),
homepage='',
citation="""@inproceedings{
Kim*2020Disentangling,
title={Disentangling neural mechanisms for perceptual grouping},
author={Junkyung Kim* and Drew Linsley* and Kalpit Thakkar and Thomas Serre},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=HJxrVA4FDS}
}""",
)
def _split_generators(self, dl_manager):
"""Downloads the data and defines the splits."""
return [
tfds.core.SplitGenerator(
name='easy', gen_kwargs={'file_pattern': 'curv_baseline'}),
tfds.core.SplitGenerator(
name='intermediate',
gen_kwargs={'file_pattern': 'curv_contour_length_9'}),
tfds.core.SplitGenerator(
name='hard', gen_kwargs={'file_pattern': 'curv_contour_length_14'})
]
def _build_pcollection(self, pipeline, file_pattern):
"""Generate examples as dicts."""
beam = tfds.core.lazy_imports.apache_beam
def _generate_examples(file_path):
"""Read the input data out of the source files."""
example_id = 0
meta_examples = tf.io.read_file(file_path).numpy().decode('utf-8').split(
'\n')[:-1]
print(meta_examples)
for m_example in meta_examples:
m_example = m_example.split(' ')
image_path = os.path.join(ORIGINAL_DATA_DIR_256, file_pattern,
m_example[0], m_example[1])
example_id += 1
yield '_'.join([m_example[0], m_example[1],
str(example_id)]), {
'image': image_path,
'label': int(m_example[3]),
}
meta_file_pathes = tf.io.gfile.glob(
os.path.join(ORIGINAL_DATA_DIR_256, file_pattern, 'metadata/*.npy'))
print(len(meta_file_pathes))
return (pipeline
| 'Create' >> beam.Create(meta_file_pathes)
| 'Generate' >> beam.ParDo(_generate_examples))
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/data/pathfinder.py |
transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/data/__init__.py |
|
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generators for custom listops tasks."""
import csv
import random
from absl import app
from absl import flags
import numpy as np
import tensorflow.compat.v1 as tf
flags.DEFINE_string(
'task', default='basic',
help='Name of task to create.')
flags.DEFINE_integer(
'num_train_samples', default=96000,
help=('Number of train samples.'))
flags.DEFINE_integer(
'num_valid_samples', default=2000,
    help=('Number of validation samples.'))
flags.DEFINE_integer(
'num_test_samples', default=2000,
help=('Number of test samples.'))
flags.DEFINE_integer(
'max_depth', default=10,
help=('maximum tree depth of training sequences.'))
flags.DEFINE_integer(
'max_args', default=10,
help=('maximum number of arguments per operator in training sequences.'))
flags.DEFINE_integer(
'max_length', default=2000,
help=('maximum length per sequence in training sequences.'))
flags.DEFINE_integer(
'min_length', default=500,
help=('minimum length per sequence in training sequences.'))
flags.DEFINE_string(
'output_dir', default='output_dir',
help='Directory to output files.')
FLAGS = flags.FLAGS
MIN = '[MIN'
MAX = '[MAX'
MED = '[MED'
FIRST = '[FIRST'
LAST = '[LAST'
SUM_MOD = '[SM'
END = ']'
OPERATORS = [MIN, MAX, MED, SUM_MOD] # , FIRST, LAST]
VALUES = range(10)
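# Probability that generate_tree expands into an operator node; with
# probability 1 - VALUE_P a leaf digit is emitted instead (and a leaf is
# forced once max_depth is reached).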
VALUE_P = 0.25
def generate_tree(depth, max_depth, max_args):
"""Generate tree-like equations.
Args:
depth: current depth of the node, int.
max_depth: maximum depth of the tree, int.
max_args: maximum number of arguments per operator, int.
Returns:
The root node of a tree structure.
"""
if depth < max_depth:
r = random.random()
else:
r = 1
if r > VALUE_P:
value = random.choice(VALUES)
return value, 1
else:
length = 2
num_values = random.randint(2, max_args)
values = []
for _ in range(num_values):
sub_t, sub_l = generate_tree(depth + 1, max_depth, max_args)
values.append(sub_t)
length += sub_l
op = random.choice(OPERATORS)
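    # Encode the node as left-nested binary tuples ((..((op, v1), v2).., vn), END)
    # so to_string and to_value can walk it pairwise.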
t = (op, values[0])
for value in values[1:]:
t = (t, value)
t = (t, END)
return t, length
def to_string(t, parens=True):
if isinstance(t, str):
return t
elif isinstance(t, int):
return str(t)
else:
if parens:
return '( ' + to_string(t[0]) + ' ' + to_string(t[1]) + ' )'
def to_value(t):
"""Compute the output of equation t.
Args:
t: a tree structure that represents equation t, list.
Returns:
The result of equation t, int.
"""
if not isinstance(t, tuple):
return t
l = to_value(t[0])
r = to_value(t[1])
if l in OPERATORS: # Create an unsaturated function.
return (l, [r])
elif r == END: # l must be an unsaturated function.
if l[0] == MIN:
return min(l[1])
elif l[0] == MAX:
return max(l[1])
elif l[0] == FIRST:
return l[1][0]
elif l[0] == LAST:
return l[1][-1]
elif l[0] == MED:
return int(np.median(l[1]))
elif l[0] == SUM_MOD:
return np.sum(l[1]) % 10
elif isinstance(l, tuple):
# We've hit an unsaturated function and an argument.
return (l[0], l[1] + [r])
def write_to_file(data, fp):
"""Write to file output."""
tf.logging.info(type(data))
tf.logging.info('Writing {} samples to {}'.format(len(data), fp + '.tsv'))
with tf.io.gfile.GFile(fp + '.tsv', 'w+') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerow(['Source', 'Target'])
writer.writerows(data)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.logging.info('Start dataset construction')
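  # Collect generated trees in a set so duplicate equations are discarded;
  # generation continues until enough unique samples within the length bounds exist.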
data = set()
num_samples = FLAGS.num_train_samples \
+ FLAGS.num_test_samples + FLAGS.num_valid_samples
while len(data) < num_samples:
tree, length = generate_tree(1, FLAGS.max_depth, FLAGS.max_args)
if length > FLAGS.min_length and length < FLAGS.max_length:
data.add(tree)
if len(data) % 1000 == 0:
tf.logging.info('Processed {}'.format(len(data)))
print('Processed {}'.format(len(data)))
train = []
for example in data:
train.append([to_string(example), to_value(example)])
tf.logging.info('Finished running dataset construction')
val = train[FLAGS.num_train_samples:]
test = val[FLAGS.num_valid_samples:]
val = val[:FLAGS.num_valid_samples]
train = train[:FLAGS.num_train_samples]
tf.logging.info('Dataset size: %d/%d/%d' % (len(train), len(val), len(test)))
write_to_file(train, FLAGS.output_dir + '/{}_train'.format(FLAGS.task))
write_to_file(val, FLAGS.output_dir + '/{}_val'.format(FLAGS.task))
write_to_file(test, FLAGS.output_dir + '/{}_test'.format(FLAGS.task))
tf.logging.info('Finished writing all to file')
if __name__ == '__main__':
app.run(main)
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/data/listops.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Input pipeline for the listops dataset."""
import numpy as np
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
AUTOTUNE = tf.data.experimental.AUTOTUNE
def preprocess_dataset(file_path, batch_size):
"""Preprocess dataset."""
tf.logging.info(file_path)
sel_cols = ['Source', 'Target']
col_defaults = [tf.string, tf.int32]
ds = tf.data.experimental.make_csv_dataset([file_path],
batch_size,
column_defaults=col_defaults,
select_columns=sel_cols,
field_delim='\t',
header=True,
num_epochs=1)
ds = ds.unbatch()
return ds
def get_datasets(n_devices,
task_name,
data_dir=None,
batch_size=256,
max_length=512):
"""Get algorithmic datasets."""
if batch_size % n_devices:
raise ValueError("Batch size %d isn't divided evenly by n_devices %d" %
(batch_size, n_devices))
train_path = data_dir + task_name + '_train.tsv'
val_path = data_dir + task_name + '_val.tsv'
test_path = data_dir + task_name + '_test.tsv'
train_dataset = preprocess_dataset(train_path, batch_size)
val_dataset = preprocess_dataset(val_path, batch_size)
test_dataset = preprocess_dataset(test_path, batch_size)
tf.logging.info('Finished preprocessing')
tf.logging.info('Building vocab')
# build vocab
vocab_set = set()
tokenizer = tfds.deprecated.text.Tokenizer()
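  # The listops token set (digits, operators, brackets) is tiny, so scanning
  # only the first ~1000 batches of the training split suffices to cover it.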
for i, data in enumerate(train_dataset):
examples = data['Source']
examples = tokenizer.tokenize(examples.numpy())
examples = np.reshape(examples, (-1)).tolist()
vocab_set.update(examples)
if i % 1000 == 0:
tf.logging.info('Processed {}'.format(i))
if i > 1000:
break
vocab_set = list(set(vocab_set))
tf.logging.info('Finished processing vocab size={}'.format(len(vocab_set)))
encoder = tfds.deprecated.text.TokenTextEncoder(vocab_set)
def tf_encode(x):
result = tf.py_function(lambda s: tf.constant(encoder.encode(s.numpy())), [
x,
], tf.int32)
result.set_shape([None])
return result
def tokenize(d):
return {
'inputs': tf_encode(d['Source'])[:max_length],
'targets': d['Target']
}
train_dataset = train_dataset.map(tokenize, num_parallel_calls=AUTOTUNE)
val_dataset = val_dataset.map(tokenize, num_parallel_calls=AUTOTUNE)
test_dataset = test_dataset.map(tokenize, num_parallel_calls=AUTOTUNE)
max_shape = {'inputs': [max_length], 'targets': []}
# train_dataset = train_dataset.shuffle(
# buffer_size=1024, reshuffle_each_iteration=True).padded_batch(
# batch_size, padded_shapes=max_shape)
train_dataset = train_dataset.padded_batch(batch_size, padded_shapes=max_shape)
val_dataset = val_dataset.padded_batch(batch_size, padded_shapes=max_shape)
test_dataset = test_dataset.padded_batch(batch_size, padded_shapes=max_shape)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
val_dataset = val_dataset.prefetch(tf.data.experimental.AUTOTUNE)
test_dataset = test_dataset.prefetch(tf.data.experimental.AUTOTUNE)
return train_dataset, val_dataset, test_dataset, encoder
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/listops/input_pipeline.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main training script for the listops task."""
import functools
import itertools
import json
import os
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.nn
import jax.numpy as jnp
from lra_benchmarks.listops import input_pipeline
from lra_benchmarks.models.transformer import transformer
from lra_benchmarks.utils import train_utils
from ml_collections import config_flags
import numpy as np
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string(
'model_dir', default=None, help='Directory to store model data.')
flags.DEFINE_string(
'task_name',
default='basic',
    help='Name of the task used to load training/test data.')
flags.DEFINE_string(
'data_dir', default=None, help='Directory containing datasets.')
flags.DEFINE_bool(
'test_only', default=False, help='Run the evaluation on the test data.')
def create_model(key, flax_module, input_shape, model_kwargs):
"""Creates and initializes the model."""
@functools.partial(jax.jit, backend='cpu')
def _create_model(key):
module = flax_module.partial(**model_kwargs)
with nn.stochastic(key):
_, initial_params = module.init_by_shape(key,
[(input_shape, jnp.float32)])
model = nn.Model(module, initial_params)
return model
return _create_model(key)
def create_optimizer(model, learning_rate):
optimizer_def = optim.Adam(
learning_rate,
beta1=0.9,
beta2=0.98,
eps=1e-9,
weight_decay=FLAGS.config.weight_decay)
optimizer = optimizer_def.create(model)
return optimizer
def compute_metrics(logits, labels, weights):
"""Compute summary metrics."""
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, labels, num_classes=10, weights=weights)
acc, _ = train_utils.compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
def train_step(optimizer, batch, learning_rate_fn, dropout_rng=None):
"""Perform a single training step."""
train_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in train_keys]
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = random.split(dropout_rng)
def loss_fn(model):
"""Loss function used for training."""
with nn.stochastic(dropout_rng):
logits = model(inputs, train=True)
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, targets, num_classes=10, weights=None)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, targets, None)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
def eval_step(model, batch):
eval_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in eval_keys]
logits = model(inputs, train=False)
return compute_metrics(logits, targets, None)
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.enable_v2_behavior()
config = FLAGS.config
logging.info('===========Config Dict============')
logging.info(config)
batch_size = config.batch_size
learning_rate = config.learning_rate
num_train_steps = config.num_train_steps
num_eval_steps = config.num_eval_steps
eval_freq = config.eval_frequency
random_seed = config.random_seed
model_type = config.model_type
model_kwargs = (
config.model_kwargs.to_dict() if 'model_kwargs' in config else {})
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'summary'))
if batch_size % jax.device_count() > 0:
raise ValueError('Batch size must be divisible by the number of devices')
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_datasets(
n_devices=jax.local_device_count(),
task_name=FLAGS.task_name,
data_dir=FLAGS.data_dir,
batch_size=batch_size,
max_length=config.max_length)
vocab_size = encoder.vocab_size
train_ds = train_ds.repeat()
train_iter = iter(train_ds)
max_length = config.max_length
input_shape = (batch_size, max_length)
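  # ListOps targets are single digits (0-9), so the encoder is configured as a
  # 10-way classifier over sequences up to max_length tokens.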
model_kwargs.update({
'vocab_size': vocab_size,
'emb_dim': config.emb_dim,
'num_heads': config.num_heads,
'num_layers': config.num_layers,
'qkv_dim': config.qkv_dim,
'mlp_dim': config.mlp_dim,
'max_len': config.max_length,
'classifier': True,
'num_classes': 10
})
rng = random.PRNGKey(random_seed)
rng = jax.random.fold_in(rng, jax.host_id())
rng, init_rng = random.split(rng)
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
dropout_rngs = random.split(rng, jax.local_device_count())
if model_type == 'transformer':
model = create_model(init_rng, transformer.TransformerEncoder, input_shape,
model_kwargs)
else:
raise ValueError('Model type not supported')
optimizer = create_optimizer(model, learning_rate)
del model # Don't keep a copy of the initial model.
start_step = 0
if config.restore_checkpoints or FLAGS.test_only:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
learning_rate_fn = train_utils.create_learning_rate_scheduler(
base_learning_rate=learning_rate)
p_train_step = jax.pmap(
functools.partial(train_step, learning_rate_fn=learning_rate_fn),
axis_name='batch')
p_eval_step = jax.pmap(eval_step, axis_name='batch')
# p_pred_step = jax.pmap(predict_step, axis_name='batch')
def run_eval(eval_ds, num_eval_steps=-1):
eval_metrics = []
eval_iter = iter(eval_ds)
if num_eval_steps == -1:
num_iter = itertools.count()
else:
num_iter = range(num_eval_steps)
for _, eval_batch in zip(num_iter, eval_iter):
# pylint: disable=protected-access
eval_batch = common_utils.shard(
jax.tree_map(lambda x: x._numpy(), eval_batch))
# pylint: enable=protected-access
metrics = p_eval_step(optimizer.target, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
# Calculate (clipped) perplexity after averaging log-perplexities:
eval_summary['perplexity'] = jnp.clip(
jnp.exp(eval_summary['loss']), a_max=1.0e4)
return eval_summary
if FLAGS.test_only:
with tf.io.gfile.GFile(os.path.join(FLAGS.model_dir, 'results.json'),
'w') as f:
test_summary = run_eval(test_ds)
json.dump(jax.tree_map(lambda x: x.tolist(), test_summary), f)
return
metrics_all = []
tick = time.time()
for step, batch in zip(range(start_step, num_train_steps), train_iter):
batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access
optimizer, metrics, dropout_rngs = p_train_step(
optimizer, batch, dropout_rng=dropout_rngs)
metrics_all.append(metrics)
logging.info('train in step: %d', step)
# Save a Checkpoint
if ((step % config.checkpoint_freq == 0 and step > 0) or
step == num_train_steps - 1):
if jax.host_id() == 0 and config.save_checkpoints:
# Save unreplicated optimizer + model state.
checkpoints.save_checkpoint(FLAGS.model_dir,
jax_utils.unreplicate(optimizer), step)
# Periodic metric handling.
if step % eval_freq == 0 and step > 0:
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop
summary['learning_rate'] = lr
# Calculate (clipped) perplexity after averaging log-perplexities:
summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)
logging.info('train in step: %d, loss: %.4f', step, summary['loss'])
if jax.host_id() == 0:
tock = time.time()
steps_per_sec = eval_freq / (tock - tick)
tick = tock
summary_writer.scalar('steps per second', steps_per_sec, step)
for key, val in summary.items():
summary_writer.scalar(f'train_{key}', val, step)
summary_writer.flush()
# Reset metric accumulation for next evaluation cycle.
metrics_all = []
# Eval Metrics
eval_summary = run_eval(eval_ds, num_eval_steps)
logging.info('eval in step: %d, loss: %.4f, acc: %.4f', step,
eval_summary['loss'], eval_summary['accuracy'])
if jax.host_id() == 0:
for key, val in eval_summary.items():
summary_writer.scalar(f'eval_{key}', val, step)
summary_writer.flush()
if __name__ == '__main__':
app.run(main)
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/listops/train.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base Configuration."""
import ml_collections
def get_config():
"""Get the default hyperparameter configuration."""
config = ml_collections.ConfigDict()
config.batch_size = 32
config.eval_frequency = 20
config.num_train_steps = 5000
config.num_eval_steps = 20
config.learning_rate = 0.05
config.weight_decay = 1e-1
config.max_target_length = 200 # ignored
config.max_eval_target_length = 200 # ignored
config.sampling_temperature = 0.6
config.sampling_top_k = 20
config.max_predict_token_length = 50
config.save_checkpoints = True
config.restore_checkpoints = True
config.checkpoint_freq = 10000
config.random_seed = 0
config.prompt = ""
config.factors = "constant * linear_warmup * rsqrt_decay"
config.warmup = 1000
config.max_length = 2000
config.tied_weights = True
config.pooling_mode = "CLS"
# config.interaction = "NLI"
config.emb_dim = 512
config.num_heads = 8
config.num_layers = 6
config.qkv_dim = 512
config.mlp_dim = 2048
config.trial = 0 # dummy for repeated runs.
return config
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/listops/configs/base_listops_config.py |
# Copyright 2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.listops.configs import base_listops_config
def get_config():
"""Get the default hyperparameter configuration."""
config = base_listops_config.get_config()
config.model_type = "transformer"
return config
def get_hyper(hyper):
return hyper.product([])
| transformer-ls-master | lra/datasets/long-range-arena/lra_benchmarks/listops/configs/transformer_base.py |
from .model_wrapper import *
from .truncated_bptt_lm_task import *
from .loss import *
| transformer-ls-master | autoregressive/model_lib/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.modules.layer_norm import LayerNorm
from fairseq.modules.gelu import gelu
from .layer import ChunkedLSAttention
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super().__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer("inv_freq", inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[None, :, :].expand(bsz, -1, -1)
else:
return pos_emb[None, :, :]
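# Hedged usage sketch (added for illustration, not from the original repo): the sinusoidal
# table yields one row per position, with the sin and cos halves concatenated along the
# feature dimension.
def _positional_embedding_shape_example():
    pe = PositionalEmbedding(demb=8)
    pos_seq = torch.arange(3, -1, -1.0)  # descending positions [3, 2, 1, 0]
    emb = pe(pos_seq)
    assert emb.shape == (1, 4, 8)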
class FeedForwardLayer(nn.Module):
def __init__(self, d_model, d_inner, dropout, use_gelu):
nn.Module.__init__(self)
self.fc1 = nn.Linear(d_model, d_inner)
self.fc2 = nn.Linear(d_inner, d_model)
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
self.dropout = nn.Dropout(dropout)
self.use_gelu = use_gelu
def forward(self, h):
if self.use_gelu:
h1 = gelu(self.fc1(h))
else:
h1 = F.relu(self.fc1(h))
h1 = self.dropout(h1)
h2 = self.fc2(h1)
h2 = self.dropout(h2)
return h2
class TransformerLSLayer(nn.Module):
def __init__(self, d_model, d_inner, n_head, chunk_size, chunk_rank, window_len,
dropout, grad_chk, use_bias, pre_ln, use_gelu, probing):
nn.Module.__init__(self)
self.pre_ln = pre_ln
self.attn = ChunkedLSAttention(
d_model=d_model, n_head=n_head,
chunk_size=chunk_size, chunk_rank=chunk_rank, window_len=window_len,
dropout=dropout, grad_chk=grad_chk, use_bias=use_bias, probing=probing)
self.norm1 = LayerNorm(d_model, export=probing)
self.ff = FeedForwardLayer(d_model=d_model, d_inner=d_inner, dropout=dropout, use_gelu=use_gelu)
self.norm2 = LayerNorm(d_model, export=probing)
def forward(self, h, h_cache, key_pe, pos_embed_window, attn_mask=None, chunk_attn_mask=None):
# h = B x M x H
# h_cache = B x L x H
if self.pre_ln:
h = self.norm1(h)
h_cache = self.norm1(h_cache)
attn_out = self.attn(h, h_cache, key_pe, pos_embed_window, chunk_attn_mask)
if self.pre_ln:
h = h + attn_out
else:
h = self.norm1(h + attn_out) # B x M x H
if self.ff is not None:
if self.pre_ln:
h = self.norm2(h)
ff_out = self.ff(h)
if self.pre_ln:
out = h + ff_out # B x M x H
else:
out = self.norm2(h + ff_out) # B x M x H
else:
out = h
return out
class TransformerLSModel(nn.Module):
def __init__(
self,
vocab_size,
d_model,
d_inner,
n_head,
n_layer,
mem_len,
emb_dropout,
chunk_rank,
chunk_size,
window_len,
dropout,
use_bias,
pre_ln,
use_gelu,
grad_chk,
clamp_len,
cpos_clamp_len=-1,
probing=False,
):
nn.Module.__init__(self)
# token embeddings
self.in_emb = nn.Embedding(vocab_size, d_model)
nn.init.normal_(self.in_emb.weight, mean=0, std=d_model ** -0.5)
self.pos_emb = PositionalEmbedding(d_model)
# nn.init.uniform_(self.in_emb.weight, -0.01, 0.01)
self.out_emb = nn.Linear(d_model, vocab_size)
self.out_emb.weight = self.in_emb.weight
self.window_len = window_len
# Some knobs copied from Transformer XL
self.init = 'normal'
self.init_range = 0.01
self.proj_init_std = 0.01
self.init_std = 0.02
self.cpos_clamp_len = cpos_clamp_len
self.d_model = d_model
if emb_dropout > 0:
self.emb_dropout = nn.Dropout(emb_dropout)
else:
self.emb_dropout = None
self.chunk_size = chunk_size
self.chunk_rank = chunk_rank
self.layers = nn.ModuleList()
self.layers.extend(
TransformerLSLayer(
d_model=d_model,
d_inner=d_inner,
n_head=n_head,
chunk_rank=chunk_rank,
chunk_size=chunk_size,
window_len=window_len,
dropout=dropout,
use_bias=use_bias,
pre_ln=pre_ln,
use_gelu=use_gelu,
grad_chk=grad_chk,
probing=probing,
)
for _ in range(n_layer)
)
self.mem_len = mem_len
self.clamp_len = clamp_len
self.apply(self._init_weights)
def forward(self, x, h_cache, target=None):
# x size = B x M
padded = False
if self.chunk_size > 0 and (x.shape[1] % self.chunk_size):
# or x.shape[1] % self.window_len
# usually happens at the end
# ad-hoc solution for the chunking issue during evaluation
orig_seqlen = x.shape[1]
pad_multip = abs(self.chunk_size * self.window_len) // math.gcd(self.chunk_size, self.window_len)
n_pad = pad_multip - x.shape[1] % pad_multip
x = F.pad(x, (0, n_pad))
padded = True
block_size = x.size(1)
h = self.in_emb(x) #.mul_(self.d_model ** 0.5) # B x M x H
h.mul_(self.d_model ** 0.5)
mlen = h_cache[0].shape[1]
klen = h.shape[1] + mlen
dec_attn_mask = None
pos_seq = torch.arange(self.window_len - 1, -1, -1.0, device=h.device, dtype=h.dtype)
n_chunk_vecs = klen // self.chunk_size * self.chunk_rank
n_chunks = klen // self.chunk_size
n_mem_chunks = mlen // self.chunk_size
chunk_attn_mask = torch.triu(h.new_ones((x.shape[1]//self.chunk_size, n_chunks), dtype=torch.bool), diagonal=n_mem_chunks)[
None, None, :, None, :, None]
chunk_attn_mask = chunk_attn_mask.expand(-1, -1, -1, -1, -1, self.chunk_rank).contiguous().view(1, 1, -1, 1, n_chunks*self.chunk_rank)
pos_chunk_ids = torch.arange(n_chunk_vecs - 1, -1, -1.0, device=h.device, dtype=h.dtype)
if self.cpos_clamp_len > 0:
pos_chunk_ids.clamp_(max=self.cpos_clamp_len)
pos_chunks = self.pos_emb(pos_chunk_ids)
if self.clamp_len > 0:
pos_seq.clamp_(max=self.clamp_len)
pos_emb = self.pos_emb(pos_seq)
if self.emb_dropout is not None:
h = self.emb_dropout(h)
pos_emb = self.emb_dropout(pos_emb)
h_cache_next = []
for l, layer in enumerate(self.layers):
cache_size = self.mem_len
if cache_size > block_size:
h_cache_next_l = torch.cat(
[h_cache[l][:, -cache_size + block_size :, :], h], dim=1
).detach()
else:
h_cache_next_l = h[:, -cache_size:, :].detach()
h_cache_next.append(h_cache_next_l)
h = layer(h, h_cache[l], pos_chunks, pos_emb, dec_attn_mask, chunk_attn_mask) # B x M x H
if self.emb_dropout is not None:
h = self.emb_dropout(h)
out = F.log_softmax(self.out_emb(h).float(), dim=-1).type_as(h)
dummy_loss = None
if padded:
out = out[:, :orig_seqlen]
return out, h_cache_next, dummy_loss
def get_aux_loss(self):
loss = 0.0
for layer in self.layers:
loss += layer.attn.attn.adaptive_span.get_loss()
return self.aux_loss_scaler * loss
def get_current_max_span(self):
max_span = 0.0
for layer in self.layers:
max_span = max(
max_span, layer.attn.attn.adaptive_span.get_current_max_span()
)
return max_span
def get_current_avg_span(self):
avg_span = 0.0
for layer in self.layers:
avg_span += layer.attn.attn.adaptive_span.get_current_avg_span()
return avg_span / len(self.layers)
def _init_weight(self, weight):
if self.init == "uniform":
nn.init.uniform_(weight, -self.init_range, self.init_range)
elif self.init == "normal":
nn.init.normal_(weight, 0.0, self.init_std)
def _init_bias(self, bias):
nn.init.constant_(bias, 0.0)
def _init_weights(self, m):
"""Initialize the weights."""
classname = m.__class__.__name__
if classname.find("Linear") != -1:
if hasattr(m, "weight") and m.weight is not None:
self._init_weight(m.weight)
if hasattr(m, "bias") and m.bias is not None:
self._init_bias(m.bias)
elif classname.find("Embedding") != -1:
if hasattr(m, "weight"):
self._init_weight(m.weight)
elif classname.find("LayerNorm") != -1:
if hasattr(m, "weight"):
nn.init.normal_(m.weight, 1.0, self.init_std)
if hasattr(m, "bias") and m.bias is not None:
self._init_bias(m.bias)
else:
hit = False
if hasattr(m, "r_emb"):
self._init_weight(m.r_emb)
hit = True
if hasattr(m, "r_w_bias"):
self._init_weight(m.r_w_bias)
hit = True
if hasattr(m, "r_r_bias"):
self._init_weight(m.r_r_bias)
hit = True
if hasattr(m, "r_bias"):
self._init_bias(m.r_bias)
hit = True
if not hit:
print("Missing {}".format(classname))
| transformer-ls-master | autoregressive/model_lib/model.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from dataclasses import dataclass
from typing import Dict, List, Optional
import torch
from fairseq.dataclass import FairseqDataclass
from fairseq.models import (
FairseqIncrementalDecoder,
FairseqLanguageModel,
register_model,
)
from .model import TransformerLSModel
logger = logging.getLogger(__name__)
@dataclass
class TransformerLSConfig(FairseqDataclass):
# defaults come from https://github.com/facebookresearch/adaptive-span/blob/master/experiments/enwik8_small.sh
vocab_size: int = 50
d_model: int = 256
n_head: int = 4
d_inner: int = 1024
n_layer: int = 8
dropout: float = 0.0
emb_dropout: float = 0.0
chunk_rank: int = 1
chunk_size: int = 32
mem_len: int = 4096
window_len: int = 256
grad_chk: bool = False
pre_ln: bool = False
use_gelu: bool = False
use_bias: bool = False
clamp_len: int = -1
cpos_clamp_len: int = -1
probing: bool = False
@register_model("transformer-ls", dataclass=TransformerLSConfig)
class TransformerLS(FairseqLanguageModel):
@classmethod
def build_model(cls, cfg: TransformerLSConfig, task):
return cls(TransformerLSDecoder(cfg, task))
def get_aux_loss(self):
return self.decoder.get_aux_loss()
def get_current_max_span(self):
return self.decoder.get_current_max_span()
def get_current_avg_span(self):
return self.decoder.get_current_avg_span()
class TransformerLSDecoder(FairseqIncrementalDecoder):
def __init__(self, cfg, task):
super().__init__(task.target_dictionary)
self.config = cfg
config = TransformerLSConfig(
vocab_size=len(task.target_dictionary),
d_model=cfg.d_model,
n_head=cfg.n_head,
d_inner=cfg.d_inner,
n_layer=cfg.n_layer,
dropout=cfg.dropout,
emb_dropout=cfg.emb_dropout,
mem_len=cfg.mem_len,
chunk_rank=cfg.chunk_rank,
chunk_size=cfg.chunk_size,
window_len=cfg.window_len,
grad_chk=cfg.grad_chk,
pre_ln=cfg.pre_ln,
use_gelu=cfg.use_gelu,
use_bias=cfg.use_bias,
clamp_len=cfg.clamp_len,
cpos_clamp_len=cfg.cpos_clamp_len,
probing=cfg.probing,
)
logger.info(config)
del config.__dict__['_name']
self.model = TransformerLSModel(**config.__dict__)
self.cache_size = cfg.mem_len
self._mems = None
def forward(
self,
src_tokens,
incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None,
encoder_out=None,
):
bsz = src_tokens.size(0)
if incremental_state is not None: # used during inference
mems = self.get_incremental_state("mems")
src_tokens = src_tokens[:, -1:] # only keep the most recent token
else:
mems = self._mems
if mems is None:
# first time init
mems = self.init_hid_cache(bsz)
output = self.model(x=src_tokens, h_cache=mems,)
if incremental_state is not None:
self.set_incremental_state(incremental_state, "mems", output[1])
else:
self._mems = output[1]
return (output[0],)
def init_hid_cache(self, batch_sz):
hid = []
for layer in self.model.layers:
param = next(self.model.parameters())
h = torch.zeros(
batch_sz,
self.cache_size,
self.config.d_model,
dtype=param.dtype,
device=param.device,
)
hid.append(h)
return hid
def get_aux_loss(self):
return self.model.get_aux_loss()
def get_current_max_span(self):
return self.model.get_current_max_span()
def get_current_avg_span(self):
return self.model.get_current_avg_span()
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]],
new_order: torch.Tensor,
):
"""Reorder incremental state.
This will be called when the order of the input has changed from the
previous time step. A typical use case is beam search, where the input
order changes between time steps based on the selection of beams.
"""
raise NotImplementedError("This is required for generation/beam search")
# mems = self.get_incremental_state(incremental_state, "mems")
# if mems is not None:
# new_mems = [mems_i.index_select(1, new_order) for mems_i in mems]
# self.set_incremental_state(incremental_state, "mems", new_mems)
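# Hedged sketch (added for illustration): init_hid_cache above allocates one zero tensor per
# layer of shape (batch, mem_len, d_model); forward() feeds this cache to the model and keeps
# the returned, updated cache for the next segment. All sizes below are made up.
def _hid_cache_shape_example():
    n_layer, bsz, mem_len, d_model = 8, 2, 4096, 256
    mems = [torch.zeros(bsz, mem_len, d_model) for _ in range(n_layer)]
    assert len(mems) == n_layer and mems[0].shape == (bsz, mem_len, d_model)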
| transformer-ls-master | autoregressive/model_lib/model_wrapper.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from dataclasses import dataclass
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import register_criterion
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.dataclass import FairseqDataclass
from omegaconf import II
import pdb
@dataclass
class AdaptiveSpanCriterionConfig(FairseqDataclass):
sentence_avg: bool = II("optimization.sentence_avg")
@register_criterion("char_level_lm_loss", dataclass=AdaptiveSpanCriterionConfig)
class AdaptiveSpanCriterion(CrossEntropyCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task, sentence_avg)
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss, which is summed here (unlike the adaptive-span code)
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
loss = self.compute_loss(
model, net_output, sample, reduce=reduce
)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
loss /= sample_size
total_loss = loss
sample_size = 1
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"total_loss": total_loss.data,
}
return total_loss, sample_size, logging_output
def compute_loss(self, model, net_output, sample, reduce=True):
loss, _ = super().compute_loss(model, net_output, sample, reduce)
return loss
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
total_loss_sum = sum(log.get("total_loss", 0) for log in logging_outputs)
# we divide by log(2) to convert the loss from base e to base 2
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
# total loss contains the L1 norm on adaptive-span
metrics.log_scalar(
"total_loss",
total_loss_sum / sample_size / math.log(2),
sample_size,
round=3,
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improve distributed training speed.
"""
return True
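# Illustrative sketch (added, not part of the original criterion): dividing a mean
# cross-entropy in nats by log(2) in reduce_metrics converts it to bits per character.
# The numbers below are made up.
def _bits_per_character_example():
    loss_sum_nats, sample_size = 693.1, 1000.0
    bpc = loss_sum_nats / sample_size / math.log(2)
    assert abs(bpc - 1.0) < 1e-3  # roughly 1 bit per character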
| transformer-ls-master | autoregressive/model_lib/loss.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Forked from https://github.com/pytorch/fairseq/blob/master/examples/truncated_bptt/truncated_bptt_lm_task.py
"""
import logging
import os
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
TokenBlockDataset,
data_utils,
iterators,
)
from fairseq.dataclass import FairseqDataclass
from fairseq.distributed import utils as dist_utils
from fairseq.tasks import FairseqTask, register_task
from omegaconf import II
logger = logging.getLogger(__name__)
@dataclass
class TruncatedBPTTLMConfig(FairseqDataclass):
data: str = field(default="???", metadata={"help": "path to data directory"})
tokens_per_sample: int = field(
default=1024,
metadata={"help": "max number of tokens per sequence"},
)
batch_size: int = II("dataset.batch_size")
# Some models use *max_target_positions* to know how many positional
# embeddings to learn. We use II(...) to make it default to
# *tokens_per_sample*, but in principle there could be more positional
# embeddings than tokens in a single batch. This may also be irrelevant for
# custom model implementations.
max_target_positions: int = II("task.tokens_per_sample")
# these will be populated automatically if not provided
data_parallel_rank: Optional[int] = None
data_parallel_size: Optional[int] = None
@register_task("truncated_bptt_lm", dataclass=TruncatedBPTTLMConfig)
class TruncatedBPTTLMTask(FairseqTask):
def __init__(self, cfg: TruncatedBPTTLMConfig):
super().__init__(cfg)
if cfg.data_parallel_rank is None or cfg.data_parallel_size is None:
if torch.distributed.is_initialized():
cfg.data_parallel_rank = dist_utils.get_data_parallel_rank()
cfg.data_parallel_size = dist_utils.get_data_parallel_world_size()
else:
cfg.data_parallel_rank = 0
cfg.data_parallel_size = 1
# load the dictionary
paths = utils.split_paths(cfg.data)
assert len(paths) > 0
self.dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
logger.info("dictionary: {} types".format(len(self.dictionary)))
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split (e.g., train, valid, test)"""
# support sharded datasets
paths = utils.split_paths(self.cfg.data)
assert len(paths) > 0
data_path = paths[(epoch - 1) % len(paths)]
split_path = os.path.join(data_path, split)
# each element of *data* will be a tensorized line from the original
# text dataset, similar to ``open(split_path).readlines()``
data = data_utils.load_indexed_dataset(
split_path, self.dictionary, combine=combine
)
if data is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
# this is similar to ``data.view(-1).split(tokens_per_sample)``
data = TokenBlockDataset(
data,
data.sizes,
block_size=self.cfg.tokens_per_sample,
pad=None, # unused
eos=None, # unused
break_mode="none",
)
self.datasets[split] = TruncatedBPTTDataset(
data=data,
bsz_per_shard=self.cfg.batch_size,
shard_id=self.cfg.data_parallel_rank,
num_shards=self.cfg.data_parallel_size,
)
def dataset(self, split):
return self.datasets[split]
def get_batch_iterator(
self, dataset, num_workers=0, epoch=1, data_buffer_size=0, **kwargs
):
return iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=self._collate_fn,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
# we don't use the batching functionality from EpochBatchIterator;
# instead every item in *dataset* is a whole batch
batch_sampler=[[i] for i in range(len(dataset))],
disable_shuffling=True,
)
def _collate_fn(self, items: List[List[torch.Tensor]]):
# we don't use fairseq's batching functionality, so we expect a single
# item whose value is a List[torch.Tensor]
assert len(items) == 1
# item will have shape B x T (the last batch may have length < T)
id, item = items[0]
item = data_utils.collate_tokens(item, pad_idx=self.source_dictionary.pad())
B, T = item.size()
# shift item one position over and append a padding token for the target
target = torch.nn.functional.pad(
item[:, 1:], (0, 1, 0, 0), value=self.target_dictionary.pad()
)
# fairseq expects batches to have the following structure
return {
"id": torch.tensor([id]*item.size(0)),
"net_input": {
"src_tokens": item,
},
"target": target,
"nsentences": item.size(0),
"ntokens": item.numel(),
}
def build_dataset_for_inference(
self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
eos = self.source_dictionary.eos()
dataset = TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None, # ignored for "eos" break mode
pad=self.source_dictionary.pad(),
eos=eos,
break_mode="eos",
)
class Dataset(torch.utils.data.Dataset):
def __getitem__(self, i):
item = dataset[i]
if item[-1] == eos:
# remove eos to support generating with a prefix
item = item[:-1]
return (i, [item])
def __len__(self):
return len(dataset)
return Dataset()
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
if constraints is not None:
raise NotImplementedError
# SequenceGenerator doesn't use *src_tokens* directly, we need to
# pass the *prefix_tokens* argument instead.
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
# begin generation with the end-of-sentence token
bos_token = self.source_dictionary.eos()
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, bos_token=bos_token
)
def eval_lm_dataloader(
self,
dataset,
max_tokens: Optional[int] = 36000,
batch_size: Optional[int] = None,
max_positions: Optional[int] = None,
num_shards: int = 1,
shard_id: int = 0,
num_workers: int = 1,
data_buffer_size: int = 10,
context_window: int = 0,
):
if context_window > 0:
raise NotImplementedError(
"Transformer-XL doesn't need --context-window, try "
"--model-overrides '{\"mem_len\":42}' instead "
)
return self.get_batch_iterator(
dataset=dataset,
max_tokens=max_tokens,
max_sentences=batch_size,
max_positions=max_positions,
ignore_invalid_inputs=True,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
data_buffer_size=data_buffer_size,
).next_epoch_itr(shuffle=False)
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
class TruncatedBPTTDataset(torch.utils.data.Dataset):
def __init__(
self,
data: List[torch.Tensor], # ordered list of items
bsz_per_shard, # number of items processed per GPU per forward pass
shard_id, # current GPU ID
num_shards, # number of GPUs
):
super().__init__()
self.data = data
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).contiguous()
return data
# total number of sequences processed by all GPUs in each forward pass
global_batch_size = bsz_per_shard * num_shards
"""
With a 16 item dataset, bsz_per_shard=2 and num_shards=3,
*indices* might look like:
indices = [[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9],
[10, 11]]
The size of the TruncatedBPTTDataset instance will be 2,
and shard 1 will see items:
[(0, [data[4], data[6]]),
(1, [data[5], data[7]])]
"""
indices = batchify(torch.arange(len(data)), global_batch_size)
assert indices.size(0) == global_batch_size
self.my_indices = indices[
shard_id * bsz_per_shard : (shard_id + 1) * bsz_per_shard
]
assert self.my_indices.size(0) == bsz_per_shard
def __len__(self):
return self.my_indices.size(1)
def __getitem__(self, i) -> Tuple[int, List[torch.Tensor]]:
return (i, [self.data[idx] for idx in self.my_indices[:, i]])
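# Hedged sketch (added for illustration) reproducing the docstring example above: a 16-item
# dataset with bsz_per_shard=2 and num_shards=3 gives shard 1 the index rows [[4, 5], [6, 7]];
# each __getitem__ call returns one column, i.e. [data[4], data[6]] and then [data[5], data[7]].
def _truncated_bptt_sharding_example():
    data = [torch.tensor([i]) for i in range(16)]
    ds = TruncatedBPTTDataset(data, bsz_per_shard=2, shard_id=1, num_shards=3)
    assert len(ds) == 2
    assert [t.item() for t in ds[0][1]] == [4, 6]
    assert [t.item() for t in ds[1][1]] == [5, 7]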
| transformer-ls-master | autoregressive/model_lib/truncated_bptt_lm_task.py |
# Copyright (c) 2021 NVIDIA CORPORATION. Licensed under the MIT license.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from fairseq.modules.layer_norm import LayerNorm
import pdb
class ChunkedLSAttention(nn.Module):
def __init__(self, d_model, n_head, chunk_size, chunk_rank, window_len, dropout,
grad_chk=False, use_bias=False, dp_attn=0,
probing=False):
nn.Module.__init__(self)
self.dropout = nn.Dropout(dropout)
self.dp_attn = nn.Dropout(dp_attn)
assert d_model % n_head == 0
assert chunk_size > 0
self.n_head = n_head
self.head_dim = d_model // n_head
self.window_len = window_len
self.chunk_rank = chunk_rank
self.chunk_size = chunk_size
self.n_head = n_head
self.d_h = d_model // n_head
self.d_model = d_model
self.dconv_1 = nn.Linear(d_model, n_head * chunk_rank)
self.r_net = nn.Linear(d_model, d_model, bias=False)
self.r_net_chunk = nn.Linear(d_model, d_model)
self.d_head = d_model // self.n_head
# Positional bias as in Transformer-XL.
self.r_r_bias = nn.Parameter(torch.FloatTensor(1, self.n_head, 1, self.d_head))
self.r_w_bias = nn.Parameter(torch.FloatTensor(1, self.n_head, 1, 1, self.d_head))
self.grad_chk = grad_chk
self.proj_query = nn.Linear(d_model, d_model, bias=use_bias)
nn.init.xavier_normal_(self.proj_query.weight)
self.proj_out = nn.Linear(d_model, d_model, bias=use_bias)
nn.init.xavier_normal_(self.proj_out.weight)
self.proj_val = nn.Linear(d_model, d_model, bias=use_bias)
nn.init.xavier_normal_(self.proj_val.weight)
self.proj_key = nn.Linear(d_model, d_model, bias=use_bias)
nn.init.xavier_normal_(self.proj_key.weight)
self.dual_ln_dproj = LayerNorm(d_model, export=probing)
self.dual_ln_win = LayerNorm(d_model, export=probing)
nn.init.zeros_(self.r_r_bias)
nn.init.zeros_(self.r_w_bias)
if use_bias:
nn.init.zeros_(self.proj_query.bias)
nn.init.zeros_(self.proj_out.bias)
nn.init.zeros_(self.proj_val.bias)
nn.init.zeros_(self.proj_key.bias)
def head_reshape(self, x):
K = self.n_head
D = self.head_dim
x = x.view(x.size()[:-1] + (K, D)) # B x (M+L) x K x D
x = x.transpose(1, 2).contiguous() # B x K x (M+L) x D
return x
def compute_scores(self, h_vecs):
# h_vecs: B x L x H
bsz = h_vecs.shape[0]
n_chunks = h_vecs.shape[1] // self.chunk_size
h_scores = self.dconv_1(h_vecs).view(bsz, n_chunks, self.chunk_size, self.n_head, self.chunk_rank)
# bsz x num_heads x n_chunks x chunk_rank x chunk_size
h_scores = h_scores.permute(0, 3, 1, 4, 2)
h_scores = F.softmax(h_scores.float(), dim=-1).type_as(h_scores)
return h_scores
def compress_chunks(self, h_vecs, h_scores):
# Reshape h_vecs so the per-head chunk scores can be applied
# h_vecs: B x L x H
bsz = h_vecs.shape[0]
n_chunks = h_vecs.shape[1] // self.chunk_size
# bsz x n_heads x n_chunks x chunk_size x d_h
h_vecs = h_vecs.view(-1, n_chunks, self.chunk_size, self.n_head, self.d_h).permute(0, 3, 1, 2, 4)
# bsz x n_heads x n_chunks x chunk_rank x d_h
h_vecs = h_scores.matmul(h_vecs).view(bsz, self.n_head, n_chunks * self.chunk_rank, self.d_h)
return h_vecs
def get_tiles(self, x, n_queries, transpose=False):
# input: bsz x win_bp_len x d
bsz, win_bp_len, d = x.shape
in_strides = x.stride()
out_strides = (in_strides[0], self.window_len*in_strides[1], in_strides[1], d//self.n_head, 1)
out_shape = (bsz, n_queries//self.window_len, 2*self.window_len, self.n_head, d//self.n_head)
x = x.as_strided(size=out_shape, stride=out_strides)
if transpose:
# shape: bsz x n_heads x n_queries//wlen x d//n_heads x 2*wlen
return x.permute(0, 3, 1, 4, 2)
else:
# shape: bsz x n_heads x n_queries//wlen x 2*wlen x d//n_heads
return x.permute(0, 3, 1, 2, 4)
def put_tiles(self, x):
# input: bsz x n_heads x bp_len x self.window_len
bsz, n_heads, bp_len, window_len = x.shape
if bp_len > window_len:
x = x.view(bsz, n_heads, bp_len//window_len, window_len, window_len)
out_size = (bsz, n_heads, bp_len//window_len, window_len, 2*window_len)
x = F.pad(x, (1, window_len))
else:
x = x.view(bsz, n_heads, 1, bp_len, window_len)
out_size = (bsz, n_heads, 1, bp_len, window_len + bp_len)
x = F.pad(x, (1, bp_len))
stride = x.stride()
out_stride = (stride[0], stride[1], stride[2], stride[3]-1, stride[4])
return x.as_strided(size=out_size, stride=out_stride)
def compute_pv(self, attn, val):
# attn: bsz x n_head x seqlen//wlen x wlen x 2*wlen
# val: bsz x n_head x seqlen//wlen x 2*wlen x d_h
bsz, n_head, chunks, wlen, _ = attn.shape
out = attn.matmul(val)
return out.view(bsz, n_head, int(chunks*wlen), -1)
def get_diagonals(self, attn):
# attn: bsz x n_heads x bp_len//self.window_len x self.window_len x 2*self.window_len
# takes the upper diagonal with length self.window_len from attn, ignoring the diagonal
bsz, n_heads, n_tiles, n_query, _ = attn.shape
out_size = (bsz, n_heads, n_tiles, n_query, self.window_len)
in_stride = attn.stride()
out_stride = (in_stride[0], in_stride[1], in_stride[2], in_stride[3]+1, 1)
return attn.as_strided(size=out_size, stride=out_stride, storage_offset=1).contiguous().view(
bsz, n_heads, -1, self.window_len)
def _rel_shift_chunked(self, x, chunk_size, chunk_rank):
# x: bsz x n_head x n_query x (n_chunks * chunk_rank)
# out: same size but shifted to the left, relative position encoding
bsz, n_head, n_query, n_c_vecs = x.shape
n_q_chunks = n_query // chunk_size
x = x.view(bsz, n_head, n_q_chunks, chunk_size, n_c_vecs).transpose(2, 3).contiguous()
x = F.pad(x, [0, chunk_rank])
p_stride = x.stride()
out_shape = list(x.shape)
out_shape[-1] -= chunk_rank
out_strides = (p_stride[0], p_stride[1], p_stride[2], p_stride[3]-chunk_rank, p_stride[4])
x = x.as_strided(size=out_shape, stride=out_strides, storage_offset=n_q_chunks*chunk_rank)
return x.transpose(2, 3).contiguous().view(bsz, n_head, n_query, n_c_vecs)
def attn(self, query, key_window, val_window, key_compressed, value_compressed,
pos_embed_chunks, pos_embed_window, chunk_attn_mask=None):
# query size = bsz x n_heads x M x H
# key, value sizes = bsz x (seq_len + cache_len) x (n_heads * H)
# key_compressed: bsz x n_heads x (M+L)//chunk_size*chunk_rank x H
bsz, n_heads, seq_len, d_h = query.shape
assert (self.window_len > 0 or self.chunk_size > 1)
query = query / math.sqrt(self.d_model // self.n_head)
# get the keys, values for the local window attention
if seq_len > self.window_len:
query_tile = query.view(bsz, n_heads, seq_len // self.window_len, self.window_len, d_h)
key_window = self.get_tiles(key_window, seq_len, transpose=True)
val_window = self.get_tiles(val_window, seq_len,
transpose=False) # bsz x n_heads x n_queries//wlen x 2*wlen x d//n_heads
else:
query_tile = query.view(bsz, n_heads, 1, seq_len, d_h)
key_window = key_window.view(bsz, -1, self.n_head, d_h).permute(0, 2, 3, 1)[:, :, None, :, :]
val_window = val_window.view(bsz, -1, self.n_head, d_h).permute(0, 2, 1, 3)[:, :, None, :, :]
# bsz x n_heads x bp_len//self.window_len x self.window_len x 2*self.window_len
attn_window = (query_tile+self.r_w_bias).matmul(key_window)
attn_window = self.get_diagonals(attn_window)
pos_trans = self.r_net(pos_embed_window).view(1, self.window_len, self.n_head, self.d_head).permute(0, 2, 3, 1)
attn_window_pos = (query+self.r_r_bias).matmul(pos_trans)
attn_window = attn_window + attn_window_pos
# Compute the long-range attention.
n_chunks = key_compressed.shape[2]
# compute attention from context
# bsz x n_heads x seq_len x (n_chunks*chunk_rank)
attn_cont = torch.matmul(query, key_compressed.transpose(-1, -2))
pos_chunks = self.r_net_chunk(pos_embed_chunks).view(1, n_chunks, self.n_head, self.d_head).permute(0, 2, 3, 1)
attn_pos = torch.matmul(query, pos_chunks) # B x H x M x L_pos
attn_pos = self._rel_shift_chunked(attn_pos, self.chunk_size, self.chunk_rank)
attn_compress = attn_cont + attn_pos
if chunk_attn_mask is not None:
attn_compress = attn_compress.view(
bsz, n_heads, seq_len//self.chunk_size, self.chunk_size, -1)
attn_compress = attn_compress.masked_fill(chunk_attn_mask, float('-inf'))
attn_compress = attn_compress.view(bsz, n_heads, seq_len, -1)
# Get the softmax score of both short-term and long-range attentions.
full_attn = torch.cat([attn_compress, attn_window], dim=3)
full_attn = F.softmax(full_attn.float(), dim=-1).type_as(full_attn)
full_attn = self.dp_attn(full_attn)
attn_compress = full_attn[:, :, :, :attn_compress.shape[3]]
attn_window = full_attn[:, :, :, attn_compress.shape[3]:]
attn_window = self.put_tiles(attn_window)
out = torch.matmul(attn_compress, value_compressed) \
+ self.compute_pv(attn_window, val_window)
return out
def forward(self, h, h_cache, key_pe, pos_embed_window, chunked_attn_mask=None):
if self.grad_chk:
out = cp.checkpoint(self.forward_, *[
h, h_cache, key_pe, pos_embed_window, chunked_attn_mask
])
else:
out = self.forward_(h, h_cache, key_pe, pos_embed_window, chunked_attn_mask)
return out
def forward_(self, h, h_cache, key_pe, pos_embed_window, chunked_attn_mask=None):
# h = bsz x seq_len x H
# h_cache = bsz x cache_len x H
bsz = h.size(0)
seqlen = h.size(1)
query = self.proj_query(h)
query = self.head_reshape(query)
# sequence length and cache length should be divisible by the chunk size
assert seqlen % self.chunk_size == 0 and h_cache.shape[1] % self.chunk_size == 0
cache_scores = self.compute_scores(h_cache)
h_cache_compressed = self.compress_chunks(h_cache, cache_scores)
# The projection for the cache can be compressed using dynamic projection
h_cache_merge = h_cache_compressed.view(
bsz, self.n_head, -1, self.d_h).transpose(1, 2).contiguous().view(
bsz, -1, self.d_model)
# Apply projections to the compressed sequence.
val_cache = self.proj_val(h_cache_merge)
key_cache = self.proj_key(h_cache_merge)
# DualLN (dproj)
key_cache = self.dual_ln_dproj(key_cache)
val_cache = self.dual_ln_dproj(val_cache)
val_cache = self.head_reshape(val_cache)
key_cache = self.head_reshape(key_cache)
# Prepare the keys/values for the sliding-window attention
val_window_bp = self.proj_val(h)
key_window_bp = self.proj_key(h)
# window_len is better chosen as a multiple of 8
h_cache_win = h_cache[:, -self.window_len:]
key_cache_win = self.proj_key(h_cache_win)
val_cache_win = self.proj_val(h_cache_win)
key_window = torch.cat([key_cache_win, key_window_bp], dim=1)
val_window = torch.cat([val_cache_win, val_window_bp], dim=1)
# DualLN (window)
key_window = self.dual_ln_win(key_window)
val_window = self.dual_ln_win(val_window)
bp_scores = self.compute_scores(h)
# Compress the projected keys and values.
val_bp_compressed = self.compress_chunks(val_window_bp, bp_scores)
key_bp_compressed = self.compress_chunks(key_window_bp, bp_scores)
# DualLN (dproj)
val_bp_compressed = self.dual_ln_dproj(
val_bp_compressed.transpose(1, 2).contiguous().view(bsz, -1, self.d_model))
key_bp_compressed = self.dual_ln_dproj(
key_bp_compressed.transpose(1, 2).contiguous().view(bsz, -1, self.d_model))
val_bp_compressed = self.head_reshape(val_bp_compressed)
key_bp_compressed = self.head_reshape(key_bp_compressed)
val_compressed = torch.cat([val_cache, val_bp_compressed], dim=2)
key_compressed = torch.cat([key_cache, key_bp_compressed], dim=2)
out = self.attn(query, key_window, val_window, key_compressed, val_compressed, key_pe, pos_embed_window, chunked_attn_mask) # B_K x M x D
out = out.transpose(1, 2).contiguous() # B x M x K x D
out = out.view(bsz, seqlen, -1) # B x M x K_D
out = self.proj_out(out)
out = self.dropout(out)
return out
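# Hedged shape walk-through (added for illustration, not from the original repo): the dynamic
# projection first produces `chunk_rank` mixing weights per head for every chunk
# (compute_scores), then averages each chunk with those weights (compress_chunks), so a
# length-64 input with chunk_size=16 is summarized by 4 vectors per head. All sizes are made up.
def _dynamic_projection_shape_example():
    attn = ChunkedLSAttention(d_model=32, n_head=4, chunk_size=16, chunk_rank=1,
                              window_len=16, dropout=0.0)
    h = torch.randn(2, 64, 32)                    # bsz x seq_len x d_model
    scores = attn.compute_scores(h)               # bsz x n_head x n_chunks x chunk_rank x chunk_size
    compressed = attn.compress_chunks(h, scores)  # bsz x n_head x n_chunks*chunk_rank x d_h
    assert scores.shape == (2, 4, 4, 1, 16)
    assert compressed.shape == (2, 4, 4, 8)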
| transformer-ls-master | autoregressive/model_lib/layer.py |
#!/usr/bin/env python
# coding=utf-8
"""Copied rom https://github.com/kimiyoung/transformer-xl/blob/master/prep_text8.py"""
import os
import sys
import zipfile
from io import open
if os.path.exists('train.txt'):
print('Tokenized text8 already exists - skipping processing')
sys.exit()
data = zipfile.ZipFile('text8.zip').extractall()
data = open('text8', 'r', encoding='utf-8').read()
print('Length of text8: {}'.format(len(data)))
num_test_chars = 5000000
train_data = data[: -2 * num_test_chars]
valid_data = data[-2 * num_test_chars: -num_test_chars]
test_data = data[-num_test_chars:]
for fn, part in [('train.txt', train_data), ('valid.txt', valid_data), ('test.txt', test_data)]:
print('{} will have {} bytes'.format(fn, len(part)))
print('- Tokenizing...')
# Change space ' ' to underscore '_'
part_str = ' '.join(['_' if c == ' ' else c for c in part.strip()])
print('- Writing...')
open(fn, 'w').write(part_str)
open(fn + '.raw', 'w', encoding='utf-8').write(part)
| transformer-ls-master | autoregressive/data_prepro/prep_text8.py |
"""Unified script for classification problems, with distributed training support.
Adapted from https://github.com/microsoft/vision-longformer
"""
import wandb
import argparse
import logging
import os
import os.path as op
import sys
import torch
import torch.utils.data.distributed
from timm.data import Mixup
from models import build_model
from engine import train, validate
from loss import get_criterion
from utils.miscellaneous import mkdir, set_seed, config_iteration
from utils.comm import synchronize, get_rank
from optim import get_opt, get_lr_scheduler
from utils.checkpoint import Checkpointer
from utils.metric_logger import TensorboardLogger
from dat.loader import make_epoch_data_loader
from config import cfg
try:
this_file = __file__
except NameError:
this_file = sys.argv[0]
this_file = op.abspath(this_file)
logging.getLogger().setLevel(logging.INFO)
def main():
################### parse experiment settings #####################
parser = argparse.ArgumentParser(description='PyTorch for image cls')
parser.add_argument('--config-file',
default="",
metavar="FILE",
help="path to config file",
type=str,
)
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--seed', type=int, default=0,
help='random seed')
parser.add_argument('--data', metavar='DIR', help='path to datasets',
default=os.getenv('PT_DATA_DIR', './datasets'))
parser.add_argument('--output_dir', type=str,
default=os.getenv('PT_OUTPUT_DIR', '/tmp'))
parser.add_argument('--wandb', default=False, action='store_true')
parser.add_argument('--clip-grad', default=-1, type=float)
parser.add_argument('--expname', default="default", type=str)
parser.add_argument('--resume-path', default='.', type=str)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
assert (device == 'cuda')
# Setup CUDA, GPU & distributed training
args.num_gpus = int(
os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
cfg.distributed = args.num_gpus > 1
if 'RANK' in os.environ:
args.local_rank = int(os.environ['LOCAL_RANK'])
args.rank = int(os.environ['RANK'])
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://',
world_size=int(os.environ['WORLD_SIZE']), rank=int(os.environ["RANK"]))
args.n_gpu = 1
if int(os.environ['RANK']) == 0 and args.wandb:
# wandb.login(key=YOUR_KEY)
experiment = wandb.init(project="vision-vil", name=args.expname, entity='default', reinit=False,)
wandb.config.update(args)
else:
experiment = None
if args.local_rank == 0:
logging.info("args.n_gpu: {}".format(args.n_gpu))
# Set the random seed manually for reproducibility.
if args.seed != 0:
set_seed(args.seed, args.n_gpu)
cfg.DATA.PATH = args.data
cfg.OUTPUT_DIR = args.output_dir
##################### Data ############################
if args.local_rank == 0:
logging.info('==> Preparing data..')
testloaders = make_epoch_data_loader(cfg, is_train=False, drop_last=False,
is_distributed=cfg.distributed)
## fix warmup based on trainset size, batch size.
iteration = 0
if not cfg.EVALUATE:
trainloader = make_epoch_data_loader(cfg, is_train=True, drop_last=True,
is_distributed=cfg.distributed)
steps_per_epoch = len(trainloader)
cfg.SOLVER.STEPS_PER_EPOCH = steps_per_epoch
warmup = cfg.OPTIM.WARMUP * steps_per_epoch
cfg.OPTIM.WARMUP = warmup
cfg.SOLVER.MAX_ITER = steps_per_epoch * cfg.OPTIM.EPOCHS
# get the starting checkpoint's iteration
iteration = config_iteration(cfg.OUTPUT_DIR, steps_per_epoch)
if args.local_rank == 0:
logging.info("Experiment settings:")
logging.info(cfg)
if cfg.OUTPUT_DIR:
mkdir(cfg.OUTPUT_DIR)
# save full config to a file in output_dir for future reference
with open(os.path.join(cfg.OUTPUT_DIR, 'config.yaml'), 'w') as f:
f.write(str(cfg))
cfg.freeze()
# mix-up
aug = cfg.AUG
mixup_fn = Mixup(
mixup_alpha=aug.MIXUP, cutmix_alpha=aug.MIXCUT,
cutmix_minmax=aug.MIXCUT_MINMAX if aug.MIXCUT_MINMAX else None,
prob=aug.MIXUP_PROB, switch_prob=aug.MIXUP_SWITCH_PROB,
mode=aug.MIXUP_MODE, label_smoothing=cfg.LOSS.LABEL_SMOOTHING,
num_classes=cfg.DATA.NUM_CLASSES
) if aug.MIXUP_PROB > 0.0 else None
##################### Model ############################
net = build_model(cfg)
net = net.to(device)
n_params = 0
for p in net.parameters():
n_params += p.numel()
if args.local_rank == 0:
logging.info("**** Params: {}".format(n_params))
if not cfg.EVALUATE and cfg.AMP.ENABLED and cfg.AMP.MEMORY_FORMAT == 'nhwc':
if args.local_rank == 0:
logging.info('=> convert memory format to nhwc')
net.to(memory_format=torch.channels_last)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
net = torch.nn.DataParallel(net)
if args.local_rank == 0:
logging.info("Number of GPUs: {}, using DaraParallel.".format(args.n_gpu))
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1 and cfg.distributed:
process_group = torch.distributed.new_group(list(range(args.num_gpus)))
net = torch.nn.SyncBatchNorm.convert_sync_batchnorm(net, process_group)
net = torch.nn.parallel.DistributedDataParallel(
net, device_ids=[args.local_rank], output_device=args.local_rank,
)
if args.local_rank == 0:
logging.info("Number of GPUs: {}, using DistributedDaraParallel.".format(args.num_gpus))
##################### Loss function and optimizer ############################
criterion_eval = get_criterion(cfg, train=False)
criterion_eval.cuda()
optimizer = None
scheduler = None
if not cfg.EVALUATE:
criterion = get_criterion(cfg)
criterion.cuda()
optimizer = get_opt(cfg, net, resume=iteration>0)
scheduler = get_lr_scheduler(cfg, optimizer, last_iter=iteration)
##################### make a checkpoint ############################
best_acc = 0.0
checkpointer = Checkpointer(net, cfg.MODEL.ARCH, best_acc=best_acc,
optimizer=optimizer, scheduler=scheduler,
save_dir=cfg.OUTPUT_DIR,
is_test=cfg.EVALUATE,
only_save_last=cfg.ONLY_SAVE_LAST)
############## tensorboard writers #############################
tb_log_dir = os.path.join(args.output_dir, 'tf_logs')
train_tb_log_dir = os.path.join(tb_log_dir, 'train_logs')
task_names = [task_name.replace('.yaml', '').replace('/', '_')
for task_name in cfg.DATA.TEST]
test_tb_log_dirs = [os.path.join(tb_log_dir, '{}_logs'.format(
task_name)) for task_name in task_names]
train_meters = TensorboardLogger(
log_dir=train_tb_log_dir,
delimiter=" ",
)
test_meters = [
TensorboardLogger(
log_dir=test_tb_log_dir,
delimiter=" ",
) for test_tb_log_dir in test_tb_log_dirs
]
if cfg.EVALUATE:
# first, load the model from the checkpoint
checkpointer.load(args.resume_path)
for task_name, testloader, test_meter in zip(task_names, testloaders, test_meters):
validate(testloader, net, criterion_eval, cfg,
test_meter, global_step=0, device=device,
local_rank=get_rank())
############## training code #############################
if not cfg.EVALUATE:
scaler = torch.cuda.amp.GradScaler(enabled=cfg.AMP.ENABLED)
# start from epoch 0 or last checkpoint epoch
start_epoch = checkpointer.epoch
for epoch in range(start_epoch, cfg.OPTIM.EPOCHS):
# wait for all processes before every epoch
synchronize()
if args.local_rank == 0:
logging.info("PROGRESS: {}%".format(
round(100 * epoch / cfg.OPTIM.EPOCHS, 4)))
global_step = epoch * len(trainloader)
# an empirical rule for redrawing projections in Performer
if cfg.MODEL.ARCH.startswith('msvit') and cfg.MODEL.VIT.MSVIT.ATTN_TYPE == "performer":
if hasattr(net, 'module'):
net.module.feature_redraw_interval = 1 + 5 * epoch
else:
net.feature_redraw_interval = 1 + 5 * epoch
if cfg.MODEL.ARCH.startswith('msvit') and cfg.MODEL.VIT.MSVIT.ATTN_TYPE.startswith('longformer'):
vil_switch_mode = cfg.MODEL.VIT.MSVIT.VIL_MODE_SWITCH * cfg.OPTIM.EPOCHS
if cfg.MODEL.VIT.MSVIT.MODE > 0 and epoch >= vil_switch_mode:
# only reset random sample mode to full mode
if hasattr(net, 'module'):
net.module.reset_vil_mode(mode=0)
else:
net.reset_vil_mode(mode=0)
# train for one epoch
with torch.autograd.set_detect_anomaly(cfg.SOLVER.DETECT_ANOMALY):
train(trainloader, net, criterion, optimizer, scheduler, epoch,
cfg, train_meters, global_step=global_step, device=device,
mixup_fn=mixup_fn, scaler=scaler)
# evaluate on validation set
global_step = (epoch + 1) * len(trainloader)
accs = []
for task_name, testloader, test_meter in zip(task_names, testloaders, test_meters):
if args.local_rank == 0:
logging.info("Evaluating dataset: {}".format(task_name))
acc = validate(testloader, net, criterion_eval, cfg,
test_meter, global_step=global_step,
device=device,
local_rank=get_rank())
accs.append(acc)
# remember best prec@1 and save checkpoint
is_best = accs[0] > checkpointer.best_acc
if is_best:
checkpointer.best_acc = accs[0]
elif cfg.OPTIM.VAL and cfg.OPTIM.OPT in ['sgd', 'qhm', 'salsa']:
if args.local_rank == 0:
logging.info("DROPPING LEARNING RATE")
# Anneal the learning rate if no improvement has been seen in the validation dataset.
for group in optimizer.param_groups:
group['lr'] = group['lr'] * 1.0 / cfg.OPTIM.DROP_FACTOR
if cfg.OPTIM.OPT in ['salsa']:
optimizer.state['switched'] = True
if args.local_rank == 0:
logging.info("Switch due to overfiting!")
checkpointer.epoch = epoch + 1
checkpointer.save(is_best)
if experiment is not None and args.rank == 0:
# experiment.log({f'train_{k}': v for k, v in train_stats.items()}, epoch)
experiment.log({f'test_acc': accs[0], 'best_test_acc': checkpointer.best_acc}, epoch)
lr = optimizer.param_groups[0]['lr']
experiment.log({'lr': lr}, epoch)
wd = 0
for param_group in optimizer.param_groups:
if param_group['weight_decay'] > 0:
wd = param_group['weight_decay']
experiment.log({'wd': wd}, epoch)
# exactly evaluate the best checkpoint
# wait for all processes to complete before calculating the score
synchronize()
best_model_path = os.path.join(checkpointer.save_dir, "model_best.pth")
if os.path.isfile(best_model_path):
if args.local_rank == 0:
logging.info("Evaluating the best checkpoint: {}".format(best_model_path))
cfg.defrost()
cfg.EVALUATE = True
checkpointer.is_test = True
cfg.freeze()
for task_name, testloader, test_meter in zip(task_names, testloaders, test_meters):
if args.local_rank == 0:
logging.info("Evaluating dataset: {}".format(task_name))
validate(testloader, net, criterion_eval, cfg,
test_meter, global_step=cfg.SOLVER.MAX_ITER, device=device,
local_rank=get_rank())
# Close meters
train_meters.close()
for meter in test_meters:
meter.close()
return 0
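# Illustrative sketch (added, not part of the original script) of the warmup rescaling in
# main(): cfg.OPTIM.WARMUP appears to be given in epochs and is converted into optimizer
# iterations using the per-epoch step count. All numbers below are hypothetical.
def _warmup_rescaling_example():
    steps_per_epoch, warmup_epochs, total_epochs = 1250, 5, 300
    warmup_iters = warmup_epochs * steps_per_epoch  # 6250 iterations of warmup
    max_iter = steps_per_epoch * total_epochs       # 375000 iterations in total
    assert warmup_iters < max_iter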
if __name__ == '__main__':
# When run as script, modify path assuming absolute import
sys.path.append(op.join(op.dirname(this_file), '..'))
main()
| transformer-ls-master | imagenet/run_experiment_distributed.py |
"""
This file is from https://github.com/microsoft/vision-longformer
"""
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def linear_combination(x, y, epsilon):
return epsilon*x + (1-epsilon)*y
def reduce_loss(loss, reduction='mean'):
return loss.mean() if reduction == 'mean' \
else loss.sum() if reduction == 'sum' else loss
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self, epsilon=0.1, reduction='mean'):
super().__init__()
self.epsilon = epsilon
self.reduction = reduction
def forward(self, preds, target):
n = preds.size()[-1]
log_preds = F.log_softmax(preds, dim=-1)
loss = reduce_loss(-log_preds.sum(dim=-1), self.reduction)
nll = F.nll_loss(log_preds, target, reduction=self.reduction)
return linear_combination(loss/n, nll, self.epsilon)
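# Hedged sketch (added for illustration): label smoothing blends the uniform-distribution loss
# (weight epsilon) with the standard NLL (weight 1 - epsilon), so the result differs slightly
# from plain cross-entropy. The logits below are arbitrary.
def _label_smoothing_example():
    preds = th.tensor([[2.0, 0.5, -1.0]])
    target = th.tensor([0])
    smoothed = LabelSmoothingCrossEntropy(epsilon=0.1)(preds, target)
    plain = nn.CrossEntropyLoss()(preds, target)
    assert smoothed.item() > plain.item()  # here the target class has the highest logit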
class FocalLoss(nn.Module):
"""
Original code is from https://github.com/richardaecn/class-balanced-loss/blob/master/src/cifar_main.py#L226-L266
"""
def __init__(self, alpha, gamma, normalize):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.normalize = normalize
def forward(self, preds, targets):
cross_entropy = F.binary_cross_entropy_with_logits(
preds, targets, reduction='none'
)
gamma = self.gamma
if gamma == 0.0:
modulator = 1.0
else:
modulator = th.exp(
-gamma * targets * preds - gamma * th.log1p(
th.exp(-1.0 * preds)
)
)
loss = modulator * cross_entropy
weighted_loss = self.alpha * loss
focal_loss = reduce_loss(weighted_loss, reduction='sum')
return focal_loss / targets.sum() if self.normalize else focal_loss
class MultiSoftmaxCrossEntropyLoss(nn.Module):
def __init__(self, class_weight=None, label_smoothing_value=0):
super(MultiSoftmaxCrossEntropyLoss, self).__init__()
self.class_weight = class_weight
if self.class_weight is not None:
self.class_weight = self.class_weight.cuda()
self.logsoftmax = nn.LogSoftmax(dim=1)
self.label_smoothing_value = label_smoothing_value
def forward(self, input, target):
return self.cross_entropy(input, target, self.class_weight)
def cross_entropy(self, pred, soft_targets, class_weight=None):
if class_weight is not None:
class_weight_matrix = class_weight.expand_as(soft_targets)
used_class_weights = th.where(
soft_targets > 0, class_weight_matrix, soft_targets
)
samples_weight = th.max(used_class_weights, dim=1, keepdim=True)[0]
loss = th.mean(
th.sum(
-samples_weight*soft_targets*self.logsoftmax(pred), 1
), 0
)
else:
if self.label_smoothing_value > 0:
# label smoothing
batch_size, total_classes_count = soft_targets.size()
for sample_index in range(batch_size):
pos_indices = np.where(soft_targets[sample_index, :] > 0)
pos_classes_count = len(pos_indices[0])
if pos_classes_count > 0:
neg_p = self.label_smoothing_value / float(total_classes_count - pos_classes_count)
pos_p = self.label_smoothing_value / pos_classes_count
soft_targets[sample_index, :] += neg_p
soft_targets[sample_index, pos_indices[0]] = soft_targets[sample_index, pos_indices[0]] - pos_p - neg_p
loss = th.sum(-soft_targets * self.logsoftmax(pred))
loss = loss / soft_targets.sum()
return loss
class SoftTargetCrossEntropy(nn.Module):
def __init__(self):
super(SoftTargetCrossEntropy, self).__init__()
def forward(self, x, target):
loss = th.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
return loss.mean()
def get_criterion(config, train=True):
if config.AUG.MIXUP_PROB > 0.0 and config.LOSS.LOSS == 'xentropy':
criterion = SoftTargetCrossEntropy() \
if train else nn.CrossEntropyLoss()
elif config.LOSS.LABEL_SMOOTHING > 0.0 and config.LOSS.LOSS == 'xentropy':
criterion = LabelSmoothingCrossEntropy(config.LOSS.LABEL_SMOOTHING)
elif config.LOSS.LOSS == 'xentropy':
criterion = nn.CrossEntropyLoss()
elif config.LOSS.LOSS == 'sigmoid':
criterion = nn.MultiLabelSoftMarginLoss(reduction='sum')
elif config.LOSS.LOSS == 'focal':
alpha = config.LOSS.FOCAL.ALPHA
gamma = config.LOSS.FOCAL.GAMMA
normalize = config.LOSS.FOCAL.NORMALIZE
criterion = FocalLoss(alpha, gamma, normalize)
elif config.LOSS.LOSS == 'multisoftmax':
criterion = MultiSoftmaxCrossEntropyLoss()
elif config.LOSS.LOSS == 'bce':
criterion = nn.BCEWithLogitsLoss()
elif config.LOSS.LOSS == 'mse':
criterion = nn.MSELoss()
else:
raise ValueError('Unknown loss {}'.format(config.LOSS.LOSS))
return criterion
| transformer-ls-master | imagenet/loss.py |
"""
This file is from https://github.com/microsoft/vision-longformer
"""
import time
import logging
import torch
import os
import json
from torch.cuda.amp import autocast
from utils.comm import _accumulate_predictions_from_multiple_gpus, \
is_main_process
ONLY_OVERLAP_CLASSES = True
def compute_accuracy(output, target, topk=(1,), target_map=None):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
if type(output) is not torch.Tensor:
# inception v3 model
output = output[0]
if target_map is not None and ONLY_OVERLAP_CLASSES:
overlap_classes = []
for key, val in target_map.items():
overlap_classes += val
output[:, overlap_classes] += output.max() - output.min() + 10
maxk = max(topk)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
if target_map is None:
correct = pred.eq(target.view(1, -1).expand_as(pred)).float()
else:
target_list = [target_map[t.item()] if t.item() in target_map else
[-1] for t in target]
correct = pred.new_zeros(pred.shape)
for i, tlist in enumerate(target_list):
for j in range(maxk):
correct[j, i] = pred[j, i] in tlist
res = []
for k in topk:
correct_k = (correct[:k].sum(0, keepdim=False) > 0).float() # (B, )
res.append(correct_k)
return torch.stack(res).t() # (B, k)
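def _compute_accuracy_example():
    # Minimal sketch of the return contract above: a (B, len(topk)) tensor of
    # 0./1. indicators per sample; train()/validate() below sum it over the
    # batch dimension to accumulate top-1/top-5 counts.
    import torch
    logits = torch.randn(8, 1000)
    labels = torch.randint(0, 1000, (8,))
    prec = compute_accuracy(logits, labels, topk=(1, 5))  # shape (8, 2)
    top1_count, top5_count = prec.sum(0)
    return top1_count.item(), top5_count.item()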
def output_metrics(scores, targets=None):
avg_scores = 100 * scores.mean(0, keepdim=False)
metrics = {'top1': avg_scores[0], 'top5': avg_scores[1]}
if targets is not None:
assert len(targets) == len(scores), "Length of scores and targets does not match!"
for label in range(targets.max().item() + 1):
label_avg_scores = 100 * scores[targets.view(-1) == label].mean(0, keepdim=False)
metrics.update({'top1/{}'.format(label): label_avg_scores[0],
'top5/{}'.format(label): label_avg_scores[1]})
return metrics
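# Usage sketch: `scores` is the gathered (N, 2) tensor of per-image top-1/top-5
# indicators produced in validate(); passing `targets` additionally reports
# per-class accuracy under keys such as 'top1/3' and 'top5/3'.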
# Training
def train(train_loader, model, criterion, optimizer, scheduler, epoch,
cfg, meters, global_step=0, device='cuda', mixup_fn=None, scaler=None):
print('\nEpoch: %d' % epoch)
if cfg.distributed:
train_loader.sampler.set_epoch(epoch)
total_top1 = 0
total_top5 = 0
total_cnt = 0
end = time.time()
for i, batch in enumerate(train_loader):
image, target, img_id = batch[0], batch[1], batch[2:]
# compute output and record loss
image, target = image.to(device, non_blocking=True), target.to(device,
non_blocking=True)
if mixup_fn:
image, target = mixup_fn(image, target)
# measure data loading time
data_time = time.time() - end
# switch to train mode
model.train()
with autocast(enabled=cfg.AMP.ENABLED):
            if cfg.AMP.ENABLED and cfg.AMP.MEMORY_FORMAT == 'nhwc':
image = image.contiguous(memory_format=torch.channels_last)
target = target.contiguous(memory_format=torch.channels_last)
output = model(image)
if cfg.MODEL.ARCH == 'inception_v3':
loss = 0.5 * (criterion(output[0], target) + criterion(output[1],
target))
else:
loss = criterion(output, target)
# compute gradient and do SGD step
optimizer.zero_grad()
scaler.scale(loss).backward()
if getattr(cfg, "clip_grad", -1) > 0:
gnorm = torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.clip_grad)
else:
gnorm = -1
# closure function defined for line search used in SGD_SLS
def eval_loss():
# if cfg.ls_eval:
if cfg.OPTIM.LS.EVAL:
model.eval()
with torch.no_grad():
return criterion(model(image), target)
if cfg.OPTIM.OPT in ['salsa', 'ssls', 'slope']:
scaler.step(optimizer, closure=eval_loss)
else:
scaler.step(optimizer)
scaler.update()
# update scheduler
if scheduler and not cfg.SOLVER.EPOCH_BASED_SCHEDULE:
scheduler.step()
# measure and record accuracy
batch_cnt = image.size(0)
total_cnt += batch_cnt
if mixup_fn:
target = torch.argmax(target, dim=1)
if cfg.LOSS.LOSS == "xentropy":
precision = compute_accuracy(output, target, topk=(1, 5))
score = precision.sum(0, keepdim=False)
total_top1 += score[0].item()
total_top5 += score[1].item()
else:
raise ValueError("Only xentropy loss is supported!")
# measure elapsed time
batch_time = time.time() - end
end = time.time()
metrics_to_log = {
'time_info': {'compute': batch_time, 'data': data_time},
'batch_metrics': {'loss': loss, 'qa_cnt': float(batch_cnt),
'top1': 100 * score[0] / batch_cnt,
'top5': 100 * score[1] / batch_cnt}
}
params_to_log = {'params': {'lr': optimizer.param_groups[0]["lr"]}}
if cfg.OPTIM.OPT in ['salsa', 'sasa']:
params_to_log.update(
{
'stats': {'stats_x1d': optimizer.state['stats_x1d'],
'stats_ld2': optimizer.state['stats_ld2'],
'stats_mean': optimizer.state['stats_mean'],
'stats_lb': optimizer.state['stats_lb'],
'stats_ub': optimizer.state['stats_ub'], }
}
)
meters.update_metrics(metrics_to_log)
meters.update_params(params_to_log)
# only log once per cfg.LOG_FREQ param updates. adjust factor because pflug uses
# 3 batches to make 1 param update.
if (i + 1) % cfg.LOG_FREQ == 0:
logging.info(
meters.delimiter.join(
[
"iter: {iter}",
"max mem: {memory:.0f}",
]
).format(
iter=global_step + i + 1,
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
) + "\n " + meters.get_logs(global_step + i + 1)
)
# update scheduler
if scheduler and cfg.SOLVER.EPOCH_BASED_SCHEDULE:
scheduler.step()
train_metrics = torch.Tensor([total_top1, total_top5, total_cnt]).to(device)
if cfg.distributed:
torch.distributed.all_reduce(train_metrics)
top1 = 100 * train_metrics[0] / train_metrics[2]
top5 = 100 * train_metrics[1] / train_metrics[2]
logging.info(' * Prec@1 {top1:.3f} Prec@5 {top5:.3f} gnorm {gnorm:.2e}'
.format(top1=top1, top5=top5, gnorm=gnorm))
logging.info("Eval Score: %.3f" % top1)
meters.update_metrics(
{'epoch_metrics': {'total_cnt': float(train_metrics[2])},
'accuracy_metrics': {'top1': top1, 'top5': top5, }
}
)
logging.info(
meters.delimiter.join(
[
"iter: {iter}",
"max mem: {memory:.0f}",
]
).format(
iter=global_step + len(train_loader),
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
) + "\n " + meters.get_logs(int(global_step + len(train_loader)))
)
def validate(val_loader, model, criterion,
cfg, meters, global_step=0,
device='cuda', local_rank=-1):
# compute target to model output map from target file
target_map = None
if cfg.DATA.TARGETMAP:
target_file = os.path.join(cfg.DATA.PATH, cfg.DATA.TARGETMAP)
if os.path.isfile(target_file):
target_file = json.load(open(target_file))
target_file = {key: val[:val.rfind('(')] for key, val in target_file.items()}
if hasattr(val_loader.dataset,
'labelmap') and val_loader.dataset.labelmap is not None:
labelmap = val_loader.dataset.labelmap
target_map = {}
for objectname, objectid in labelmap.items():
target_map[objectid] = []
for imagenetname, objectalias in target_file.items():
if objectname == objectalias or objectname.startswith(objectalias + '('):
target_map[objectid].append(int(imagenetname))
else:
logging.warning(
"Given validation dataset does not have labelmap!")
else:
logging.warning("Given target map file {} does not exists!".format(
target_file))
# switch to evaluate mode
model.eval()
results_dict = {}
total_loss = 0
total_cnt = 0
total_top1 = 0
total_top5 = 0
dataset_len = len(val_loader.dataset)
with torch.no_grad():
start = time.time()
for i, batch in enumerate(val_loader):
image, target, img_id = batch[0], batch[1], batch[2:]
if len(img_id) == 0:
img_id = range(dataset_len * local_rank + total_cnt,
dataset_len * local_rank + total_cnt + image.size(
0))
else:
img_id = img_id[0].tolist()
image, target = image.to(device, non_blocking=True), target.to(
device, non_blocking=True)
with autocast(enabled=cfg.AMP.ENABLED):
                if cfg.AMP.ENABLED and cfg.AMP.MEMORY_FORMAT == 'nhwc':
image = image.contiguous(memory_format=torch.channels_last)
target = target.contiguous(
memory_format=torch.channels_last)
# compute output and record loss
output = model(image)
loss = criterion(output, target)
total_loss += loss.item()
total_cnt += image.size(0)
# measure and record accuracy
if cfg.LOSS.LOSS == "xentropy":
precision = compute_accuracy(output, target, topk=(1, 5),
target_map=target_map) # B*2
score = precision.sum(0, keepdim=False)
total_top1 += score[0].item()
total_top5 += score[1].item()
if cfg.EVALUATE:
results_dict.update(
{im_id: (prec, label) for im_id, prec, label in
zip(img_id, precision.to(torch.device("cpu")), target.to(torch.device("cpu")))}
)
else:
raise ValueError("Only xentropy loss is supported!")
# measure elapsed time
total_time = time.time() - start
# measure epoch metrics
test_metrics = torch.Tensor([total_loss, total_time, total_cnt, total_top1, total_top5]).to(
device)
if cfg.distributed:
torch.distributed.all_reduce(test_metrics)
if cfg.EVALUATE:
results_dict = _accumulate_predictions_from_multiple_gpus(
results_dict, cfg.GATHER_ON_CPU,)
test_loss_gathered = test_metrics[0] / test_metrics[2]
test_time_gathered = test_metrics[1] / test_metrics[2]
metrics = {
'top1': 100 * test_metrics[3] / test_metrics[2],
'top5': 100 * test_metrics[4] / test_metrics[2]
}
output = metrics['top1'].item()
if not is_main_process():
# let the main process do the final computing
return output
if cfg.EVALUATE:
assert len(results_dict) == len(val_loader.dataset), \
"Number of gathered items {} does not match the dataset size {}!" .format(len(results_dict), len(val_loader.dataset))
scores = torch.stack([val[0] for key, val in results_dict.items()])
targets = torch.stack([val[1] for key, val in results_dict.items()])
metrics = output_metrics(scores, targets=targets if cfg.OUTPUT_PERCLASS_ACC else None)
logging.info("ACCURACY: {}%".format(metrics['top1']))
meters.update_metrics(
{'epoch_metrics': {'total_cnt': float(test_metrics[2]),
'loss': test_loss_gathered,
'time': test_time_gathered},
'accuracy_metrics': metrics
}
)
logging.info(
meters.delimiter.join(
[
"iter: {iter}",
"max mem: {memory:.0f}",
]
).format(
iter=global_step,
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
) + "\n " + meters.get_logs(int(global_step))
)
# save per image result
if cfg.EVALUATE and hasattr(val_loader.dataset, 'get_img_key'):
results_dict = {val_loader.dataset.get_img_key(key): val for key, val in results_dict.items()}
torch.save(results_dict, os.path.join(meters.tb_logger.logdir, 'results.pth'))
return output
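# Sketch of how these entry points are typically driven by the training script
# (names below are illustrative assumptions, not code from this repository):
#   for epoch in range(cfg.OPTIM.EPOCHS):
#       train(train_loader, model, criterion, optimizer, scheduler, epoch,
#             cfg, meters, global_step=epoch * len(train_loader),
#             device='cuda', mixup_fn=mixup_fn, scaler=scaler)
#       top1 = validate(val_loader, model, eval_criterion, cfg, meters,
#                       global_step=(epoch + 1) * len(train_loader))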
| transformer-ls-master | imagenet/engine.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
from .defaults import _C as cfg
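# Usage sketch (illustrative): the exported `cfg` is a yacs CfgNode, so a typical
# entry point overrides it from a YAML file and/or a key-value list before
# freezing; the YAML path below is a hypothetical placeholder.
#   cfg.merge_from_file('experiments/msvit.yaml')
#   cfg.merge_from_list(['DATALOADER.BSZ', 256, 'OPTIM.LR', 5e-4])
#   cfg.freeze()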
| transformer-ls-master | imagenet/config/__init__.py |
# Copyright (c) 2021 Microsoft Corporation. Licensed under the MIT license.
# Written by Pengchuan Zhang, [email protected]
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
# training data augmentation
_C.INPUT = CN()
_C.INPUT.MEAN = [0.485, 0.456, 0.406]
_C.INPUT.STD = [0.229, 0.224, 0.225]
_C.INPUT.IMAGE_SIZE = 224 # 299 for inception_v3
_C.INPUT.CROP_PCT = 0.875 # 0.816 for inception_v3
_C.INPUT.INTERPOLATION = 2
_C.AMP = CN()
_C.AMP.ENABLED = False
_C.AMP.MEMORY_FORMAT = 'nchw'  # 'nchw' or 'nhwc' (channels_last)
# data augmentation
_C.AUG = CN()
_C.AUG.SCALE = (0.08, 1.0)
_C.AUG.RATIO = (3.0/4.0, 4.0/3.0)
_C.AUG.COLOR_JITTER = [0.4, 0.4, 0.4, 0.1, 0.0]
_C.AUG.GRAY_SCALE = 0.0
_C.AUG.GAUSSIAN_BLUR = 0.0
_C.AUG.DROPBLOCK_LAYERS = [3, 4]
_C.AUG.DROPBLOCK_KEEP_PROB = 1.0
_C.AUG.DROPBLOCK_BLOCK_SIZE = 7
_C.AUG.MIXUP_PROB = 0.0
_C.AUG.MIXUP = 0.0
_C.AUG.MIXCUT = 0.0
_C.AUG.MIXCUT_MINMAX = []
_C.AUG.MIXUP_SWITCH_PROB = 0.5
_C.AUG.MIXUP_MODE = 'batch'
_C.AUG.MIXCUT_AND_MIXUP = False
_C.AUG.REPEATED_AUG = False
_C.AUG.TIMM_AUG = CN(new_allowed=True)
_C.AUG.TIMM_AUG.USE_TRANSFORM = False
_C.DATA = CN()
# choices=['toy_ill', 'toy_well', 'mnist', 'cifar', 'cifar100', 'imagenet', 'wikitext-2', 'celeba']
_C.DATA.TRAIN = ('imagenet',)
_C.DATA.TEST = ('imagenet',)
_C.DATA.NUM_CLASSES = 1000
_C.DATA.TARGETMAP = ''
# path to datasets, default=os.getenv('PT_DATA_DIR', './datasets')
_C.DATA.PATH = "./datasets"
# path to other necessary data like checkpoints other than datasets.
_C.DATA.DATA_DIR = "./data"
# choices=['mse', 'xentropy', 'bce'], mse for least-squares regression or xentropy for classification
_C.LOSS = CN()
_C.LOSS.LABEL_SMOOTHING = 0.0
_C.LOSS.LOSS = 'xentropy'
_C.LOSS.FOCAL = CN()
_C.LOSS.FOCAL.NORMALIZE = True
_C.LOSS.FOCAL.ALPHA = 1.0
_C.LOSS.FOCAL.GAMMA = 0.5
# dataloader
_C.DATALOADER = CN()
# batch size
_C.DATALOADER.BSZ = 128
# samples are drawn with replacement if yes
_C.DATALOADER.RE = False
# number of data loading workers
_C.DATALOADER.WORKERS = 0
# optimizer
_C.OPTIM = CN()
# optimizer, default='adamw'
_C.OPTIM.OPT = 'adamw'
# effective learning rate
_C.OPTIM.LR = 1.0
# effective momentum value
_C.OPTIM.MOM = 0.9
# nu value for qhm
_C.OPTIM.NU = 1.0
# weight decay lambda
_C.OPTIM.WD = 5e-4
_C.OPTIM.WD0 = 0.0
# Number of Epochs
_C.OPTIM.EPOCHS = 150
# Warm up: epochs of qhm before switching to sasa/salsa
_C.OPTIM.WARMUP = 0
# Drop frequency and factor for all methods
_C.OPTIM.DROP_FREQ = 50
_C.OPTIM.DROP_FACTOR = 10.0
# use validation dataset to adapt learning rate
_C.OPTIM.VAL = 0
_C.OPTIM.TEST_FREQ = 1000
# ADAM's default parameters
_C.OPTIM.ADAM = CN()
_C.OPTIM.ADAM.BETA1 = 0.9
_C.OPTIM.ADAM.BETA2 = 0.999
_C.OPTIM.ADAM.EPS = 1e-8
# LR scheduler
_C.SOLVER = CN()
_C.SOLVER.LR_POLICY = '' # multistep, cosine, linear
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_EPOCHS = 5.0
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.MIN_LR = 0.0 # MAX_LR is _C.OPTIM.LR
_C.SOLVER.DETECT_ANOMALY = False
_C.SOLVER.EPOCH_BASED_SCHEDULE = False
_C.SOLVER.USE_LARC = False
# models
_C.MODEL = CN()
# choices=model_names + my_model_names + seq_model_names,
# help='model architecture: ' +
# ' | '.join(model_names + my_model_names + seq_model_names) +
# ' (default: resnet18)')
_C.MODEL.ARCH = 'msvit'
# nonlinearity, choices=['celu', 'softplus', 'gelu']
_C.MODEL.NONLINEARITY = 'celu'
# relative path of checkpoint relative to DATA_DIR
_C.MODEL.MODEL_PATH = ""
# use pre-trained model from torchvision
_C.MODEL.PRETRAINED = False
_C.MODEL.FREEZE_CONV_BODY_AT = -1
_C.MODEL.VIT = CN()
_C.MODEL.VIT.DROP = 0.0
_C.MODEL.VIT.DROP_PATH = 0.1
# Add LayerNorm in PatchEmbedding
_C.MODEL.VIT.NORM_EMBED = True
# Use average pooled feature instead of CLS token for classification head
_C.MODEL.VIT.AVG_POOL = False
_C.MODEL.VIT.MSVIT = CN()
# multi-scale model arch: see ReadMe.md for explanation
_C.MODEL.VIT.MSVIT.ARCH = 'l1,h3,d192,n1,s1,g1,p16,f7,a1_l2,h6,d384,n10,s0,g1,p2,f7,a1_l3,h12,d796,n1,s0,g1,p2,f7,a1'
# For vision longformer: whether to share the q/k/v projections of global and local tokens
_C.MODEL.VIT.MSVIT.SHARE_W = True
# choices=['full', 'longformerhand', 'longformerauto', 'linformer', 'srformer', 'performer', 'longformer_cuda']
_C.MODEL.VIT.MSVIT.ATTN_TYPE = 'longformerhand'
# For linformer: whether to share the projection matrices of key and value
_C.MODEL.VIT.MSVIT.SHARE_KV = True
# Only use global attention mechanism
_C.MODEL.VIT.MSVIT.ONLY_GLOBAL = False
# Three masking methods of longformer attention with sliding chunk implementation:
# 1: exact conv-like local attention
# 0: blockwise sliding chunk without padding
# -1: blockwise sliding chunk with cyclic padding
_C.MODEL.VIT.MSVIT.SW_EXACT = 0
# Customized LayerNorm eps
_C.MODEL.VIT.MSVIT.LN_EPS = 1e-6
# mode to control the sampling strategy of neighbor blocks
# 0: all 8 blocks; -1: no neighbor block; >0: random sample one block
_C.MODEL.VIT.MSVIT.MODE = 0
# Switching time from mode 1 to mode 0 during training
_C.MODEL.VIT.MSVIT.VIL_MODE_SWITCH = 0.75
# finetune setting
_C.FINETUNE = CN()
_C.FINETUNE.FINETUNE = False
_C.FINETUNE.USE_TRAIN_AUG = False
# ---------------------------------------------------------------------------- #
# Misc options
# ---------------------------------------------------------------------------- #
# default=os.getenv('PT_OUTPUT_DIR', '/tmp')
_C.OUTPUT_DIR = "/tmp"
# default=os.getenv('PHILLY_LOG_DIRECTORY', None)
_C.BACKUP_LOG_DIR = ""
_C.LOG_FREQ = 10
# evaluate model on validation set
_C.EVALUATE = False
_C.OUTPUT_PERCLASS_ACC = False
# Only save the last checkpoint in the checkpointer
_C.ONLY_SAVE_LAST = 0
_C.DISTRIBUTED_BACKEND = "nccl" # could be "nccl", "gloo" or "mpi"
# whether to use CPU to do gather of predictions. Note that this requires
# running with "gloo" (or "mpi") distributed backend
_C.GATHER_ON_CPU = False
| transformer-ls-master | imagenet/config/defaults.py |
transformer-ls-master | imagenet/dat/__init__.py |