import pytest
import matchzoo as mz
@pytest.mark.cron
def test_load_data():
train_data = mz.datasets.wiki_qa.load_data('train', task='ranking')
assert len(train_data) == 20360
train_data, _ = mz.datasets.wiki_qa.load_data('train',
task='classification',
return_classes=True)
assert len(train_data) == 20360
dev_data = mz.datasets.wiki_qa.load_data('dev', task='ranking',
filtered=False)
assert len(dev_data) == 2733
dev_data, tag = mz.datasets.wiki_qa.load_data('dev', task='classification',
filtered=True,
return_classes=True)
assert len(dev_data) == 1126
assert tag == [False, True]
test_data = mz.datasets.wiki_qa.load_data('test', task='ranking',
filtered=False)
assert len(test_data) == 6165
test_data, tag = mz.datasets.wiki_qa.load_data('test',
task='classification',
filtered=True,
return_classes=True)
assert len(test_data) == 2341
assert tag == [False, True]
@pytest.mark.cron
def test_load_snli():
train_data, classes = mz.datasets.snli.load_data('train',
'classification',
return_classes=True)
num_samples = 549361
assert len(train_data) == num_samples
x, y = train_data.unpack()
assert len(x['text_left']) == num_samples
assert len(x['text_right']) == num_samples
assert y.shape == (num_samples, 1)
assert classes == ['entailment', 'contradiction', 'neutral']
dev_data, classes = mz.datasets.snli.load_data('dev', 'classification',
return_classes=True)
assert len(dev_data) == 9842
assert classes == ['entailment', 'contradiction', 'neutral']
test_data, classes = mz.datasets.snli.load_data('test', 'classification',
return_classes=True)
assert len(test_data) == 9824
assert classes == ['entailment', 'contradiction', 'neutral']
train_data = mz.datasets.snli.load_data('train', 'ranking')
x, y = train_data.unpack()
assert len(x['text_left']) == num_samples
assert len(x['text_right']) == num_samples
assert y.shape == (num_samples, 1)
@pytest.mark.cron
def test_load_quora_qp():
train_data = mz.datasets.quora_qp.load_data(task='classification')
assert len(train_data) == 363177
dev_data, tag = mz.datasets.quora_qp.load_data(
'dev',
task='classification',
return_classes=True)
assert tag == [False, True]
assert len(dev_data) == 40371
x, y = dev_data.unpack()
assert len(x['text_left']) == 40371
assert len(x['text_right']) == 40371
assert y.shape == (40371, 1)
test_data = mz.datasets.quora_qp.load_data('test')
assert len(test_data) == 390965
dev_data = mz.datasets.quora_qp.load_data('dev', 'ranking')
x, y = dev_data.unpack()
assert y.shape == (40371, 1)
# Source: ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/test_datasets.py
%run init.ipynb
ranking_task = mz.tasks.Ranking(losses=mz.losses.RankCrossEntropyLoss(num_neg=10))
ranking_task.metrics = [
mz.metrics.NormalizedDiscountedCumulativeGain(k=3),
mz.metrics.NormalizedDiscountedCumulativeGain(k=5),
mz.metrics.MeanAveragePrecision()
]
preprocessor = mz.models.DRMM.get_default_preprocessor()
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
glove_embedding = mz.datasets.embeddings.load_glove_embedding(dimension=300)
term_index = preprocessor.context['vocab_unit'].state['term_index']
embedding_matrix = glove_embedding.build_matrix(term_index)
l2_norm = np.sqrt((embedding_matrix * embedding_matrix).sum(axis=1))
embedding_matrix = embedding_matrix / l2_norm[:, np.newaxis]
histgram_callback = mz.dataloader.callbacks.Histogram(
embedding_matrix, bin_size=30, hist_mode='LCH'
)
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=5,
num_neg=10,
callbacks=[histgram_callback]
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed,
callbacks=[histgram_callback]
)
padding_callback = mz.models.DRMM.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
device='cpu',
dataset=trainset,
batch_size=20,
stage='train',
resample=True,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
callback=padding_callback
)
model = mz.models.DRMM()
model.params['task'] = ranking_task
model.params['mask_value'] = 0
model.params['embedding'] = embedding_matrix
model.params['hist_bin_size'] = 30
model.params['mlp_num_layers'] = 1
model.params['mlp_num_units'] = 10
model.params['mlp_num_fan_out'] = 1
model.params['mlp_activation_func'] = 'tanh'
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adadelta(model.parameters())
trainer = mz.trainers.Trainer(
device='cpu',
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()
# Source: ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/drmm.ipynb
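The notebook above stops at trainer.run(). A short, hedged follow-up sketch for scoring the held-out pack: it assumes the objects defined above are still in scope and that the MatchZoo-py Trainer exposes evaluate() and predict() as in recent releases; the method names are an assumption, not part of the original notebook.

# Hedged sketch: evaluate the trained DRMM model on the held-out loader.
results = trainer.evaluate(testloader)      # assumed API: dict mapping metric -> value
for metric, value in results.items():
    print(metric, round(value, 4))

scores = trainer.predict(testloader)        # assumed API: array of matching scores, one per pair
print(scores[:5])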
# MIT License
#
# Copyright (c) 2019 seq2struct contributors and Microsoft Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import dataclasses
import json
from typing import Optional, Tuple, List, Iterable
import networkx as nx
from pydantic.dataclasses import dataclass
from pydantic.main import BaseConfig
from third_party.spider import evaluation
from third_party.spider.preprocess.schema import get_schemas_from_json, Schema
@dataclass
class SpiderTable:
id: int
name: List[str]
unsplit_name: str
orig_name: str
columns: List["SpiderColumn"] = dataclasses.field(default_factory=list)
primary_keys: List[str] = dataclasses.field(default_factory=list)
@dataclass
class SpiderColumn:
id: int
table: Optional[SpiderTable]
name: List[str]
unsplit_name: str
orig_name: str
type: str
foreign_key_for: Optional[str] = None
SpiderTable.__pydantic_model__.update_forward_refs()
class SpiderSchemaConfig:
arbitrary_types_allowed = True
@dataclass(config=SpiderSchemaConfig)
class SpiderSchema(BaseConfig):
db_id: str
tables: Tuple[SpiderTable, ...]
columns: Tuple[SpiderColumn, ...]
foreign_key_graph: nx.DiGraph
orig: dict
@dataclass
class SpiderItem:
question: str
slml_question: Optional[str]
query: str
spider_sql: dict
spider_schema: SpiderSchema
db_path: str
orig: dict
def schema_dict_to_spider_schema(schema_dict):
tables = tuple(
SpiderTable(id=i, name=name.split(), unsplit_name=name, orig_name=orig_name,)
for i, (name, orig_name) in enumerate(
zip(schema_dict["table_names"], schema_dict["table_names_original"])
)
)
columns = tuple(
SpiderColumn(
id=i,
table=tables[table_id] if table_id >= 0 else None,
name=col_name.split(),
unsplit_name=col_name,
orig_name=orig_col_name,
type=col_type,
)
for i, ((table_id, col_name), (_, orig_col_name), col_type,) in enumerate(
zip(
schema_dict["column_names"],
schema_dict["column_names_original"],
schema_dict["column_types"],
)
)
)
# Link columns to tables
for column in columns:
if column.table:
column.table.columns.append(column)
for column_id in schema_dict["primary_keys"]:
# Register primary keys
column = columns[column_id]
column.table.primary_keys.append(column)
foreign_key_graph = nx.DiGraph()
for source_column_id, dest_column_id in schema_dict["foreign_keys"]:
# Register foreign keys
source_column = columns[source_column_id]
dest_column = columns[dest_column_id]
source_column.foreign_key_for = dest_column
foreign_key_graph.add_edge(
source_column.table.id,
dest_column.table.id,
columns=(source_column_id, dest_column_id),
)
foreign_key_graph.add_edge(
dest_column.table.id,
source_column.table.id,
columns=(dest_column_id, source_column_id),
)
db_id = schema_dict["db_id"]
return SpiderSchema(db_id, tables, columns, foreign_key_graph, schema_dict)
def load_tables(paths):
schemas = {}
eval_foreign_key_maps = {}
with open(paths, 'r',encoding='UTF-8') as f:
schema_dicts = json.load(f)
for schema_dict in schema_dicts:
db_id = schema_dict["db_id"]
if 'column_names_original' not in schema_dict: # {'table': [col.lower, ..., ]} * -> __all__
# continue
schema_dict["column_names_original"] = schema_dict["column_names"]
schema_dict["table_names_original"] = schema_dict["table_names"]
# assert db_id not in schemas
schemas[db_id] = schema_dict_to_spider_schema(schema_dict)
eval_foreign_key_maps[db_id] = evaluation.build_foreign_key_map(schema_dict)
return schemas, eval_foreign_key_maps
def load_original_schemas(tables_paths):
all_schemas = {}
schemas, db_ids, tables = get_schemas_from_json(tables_paths)
for db_id in db_ids:
all_schemas[db_id] = Schema(schemas[db_id], tables[db_id])
return all_schemas
# Source: ContextualSP/unified_parser_text_to_sql/semparse/sql/spider.py
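As a small self-contained illustration of the foreign-key graph that schema_dict_to_spider_schema builds above, the sketch below uses only networkx and made-up table/column ids: every foreign key is inserted as one edge per direction, each carrying the ordered column pair, so a join path can later be traversed from either table.

import networkx as nx

# Toy schema: column 3 of table 1 references column 0 of table 0.
foreign_keys = [(3, 0)]
column_to_table = {0: 0, 1: 0, 2: 1, 3: 1}

g = nx.DiGraph()
for src_col, dst_col in foreign_keys:
    src_tab, dst_tab = column_to_table[src_col], column_to_table[dst_col]
    # One edge per direction, mirroring the construction in spider.py above.
    g.add_edge(src_tab, dst_tab, columns=(src_col, dst_col))
    g.add_edge(dst_tab, src_tab, columns=(dst_col, src_col))

print(g.edges(data=True))
# [(1, 0, {'columns': (3, 0)}), (0, 1, {'columns': (0, 3)})]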
import json
class Schema:
"""
    Simple schema that maps each table and column to a unique identifier.
"""
def __init__(self, schema, table):
self._schema = schema
self._table = table
self._idMap = self._map(self._schema, self._table)
@property
def schema(self):
return self._schema
@property
def idMap(self):
return self._idMap
def _map(self, schema, table):
if 'column_names_original' not in table: # {'table': [col.lower, ..., ]} * -> __all__
table["column_names_original"] = table["column_names"]
table["table_names_original"] = table["table_names"]
column_names_original = table["column_names_original"]
table_names_original = table["table_names_original"]
# print 'column_names_original: ', column_names_original
# print 'table_names_original: ', table_names_original
for i, (tab_id, col) in enumerate(column_names_original):
if tab_id == -1:
idMap = {"*": i}
else:
key = table_names_original[tab_id].lower()
val = col.lower()
idMap[key + "." + val] = i
for i, tab in enumerate(table_names_original):
key = tab.lower()
idMap[key] = i
return idMap
def _get_schemas_from_json(data: dict):
db_names = [db["db_id"] for db in data]
tables = {}
schemas = {}
for db in data:
db_id = db["db_id"]
schema = {}
if 'column_names_original' not in db:
db["column_names_original"] = db["column_names"]
db["table_names_original"] = db["table_names"]
column_names_original = db["column_names_original"]
table_names_original = db["table_names_original"]
tables[db_id] = {
"column_names_original": column_names_original,
"table_names_original": table_names_original,
}
for i, tabn in enumerate(table_names_original):
table = str(tabn.lower())
cols = [str(col.lower()) for td, col in column_names_original if td == i]
schema[table] = cols
schemas[db_id] = schema
return schemas, db_names, tables
def get_schemas_from_json(fpath):
with open(fpath, 'r',encoding='UTF-8') as f:
data = json.load(f)
return _get_schemas_from_json(data)
# Source: ContextualSP/unified_parser_text_to_sql/third_party/spider/preprocess/schema.py
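For reference, the idMap built by Schema._map above assigns column positions to `*` and to every `table.column` pair, and table positions to every table name. A tiny self-contained sketch with toy names (not from any real Spider database) reproduces the same rule:

# Toy inputs in the same layout that tables.json uses.
table_names_original = ["singer", "concert"]
column_names_original = [(-1, "*"), (0, "name"), (0, "age"), (1, "venue")]

# Same mapping rule as Schema._map: column index for columns, table index for tables.
id_map = {}
for i, (tab_id, col) in enumerate(column_names_original):
    if tab_id == -1:
        id_map["*"] = i
    else:
        id_map[table_names_original[tab_id].lower() + "." + col.lower()] = i
for i, tab in enumerate(table_names_original):
    id_map[tab.lower()] = i

print(id_map)
# {'*': 0, 'singer.name': 1, 'singer.age': 2, 'concert.venue': 3, 'singer': 0, 'concert': 1}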
MLP_RATIO: [[4.0, 4.0], [4.0, 4.0], [4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0], [4.0, 4.0]]
NUM_HEADS: [[3,3], [6,6], [12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12,12], [24,24]]
EMBED_DIM: [96,192,384,768]
DEPTHS: [ 2, 2, 18, 2 ]
WINDOW_SIZE: [[14,14], [14,14], [14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14], [14,14]]
# Source: Cream/AutoFormerV2/configs/S3-S.yaml
from .io import load, dump, register_handler
from .handlers import BaseFileHandler, JsonHandler, PickleHandler, YamlHandler
from .parse import list_from_file, dict_from_file
__all__ = [
'load', 'dump', 'register_handler', 'BaseFileHandler', 'JsonHandler',
'PickleHandler', 'YamlHandler', 'list_from_file', 'dict_from_file'
]
# Source: Cream/CDARTS/CDARTS_detection/mmcv/fileio/__init__.py
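A quick round-trip with the load/dump helpers re-exported above; the handler is picked from the file extension. This is a generic usage sketch, assuming the mmcv copy in this tree is importable as `mmcv.fileio`.

from mmcv.fileio import dump, load

cfg = {'lr': 0.02, 'steps': [8, 11]}
dump(cfg, 'cfg.json')             # JsonHandler chosen from the .json suffix
assert load('cfg.json') == cfg
dump(cfg, 'cfg.pkl')              # PickleHandler for .pkl
print(load('cfg.pkl'))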
from .collate import collate
from .data_container import DataContainer
from .data_parallel import MMDataParallel
from .distributed import MMDistributedDataParallel
from .scatter_gather import scatter, scatter_kwargs
__all__ = [
'collate', 'DataContainer', 'MMDataParallel', 'MMDistributedDataParallel',
'scatter', 'scatter_kwargs'
]
# Source: Cream/CDARTS/CDARTS_detection/mmcv/parallel/__init__.py
from abc import ABCMeta, abstractmethod
from ..hook import Hook
class LoggerHook(Hook):
"""Base class for logger hooks.
Args:
interval (int): Logging interval (every k iterations).
        ignore_last (bool): Ignore the log of the last iterations in each epoch
            if the number of remaining iterations is less than `interval`.
reset_flag (bool): Whether to clear the output buffer after logging.
"""
__metaclass__ = ABCMeta
def __init__(self, interval=10, ignore_last=True, reset_flag=False):
self.interval = interval
self.ignore_last = ignore_last
self.reset_flag = reset_flag
@abstractmethod
def log(self, runner):
pass
def before_run(self, runner):
for hook in runner.hooks[::-1]:
if isinstance(hook, LoggerHook):
hook.reset_flag = True
break
def before_epoch(self, runner):
runner.log_buffer.clear() # clear logs of last epoch
def after_train_iter(self, runner):
if self.every_n_inner_iters(runner, self.interval):
runner.log_buffer.average(self.interval)
elif self.end_of_epoch(runner) and not self.ignore_last:
# not precise but more stable
runner.log_buffer.average(self.interval)
if runner.log_buffer.ready:
self.log(runner)
if self.reset_flag:
runner.log_buffer.clear_output()
def arch_after_train_iter(self, runner):
if self.every_n_inner_iters(runner, self.interval):
runner.log_buffer.average(self.interval)
elif self.end_of_epoch(runner) and not self.ignore_last:
# not precise but more stable
runner.log_buffer.average(self.interval)
if runner.log_buffer.ready:
self.log(runner)
if self.reset_flag:
runner.log_buffer.clear_output()
def after_train_epoch(self, runner):
if runner.log_buffer.ready:
self.log(runner)
if self.reset_flag:
runner.log_buffer.clear_output()
def after_val_epoch(self, runner):
runner.log_buffer.average()
self.log(runner)
if self.reset_flag:
runner.log_buffer.clear_output()
# Source: Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/logger/base.py
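LoggerHook above only decides when log() fires; concrete subclasses decide where the averaged values go. A minimal sketch of such a subclass (the class name is hypothetical; it assumes runner.log_buffer.output holds the averaged scalars, as in mmcv's bundled logger hooks):

class PrintLoggerHook(LoggerHook):
    """Hypothetical logger hook that simply prints the averaged log buffer."""

    def log(self, runner):
        # log_buffer.output is filled by log_buffer.average() in after_train_iter above.
        items = ', '.join('{}: {:.4f}'.format(k, v)
                          for k, v in runner.log_buffer.output.items())
        print('Epoch [{}] Iter [{}] {}'.format(
            runner.epoch + 1, runner.inner_iter + 1, items))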
import os
import os.path as osp
import sys
from pathlib import Path
import six
from .misc import is_str
if sys.version_info <= (3, 3):
FileNotFoundError = IOError
else:
FileNotFoundError = FileNotFoundError
def is_filepath(x):
if is_str(x) or isinstance(x, Path):
return True
else:
return False
def fopen(filepath, *args, **kwargs):
if is_str(filepath):
return open(filepath, *args, **kwargs)
elif isinstance(filepath, Path):
return filepath.open(*args, **kwargs)
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
if not osp.isfile(filename):
raise FileNotFoundError(msg_tmpl.format(filename))
def mkdir_or_exist(dir_name, mode=0o777):
if dir_name == '':
return
dir_name = osp.expanduser(dir_name)
if six.PY3:
os.makedirs(dir_name, mode=mode, exist_ok=True)
else:
if not osp.isdir(dir_name):
os.makedirs(dir_name, mode=mode)
def symlink(src, dst, overwrite=True, **kwargs):
if os.path.lexists(dst) and overwrite:
os.remove(dst)
os.symlink(src, dst, **kwargs)
def _scandir_py35(dir_path, suffix=None):
for entry in os.scandir(dir_path):
if not entry.is_file():
continue
filename = entry.name
if suffix is None:
yield filename
elif filename.endswith(suffix):
yield filename
def _scandir_py(dir_path, suffix=None):
for filename in os.listdir(dir_path):
if not osp.isfile(osp.join(dir_path, filename)):
continue
if suffix is None:
yield filename
elif filename.endswith(suffix):
yield filename
def scandir(dir_path, suffix=None):
if suffix is not None and not isinstance(suffix, (str, tuple)):
raise TypeError('"suffix" must be a string or tuple of strings')
if sys.version_info >= (3, 5):
return _scandir_py35(dir_path, suffix)
else:
return _scandir_py(dir_path, suffix)
# Source: Cream/CDARTS/CDARTS_detection/mmcv/utils/path.py
from __future__ import division
import numpy as np
from mmcv.image import rgb2bgr
from mmcv.video import flowread
from .image import imshow
def flowshow(flow, win_name='', wait_time=0):
"""Show optical flow.
Args:
flow (ndarray or str): The optical flow to be displayed.
win_name (str): The window name.
wait_time (int): Value of waitKey param.
"""
flow = flowread(flow)
flow_img = flow2rgb(flow)
imshow(rgb2bgr(flow_img), win_name, wait_time)
def flow2rgb(flow, color_wheel=None, unknown_thr=1e6):
"""Convert flow map to RGB image.
Args:
flow (ndarray): Array of optical flow.
color_wheel (ndarray or None): Color wheel used to map flow field to
RGB colorspace. Default color wheel will be used if not specified.
        unknown_thr (float): Values above this threshold will be marked as
            unknown and thus ignored.
Returns:
ndarray: RGB image that can be visualized.
"""
assert flow.ndim == 3 and flow.shape[-1] == 2
if color_wheel is None:
color_wheel = make_color_wheel()
assert color_wheel.ndim == 2 and color_wheel.shape[1] == 3
num_bins = color_wheel.shape[0]
dx = flow[:, :, 0].copy()
dy = flow[:, :, 1].copy()
ignore_inds = (
np.isnan(dx) | np.isnan(dy) | (np.abs(dx) > unknown_thr) |
(np.abs(dy) > unknown_thr))
dx[ignore_inds] = 0
dy[ignore_inds] = 0
rad = np.sqrt(dx**2 + dy**2)
if np.any(rad > np.finfo(float).eps):
max_rad = np.max(rad)
dx /= max_rad
dy /= max_rad
[h, w] = dx.shape
rad = np.sqrt(dx**2 + dy**2)
angle = np.arctan2(-dy, -dx) / np.pi
bin_real = (angle + 1) / 2 * (num_bins - 1)
bin_left = np.floor(bin_real).astype(int)
bin_right = (bin_left + 1) % num_bins
w = (bin_real - bin_left.astype(np.float32))[..., None]
flow_img = (1 -
w) * color_wheel[bin_left, :] + w * color_wheel[bin_right, :]
small_ind = rad <= 1
flow_img[small_ind] = 1 - rad[small_ind, None] * (1 - flow_img[small_ind])
flow_img[np.logical_not(small_ind)] *= 0.75
flow_img[ignore_inds, :] = 0
return flow_img
def make_color_wheel(bins=None):
"""Build a color wheel.
Args:
bins(list or tuple, optional): Specify the number of bins for each
color range, corresponding to six ranges: red -> yellow,
yellow -> green, green -> cyan, cyan -> blue, blue -> magenta,
magenta -> red. [15, 6, 4, 11, 13, 6] is used for default
(see Middlebury).
Returns:
ndarray: Color wheel of shape (total_bins, 3).
"""
if bins is None:
bins = [15, 6, 4, 11, 13, 6]
assert len(bins) == 6
RY, YG, GC, CB, BM, MR = tuple(bins)
ry = [1, np.arange(RY) / RY, 0]
yg = [1 - np.arange(YG) / YG, 1, 0]
gc = [0, 1, np.arange(GC) / GC]
cb = [0, 1 - np.arange(CB) / CB, 1]
bm = [np.arange(BM) / BM, 0, 1]
mr = [1, 0, 1 - np.arange(MR) / MR]
num_bins = RY + YG + GC + CB + BM + MR
color_wheel = np.zeros((3, num_bins), dtype=np.float32)
col = 0
for i, color in enumerate([ry, yg, gc, cb, bm, mr]):
for j in range(3):
color_wheel[j, col:col + bins[i]] = color[j]
col += bins[i]
return color_wheel.T
# Source: Cream/CDARTS/CDARTS_detection/mmcv/visualization/optflow.py
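A quick sketch of flow2rgb above on a synthetic flow field; it assumes the functions in this file are importable (e.g. from the mmcv copy in this tree) or have been pasted into the session.

import numpy as np

# Synthetic 32x32 flow field: constant motion to the right and slightly down.
flow = np.zeros((32, 32, 2), dtype=np.float32)
flow[..., 0] = 3.0   # dx
flow[..., 1] = 1.0   # dy

rgb = flow2rgb(flow)                                     # default 55-bin color wheel
print(rgb.shape, float(rgb.min()), float(rgb.max()))     # (32, 32, 3), values within [0, 1]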
import torch
from ..bbox import build_assigner, build_sampler, PseudoSampler
from ..utils import unmap, multi_apply
def calc_region(bbox, ratio, featmap_size=None):
"""Calculate a proportional bbox region.
    The bbox center is fixed, while the new height and width are
    h * ratio and w * ratio, respectively.
Args:
bbox (Tensor): Bboxes to calculate regions, shape (n, 4)
ratio (float): Ratio of the output region.
featmap_size (tuple): Feature map size used for clipping the boundary.
Returns:
tuple: x1, y1, x2, y2
"""
x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()
y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()
x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()
y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()
if featmap_size is not None:
x1 = x1.clamp(min=0, max=featmap_size[1] - 1)
y1 = y1.clamp(min=0, max=featmap_size[0] - 1)
x2 = x2.clamp(min=0, max=featmap_size[1] - 1)
y2 = y2.clamp(min=0, max=featmap_size[0] - 1)
return (x1, y1, x2, y2)
def ga_loc_target(gt_bboxes_list,
featmap_sizes,
anchor_scale,
anchor_strides,
center_ratio=0.2,
ignore_ratio=0.5):
"""Compute location targets for guided anchoring.
Each feature map is divided into positive, negative and ignore regions.
- positive regions: target 1, weight 1
- ignore regions: target 0, weight 0
- negative regions: target 0, weight 0.1
Args:
        gt_bboxes_list (list[Tensor]): Gt bboxes of each image.
        featmap_sizes (list[tuple]): Multi-level sizes of the feature maps.
        anchor_scale (int): Anchor scale.
        anchor_strides (list[int]): Multi-level anchor strides.
        center_ratio (float): Ratio of the center region.
        ignore_ratio (float): Ratio of the ignore region.
Returns:
tuple
"""
img_per_gpu = len(gt_bboxes_list)
num_lvls = len(featmap_sizes)
r1 = (1 - center_ratio) / 2
r2 = (1 - ignore_ratio) / 2
all_loc_targets = []
all_loc_weights = []
all_ignore_map = []
for lvl_id in range(num_lvls):
h, w = featmap_sizes[lvl_id]
loc_targets = torch.zeros(img_per_gpu,
1,
h,
w,
device=gt_bboxes_list[0].device,
dtype=torch.float32)
loc_weights = torch.full_like(loc_targets, -1)
ignore_map = torch.zeros_like(loc_targets)
all_loc_targets.append(loc_targets)
all_loc_weights.append(loc_weights)
all_ignore_map.append(ignore_map)
for img_id in range(img_per_gpu):
gt_bboxes = gt_bboxes_list[img_id]
scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) *
(gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1))
min_anchor_size = scale.new_full(
(1, ), float(anchor_scale * anchor_strides[0]))
# assign gt bboxes to different feature levels w.r.t. their scales
target_lvls = torch.floor(
torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)
target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()
for gt_id in range(gt_bboxes.size(0)):
lvl = target_lvls[gt_id].item()
# rescaled to corresponding feature map
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]
# calculate ignore regions
ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
gt_, r2, featmap_sizes[lvl])
# calculate positive (center) regions
ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region(
gt_, r1, featmap_sizes[lvl])
all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 +
1] = 1
all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 +
1, ignore_x1:ignore_x2 + 1] = 0
all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1, ctr_x1:ctr_x2 +
1] = 1
# calculate ignore map on nearby low level feature
if lvl > 0:
d_lvl = lvl - 1
# rescaled to corresponding feature map
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]
ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
gt_, r2, featmap_sizes[d_lvl])
all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 +
1, ignore_x1:ignore_x2 + 1] = 1
# calculate ignore map on nearby high level feature
if lvl < num_lvls - 1:
u_lvl = lvl + 1
# rescaled to corresponding feature map
gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]
ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(
gt_, r2, featmap_sizes[u_lvl])
all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 +
1, ignore_x1:ignore_x2 + 1] = 1
for lvl_id in range(num_lvls):
# ignore negative regions w.r.t. ignore map
all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0)
& (all_ignore_map[lvl_id] > 0)] = 0
# set negative regions with weight 0.1
all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1
# loc average factor to balance loss
loc_avg_factor = sum(
[t.size(0) * t.size(-1) * t.size(-2) for t in all_loc_targets]) / 200
return all_loc_targets, all_loc_weights, loc_avg_factor
def ga_shape_target(approx_list,
inside_flag_list,
square_list,
gt_bboxes_list,
img_metas,
approxs_per_octave,
cfg,
gt_bboxes_ignore_list=None,
sampling=True,
unmap_outputs=True):
"""Compute guided anchoring targets.
Args:
approx_list (list[list]): Multi level approxs of each image.
inside_flag_list (list[list]): Multi level inside flags of each image.
square_list (list[list]): Multi level squares of each image.
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
img_metas (list[dict]): Meta info of each image.
approxs_per_octave (int): number of approxs per octave
cfg (dict): RPN train configs.
gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.
sampling (bool): sampling or not.
unmap_outputs (bool): unmap outputs or not.
Returns:
tuple
"""
num_imgs = len(img_metas)
assert len(approx_list) == len(inside_flag_list) == len(
square_list) == num_imgs
# anchor number of multi levels
num_level_squares = [squares.size(0) for squares in square_list[0]]
# concat all level anchors and flags to a single tensor
inside_flag_flat_list = []
approx_flat_list = []
square_flat_list = []
for i in range(num_imgs):
assert len(square_list[i]) == len(inside_flag_list[i])
inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))
approx_flat_list.append(torch.cat(approx_list[i]))
square_flat_list.append(torch.cat(square_list[i]))
# compute targets for each image
if gt_bboxes_ignore_list is None:
gt_bboxes_ignore_list = [None for _ in range(num_imgs)]
(all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,
neg_inds_list) = multi_apply(ga_shape_target_single,
approx_flat_list,
inside_flag_flat_list,
square_flat_list,
gt_bboxes_list,
gt_bboxes_ignore_list,
img_metas,
approxs_per_octave=approxs_per_octave,
cfg=cfg,
sampling=sampling,
unmap_outputs=unmap_outputs)
# no valid anchors
if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):
return None
# sampled anchors of all images
num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
# split targets to a list w.r.t. multiple levels
bbox_anchors_list = images_to_levels(all_bbox_anchors, num_level_squares)
bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares)
bbox_weights_list = images_to_levels(all_bbox_weights, num_level_squares)
return (bbox_anchors_list, bbox_gts_list, bbox_weights_list, num_total_pos,
num_total_neg)
def images_to_levels(target, num_level_anchors):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...]
"""
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_level_anchors:
end = start + n
level_targets.append(target[:, start:end].squeeze(0))
start = end
return level_targets
def ga_shape_target_single(flat_approxs,
inside_flags,
flat_squares,
gt_bboxes,
gt_bboxes_ignore,
img_meta,
approxs_per_octave,
cfg,
sampling=True,
unmap_outputs=True):
"""Compute guided anchoring targets.
    This function returns sampled anchors and gt bboxes directly
    rather than calculating regression targets.
Args:
flat_approxs (Tensor): flat approxs of a single image,
shape (n, 4)
inside_flags (Tensor): inside flags of a single image,
shape (n, ).
flat_squares (Tensor): flat squares of a single image,
shape (approxs_per_octave * n, 4)
gt_bboxes (Tensor): Ground truth bboxes of a single image.
img_meta (dict): Meta info of a single image.
approxs_per_octave (int): number of approxs per octave
cfg (dict): RPN train configs.
sampling (bool): sampling or not.
unmap_outputs (bool): unmap outputs or not.
Returns:
tuple
"""
if not inside_flags.any():
        return (None, ) * 5  # one placeholder per value returned below
# assign gt and sample anchors
expand_inside_flags = inside_flags[:, None].expand(
-1, approxs_per_octave).reshape(-1)
approxs = flat_approxs[expand_inside_flags, :]
squares = flat_squares[inside_flags, :]
bbox_assigner = build_assigner(cfg.ga_assigner)
assign_result = bbox_assigner.assign(approxs, squares, approxs_per_octave,
gt_bboxes, gt_bboxes_ignore)
if sampling:
bbox_sampler = build_sampler(cfg.ga_sampler)
else:
bbox_sampler = PseudoSampler()
sampling_result = bbox_sampler.sample(assign_result, squares, gt_bboxes)
bbox_anchors = torch.zeros_like(squares)
bbox_gts = torch.zeros_like(squares)
bbox_weights = torch.zeros_like(squares)
pos_inds = sampling_result.pos_inds
neg_inds = sampling_result.neg_inds
if len(pos_inds) > 0:
bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes
bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes
bbox_weights[pos_inds, :] = 1.0
# map up to original set of anchors
if unmap_outputs:
num_total_anchors = flat_squares.size(0)
bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags)
bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)
bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)
# Source: Cream/CDARTS/CDARTS_detection/mmdet/core/anchor/guided_anchor_target.py
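Most of the file above needs full detector plumbing, but calc_region is easy to exercise on its own. A short example with one ground-truth box and the same default center_ratio/ignore_ratio as ga_loc_target (assumes the function is importable from this module):

import torch

bbox = torch.tensor([10., 10., 50., 30.])   # x1, y1, x2, y2 on some feature map
center_ratio, ignore_ratio = 0.2, 0.5
r1 = (1 - center_ratio) / 2                  # 0.4 -> positive (center) region
r2 = (1 - ignore_ratio) / 2                  # 0.25 -> ignore region

print(calc_region(bbox, r1))   # (26, 18, 34, 22) as 0-d LongTensors
print(calc_region(bbox, r2))   # (20, 15, 40, 25)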
import torch
from .base_sampler import BaseSampler
from .sampling_result import SamplingResult
class PseudoSampler(BaseSampler):
def __init__(self, **kwargs):
pass
def _sample_pos(self, **kwargs):
raise NotImplementedError
def _sample_neg(self, **kwargs):
raise NotImplementedError
def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):
pos_inds = torch.nonzero(
assign_result.gt_inds > 0).squeeze(-1).unique()
neg_inds = torch.nonzero(
assign_result.gt_inds == 0).squeeze(-1).unique()
gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)
sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,
assign_result, gt_flags)
return sampling_result
# Source: Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/pseudo_sampler.py
import torch
import numpy as np
import mmcv
def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,
cfg):
cfg_list = [cfg for _ in range(len(pos_proposals_list))]
mask_targets = map(mask_target_single, pos_proposals_list,
pos_assigned_gt_inds_list, gt_masks_list, cfg_list)
mask_targets = torch.cat(list(mask_targets))
return mask_targets
def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
mask_size = cfg.mask_size
num_pos = pos_proposals.size(0)
mask_targets = []
if num_pos > 0:
proposals_np = pos_proposals.cpu().numpy()
pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
for i in range(num_pos):
gt_mask = gt_masks[pos_assigned_gt_inds[i]]
bbox = proposals_np[i, :].astype(np.int32)
x1, y1, x2, y2 = bbox
w = np.maximum(x2 - x1 + 1, 1)
h = np.maximum(y2 - y1 + 1, 1)
# mask is uint8 both before and after resizing
target = mmcv.imresize(gt_mask[y1:y1 + h, x1:x1 + w],
(mask_size, mask_size))
mask_targets.append(target)
mask_targets = torch.from_numpy(np.stack(mask_targets)).float().to(
pos_proposals.device)
else:
mask_targets = pos_proposals.new_zeros((0, mask_size, mask_size))
return mask_targets
# Source: Cream/CDARTS/CDARTS_detection/mmdet/core/mask/mask_target.py
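A hedged sketch of mask_target_single above on toy inputs; it assumes mmcv is installed and the function is importable, and uses a SimpleNamespace in place of the real RCNN train config since only cfg.mask_size is read:

import types
import numpy as np
import torch

cfg = types.SimpleNamespace(mask_size=28)
pos_proposals = torch.tensor([[4., 4., 20., 16.]])        # one positive RoI (x1, y1, x2, y2)
pos_assigned_gt_inds = torch.tensor([0])
gt_masks = np.zeros((1, 32, 32), dtype=np.uint8)          # one full-image GT mask
gt_masks[0, 6:14, 6:18] = 1

targets = mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)
print(targets.shape)   # torch.Size([1, 28, 28])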
from __future__ import division
import math
import numpy as np
import torch
from mmcv.runner.utils import get_dist_info
from torch.utils.data import DistributedSampler as _DistributedSampler
from torch.utils.data import Sampler
class DistributedSampler(_DistributedSampler):
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
super().__init__(dataset, num_replicas=num_replicas, rank=rank)
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
if self.shuffle:
g = torch.Generator()
g.manual_seed(self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = torch.arange(len(self.dataset)).tolist()
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
return iter(indices)
class GroupSampler(Sampler):
def __init__(self, dataset, samples_per_gpu=1, split=1000, mode=None):
assert hasattr(dataset, 'flag')
self.dataset = dataset
self.split = int(np.floor(split / samples_per_gpu)) * samples_per_gpu
self.mode = mode
self.samples_per_gpu = samples_per_gpu
self.flag = dataset.flag.astype(np.int64)
self.group_sizes = np.bincount(self.flag)
self.num_samples = 0
for i, size in enumerate(self.group_sizes):
self.num_samples += int(np.ceil(
size / self.samples_per_gpu)) * self.samples_per_gpu
if mode == 'train':
self.num_samples = self.split
elif mode == 'val':
self.num_samples = self.num_samples - self.split
def __iter__(self):
#indices = []
indices = np.array([], dtype='int64')
size_flag = 0
for i, size in enumerate(self.group_sizes):
if size == 0:
continue
indice = np.where(self.flag == i)[0]
assert len(indice) == size
num_extra = int(np.ceil(size / self.samples_per_gpu)
) * self.samples_per_gpu - len(indice)
indice = np.concatenate([indice, indice[:num_extra]])
if self.mode == 'train':
if (size * self.split) % sum(self.group_sizes) != 0:
size_flag += 1
split = int(size/sum(self.group_sizes)*self.split)
if i == len(self.group_sizes) - 1 and size_flag != 0:
split += 1
indice = indice[:split]
elif self.mode == 'val':
if (size * self.split) % sum(self.group_sizes) != 0:
size_flag += 1
split = int(size/sum(self.group_sizes)*self.split)
if i == len(self.group_sizes) - 1 and size_flag != 0:
split += 1
indice = indice[split:]
np.random.shuffle(indice)
# indices.append(indice)
indices = np.concatenate([indices, indice])
_indices = np.array([], dtype='int64')
for i in np.random.permutation(range(len(indices) // self.samples_per_gpu)):
_indices = np.append(_indices, indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu])
indices = _indices
indices = torch.from_numpy(indices).long()
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
class DistributedGroupSampler(Sampler):
"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
    process can pass a DistributedGroupSampler instance as a DataLoader sampler,
and load a subset of the original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size.
Arguments:
dataset: Dataset used for sampling.
num_replicas (optional): Number of processes participating in
distributed training.
rank (optional): Rank of the current process within num_replicas.
"""
def __init__(self, dataset, samples_per_gpu=1, num_replicas=None, rank=None, split=1000, mode=None):
_rank, _num_replicas = get_dist_info()
if num_replicas is None:
num_replicas = _num_replicas
if rank is None:
rank = _rank
self.dataset = dataset
self.samples_per_gpu = samples_per_gpu
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.split = int(np.floor(split / samples_per_gpu / self.num_replicas)) * samples_per_gpu
self.mode = mode
assert hasattr(self.dataset, 'flag')
self.flag = self.dataset.flag
self.group_sizes = np.bincount(self.flag)
self.num_samples = 0
for i, j in enumerate(self.group_sizes):
self.num_samples += int(
math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
self.num_replicas)) * self.samples_per_gpu
if self.mode == 'train':
self.num_samples = self.split
elif self.mode == 'val':
self.num_samples = self.num_samples - self.split
self.total_size = self.num_samples * self.num_replicas
self.split *= self.num_replicas
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
#indices = np.array([])
indices = []
size_flag = 0
for i, size in enumerate(self.group_sizes):
if size > 0:
indice = np.where(self.flag == i)[0]
assert len(indice) == size
extra = int(
math.ceil(
size * 1.0 / self.samples_per_gpu / self.num_replicas)
) * self.samples_per_gpu * self.num_replicas - len(indice)
indice = np.concatenate([indice, indice[:extra]])
if self.mode == 'train':
split = int(size/sum(self.group_sizes)*self.split)
if (size * self.split) % sum(self.group_sizes) != 0:
size_flag += 1
if i == len(self.group_sizes) - 1 and size_flag != 0:
split += 1
indice = indice[:split]
elif self.mode == 'val':
split = int(size/sum(self.group_sizes)*self.split)
if (size * self.split) % sum(self.group_sizes) != 0:
size_flag += 1
if i == len(self.group_sizes) - 1 and size_flag != 0:
split += 1
indice = indice[split:]
indice = indice[list(torch.randperm(int(len(indice)), generator=g))].tolist()
indices += indice
assert len(indices) == self.total_size
indices = [
indices[j] for i in list(
torch.randperm(
len(indices) // self.samples_per_gpu, generator=g))
for j in range(i * self.samples_per_gpu, (i + 1) *
self.samples_per_gpu)
]
# subsample
offset = self.num_samples * self.rank
indices = indices[offset:offset + self.num_samples]
assert len(indices) == self.num_samples
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
# Source: Cream/CDARTS/CDARTS_detection/mmdet/datasets/loader/sampler.py
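GroupSampler above only requires a dataset exposing a flag array that marks each sample's aspect-ratio group. A toy sketch follows (hypothetical dataset class; assumes the sampler is importable from this module): each group is padded to a multiple of samples_per_gpu, so 10 + 6 samples yield 12 + 8 = 20 indices.

import numpy as np
from torch.utils.data import Dataset

class ToyDataset(Dataset):
    """Hypothetical dataset: 10 'wide' images (flag 0) and 6 'tall' images (flag 1)."""
    def __init__(self):
        self.flag = np.array([0] * 10 + [1] * 6, dtype=np.uint8)
    def __len__(self):
        return len(self.flag)
    def __getitem__(self, idx):
        return idx

sampler = GroupSampler(ToyDataset(), samples_per_gpu=4)
print(len(list(sampler)))   # 20 -- every contiguous chunk of 4 indices comes from a single group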
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import multi_apply, multiclass_nms, distance2bbox, force_fp32
from ..builder import build_loss
from ..registry import HEADS
from ..utils import bias_init_with_prob, Scale, ConvModule
INF = 1e8
@HEADS.register_module
class FCOSHead(nn.Module):
def __init__(self,
num_classes,
in_channels,
feat_channels=256,
stacked_convs=4,
strides=(4, 8, 16, 32, 64),
regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),
(512, INF)),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)):
super(FCOSHead, self).__init__()
self.num_classes = num_classes
self.cls_out_channels = num_classes - 1
self.in_channels = in_channels
self.feat_channels = feat_channels
self.stacked_convs = stacked_convs
self.strides = strides
self.regress_ranges = regress_ranges
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.loss_centerness = build_loss(loss_centerness)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=self.norm_cfg is None))
self.fcos_cls = nn.Conv2d(
self.feat_channels, self.cls_out_channels, 3, padding=1)
self.fcos_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
self.fcos_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)
self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.fcos_cls, std=0.01, bias=bias_cls)
normal_init(self.fcos_reg, std=0.01)
normal_init(self.fcos_centerness, std=0.01)
def forward(self, feats):
return multi_apply(self.forward_single, feats, self.scales)
def forward_single(self, x, scale):
cls_feat = x
reg_feat = x
for cls_layer in self.cls_convs:
cls_feat = cls_layer(cls_feat)
cls_score = self.fcos_cls(cls_feat)
centerness = self.fcos_centerness(cls_feat)
for reg_layer in self.reg_convs:
reg_feat = reg_layer(reg_feat)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(self.fcos_reg(reg_feat)).float().exp()
return cls_score, bbox_pred, centerness
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def loss(self,
cls_scores,
bbox_preds,
centernesses,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
assert len(cls_scores) == len(bbox_preds) == len(centernesses)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
all_level_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
labels, bbox_targets = self.fcos_target(all_level_points, gt_bboxes,
gt_labels)
num_imgs = cls_scores[0].size(0)
# flatten cls_scores, bbox_preds and centerness
flatten_cls_scores = [
cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)
for cls_score in cls_scores
]
flatten_bbox_preds = [
bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)
for bbox_pred in bbox_preds
]
flatten_centerness = [
centerness.permute(0, 2, 3, 1).reshape(-1)
for centerness in centernesses
]
flatten_cls_scores = torch.cat(flatten_cls_scores)
flatten_bbox_preds = torch.cat(flatten_bbox_preds)
flatten_centerness = torch.cat(flatten_centerness)
flatten_labels = torch.cat(labels)
flatten_bbox_targets = torch.cat(bbox_targets)
# repeat points to align with bbox_preds
flatten_points = torch.cat(
[points.repeat(num_imgs, 1) for points in all_level_points])
pos_inds = flatten_labels.nonzero().reshape(-1)
num_pos = len(pos_inds)
loss_cls = self.loss_cls(
flatten_cls_scores, flatten_labels,
avg_factor=num_pos + num_imgs) # avoid num_pos is 0
pos_bbox_preds = flatten_bbox_preds[pos_inds]
pos_bbox_targets = flatten_bbox_targets[pos_inds]
pos_centerness = flatten_centerness[pos_inds]
pos_centerness_targets = self.centerness_target(pos_bbox_targets)
if num_pos > 0:
pos_points = flatten_points[pos_inds]
pos_decoded_bbox_preds = distance2bbox(pos_points, pos_bbox_preds)
pos_decoded_target_preds = distance2bbox(pos_points,
pos_bbox_targets)
# centerness weighted iou loss
loss_bbox = self.loss_bbox(
pos_decoded_bbox_preds,
pos_decoded_target_preds,
weight=pos_centerness_targets,
avg_factor=pos_centerness_targets.sum())
loss_centerness = self.loss_centerness(pos_centerness,
pos_centerness_targets)
else:
loss_bbox = pos_bbox_preds.sum()
loss_centerness = pos_centerness.sum()
return dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_centerness=loss_centerness)
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))
def get_bboxes(self,
cls_scores,
bbox_preds,
centernesses,
img_metas,
cfg,
rescale=None):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
mlvl_points = self.get_points(featmap_sizes, bbox_preds[0].dtype,
bbox_preds[0].device)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
centerness_pred_list = [
centernesses[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
det_bboxes = self.get_bboxes_single(cls_score_list, bbox_pred_list,
centerness_pred_list,
mlvl_points, img_shape,
scale_factor, cfg, rescale)
result_list.append(det_bboxes)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
centernesses,
mlvl_points,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_points)
mlvl_bboxes = []
mlvl_scores = []
mlvl_centerness = []
for cls_score, bbox_pred, centerness, points in zip(
cls_scores, bbox_preds, centernesses, mlvl_points):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
scores = cls_score.permute(1, 2, 0).reshape(
-1, self.cls_out_channels).sigmoid()
centerness = centerness.permute(1, 2, 0).reshape(-1).sigmoid()
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
max_scores, _ = (scores * centerness[:, None]).max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
points = points[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
centerness = centerness[topk_inds]
bboxes = distance2bbox(points, bbox_pred, max_shape=img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_centerness.append(centerness)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
mlvl_centerness = torch.cat(mlvl_centerness)
det_bboxes, det_labels = multiclass_nms(
mlvl_bboxes,
mlvl_scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=mlvl_centerness)
return det_bboxes, det_labels
def get_points(self, featmap_sizes, dtype, device):
"""Get points according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
dtype (torch.dtype): Type of points.
device (torch.device): Device of points.
Returns:
tuple: points of each image.
"""
mlvl_points = []
for i in range(len(featmap_sizes)):
mlvl_points.append(
self.get_points_single(featmap_sizes[i], self.strides[i],
dtype, device))
return mlvl_points
def get_points_single(self, featmap_size, stride, dtype, device):
h, w = featmap_size
x_range = torch.arange(
0, w * stride, stride, dtype=dtype, device=device)
y_range = torch.arange(
0, h * stride, stride, dtype=dtype, device=device)
y, x = torch.meshgrid(y_range, x_range)
points = torch.stack(
(x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2
return points
def fcos_target(self, points, gt_bboxes_list, gt_labels_list):
assert len(points) == len(self.regress_ranges)
num_levels = len(points)
# expand regress ranges to align with points
expanded_regress_ranges = [
points[i].new_tensor(self.regress_ranges[i])[None].expand_as(
points[i]) for i in range(num_levels)
]
# concat all levels points and regress ranges
concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)
concat_points = torch.cat(points, dim=0)
# get labels and bbox_targets of each image
labels_list, bbox_targets_list = multi_apply(
self.fcos_target_single,
gt_bboxes_list,
gt_labels_list,
points=concat_points,
regress_ranges=concat_regress_ranges)
# split to per img, per level
num_points = [center.size(0) for center in points]
labels_list = [labels.split(num_points, 0) for labels in labels_list]
bbox_targets_list = [
bbox_targets.split(num_points, 0)
for bbox_targets in bbox_targets_list
]
# concat per level image
concat_lvl_labels = []
concat_lvl_bbox_targets = []
for i in range(num_levels):
concat_lvl_labels.append(
torch.cat([labels[i] for labels in labels_list]))
concat_lvl_bbox_targets.append(
torch.cat(
[bbox_targets[i] for bbox_targets in bbox_targets_list]))
return concat_lvl_labels, concat_lvl_bbox_targets
def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
num_points = points.size(0)
num_gts = gt_labels.size(0)
areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + 1) * (
gt_bboxes[:, 3] - gt_bboxes[:, 1] + 1)
# TODO: figure out why these two are different
# areas = areas[None].expand(num_points, num_gts)
areas = areas[None].repeat(num_points, 1)
regress_ranges = regress_ranges[:, None, :].expand(
num_points, num_gts, 2)
gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)
xs, ys = points[:, 0], points[:, 1]
xs = xs[:, None].expand(num_points, num_gts)
ys = ys[:, None].expand(num_points, num_gts)
left = xs - gt_bboxes[..., 0]
right = gt_bboxes[..., 2] - xs
top = ys - gt_bboxes[..., 1]
bottom = gt_bboxes[..., 3] - ys
bbox_targets = torch.stack((left, top, right, bottom), -1)
# condition1: inside a gt bbox
inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0
# condition2: limit the regression range for each location
max_regress_distance = bbox_targets.max(-1)[0]
inside_regress_range = (
max_regress_distance >= regress_ranges[..., 0]) & (
max_regress_distance <= regress_ranges[..., 1])
# if there are still more than one objects for a location,
# we choose the one with minimal area
areas[inside_gt_bbox_mask == 0] = INF
areas[inside_regress_range == 0] = INF
min_area, min_area_inds = areas.min(dim=1)
labels = gt_labels[min_area_inds]
labels[min_area == INF] = 0
bbox_targets = bbox_targets[range(num_points), min_area_inds]
return labels, bbox_targets
def centerness_target(self, pos_bbox_targets):
# only calculate pos centerness targets, otherwise there may be nan
left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness_targets = (
left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (
top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])
return torch.sqrt(centerness_targets)
# Source: Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/fcos_head.py
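A forward-pass sketch for FCOSHead above on dummy FPN features; it assumes this mmdet fork and mmcv are installed so ConvModule and the loss builders resolve. With num_classes=81 (80 foreground classes plus background) the classification maps have 80 channels:

import torch

head = FCOSHead(num_classes=81, in_channels=256)
head.init_weights()

# Dummy FPN outputs for a 512x512 image, one level per stride (4, 8, 16, 32, 64).
feats = [torch.rand(2, 256, 512 // s, 512 // s) for s in head.strides]
cls_scores, bbox_preds, centernesses = head(feats)

print([tuple(c.shape) for c in cls_scores])
# [(2, 80, 128, 128), (2, 80, 64, 64), (2, 80, 32, 32), (2, 80, 16, 16), (2, 80, 8, 8)]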
from collections import defaultdict, OrderedDict
from functools import partial
class FeatureHooks:
def __init__(self, hooks, named_modules):
# setup feature hooks
modules = {k: v for k, v in named_modules}
for h in hooks:
hook_name = h['name']
m = modules[hook_name]
hook_fn = partial(self._collect_output_hook, hook_name)
if h['type'] == 'forward_pre':
m.register_forward_pre_hook(hook_fn)
elif h['type'] == 'forward':
m.register_forward_hook(hook_fn)
else:
assert False, "Unsupported hook type"
self._feature_outputs = defaultdict(OrderedDict)
def _collect_output_hook(self, name, *args):
x = args[-1] # tensor we want is last argument, output for fwd, input for fwd_pre
if isinstance(x, tuple):
x = x[0] # unwrap input tuple
self._feature_outputs[x.device][name] = x
def get_output(self, device):
output = tuple(self._feature_outputs[device].values())[::-1]
self._feature_outputs[device] = OrderedDict() # clear after reading
return output
# Source: Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/feature_hooks.py
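FeatureHooks above registers forward hooks by module name and stashes each hooked module's last output per device; get_output returns the cached tensors in reverse collection order and clears the cache. A self-contained usage sketch on a tiny Sequential (module names '0' and '2' come from named_modules()):

import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(), nn.Conv2d(8, 16, 3, padding=1))

# Capture the outputs of the two conv layers.
hooks = [{'name': '0', 'type': 'forward'}, {'name': '2', 'type': 'forward'}]
feature_hooks = FeatureHooks(hooks, model.named_modules())

x = torch.rand(1, 3, 32, 32)
model(x)
features = feature_hooks.get_output(x.device)
print([f.shape for f in features])
# [torch.Size([1, 16, 32, 32]), torch.Size([1, 8, 32, 32])] -- note the reversed order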
import torch.nn as nn
from mmcv.cnn.weight_init import normal_init, xavier_init
from ..backbones.resnet import Bottleneck
from ..registry import HEADS
from ..utils import ConvModule
from .bbox_head import BBoxHead
class BasicResBlock(nn.Module):
"""Basic residual block.
This block is a little different from the block in the ResNet backbone.
    The kernel size of conv2 is 1 in this block, while it is 3 in the
    ResNet BasicBlock.
Args:
in_channels (int): Channels of the input feature map.
out_channels (int): Channels of the output feature map.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
"""
def __init__(self,
in_channels,
out_channels,
conv_cfg=None,
norm_cfg=dict(type='BN')):
super(BasicResBlock, self).__init__()
# main path
self.conv1 = ConvModule(
in_channels,
in_channels,
kernel_size=3,
padding=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
self.conv2 = ConvModule(
in_channels,
out_channels,
kernel_size=1,
bias=False,
activation=None,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
# identity path
self.conv_identity = ConvModule(
in_channels,
out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
activation=None)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.conv2(x)
identity = self.conv_identity(identity)
out = x + identity
out = self.relu(out)
return out
@HEADS.register_module
class DoubleConvFCBBoxHead(BBoxHead):
r"""Bbox head used in Double-Head R-CNN
/-> cls
/-> shared convs ->
\-> reg
roi features
/-> cls
\-> shared fc ->
\-> reg
""" # noqa: W605
def __init__(self,
num_convs=0,
num_fcs=0,
conv_out_channels=1024,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=dict(type='BN'),
**kwargs):
kwargs.setdefault('with_avg_pool', True)
super(DoubleConvFCBBoxHead, self).__init__(**kwargs)
assert self.with_avg_pool
assert num_convs > 0
assert num_fcs > 0
self.num_convs = num_convs
self.num_fcs = num_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# increase the channel of input features
self.res_block = BasicResBlock(self.in_channels,
self.conv_out_channels)
# add conv heads
self.conv_branch = self._add_conv_branch()
# add fc heads
self.fc_branch = self._add_fc_branch()
out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes)
self.relu = nn.ReLU(inplace=True)
def _add_conv_branch(self):
"""Add the fc branch which consists of a sequential of conv layers"""
branch_convs = nn.ModuleList()
for i in range(self.num_convs):
branch_convs.append(
Bottleneck(
inplanes=self.conv_out_channels,
planes=self.conv_out_channels // 4,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
return branch_convs
def _add_fc_branch(self):
"""Add the fc branch which consists of a sequential of fc layers"""
branch_fcs = nn.ModuleList()
for i in range(self.num_fcs):
fc_in_channels = (
self.in_channels *
self.roi_feat_area if i == 0 else self.fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
return branch_fcs
def init_weights(self):
normal_init(self.fc_cls, std=0.01)
normal_init(self.fc_reg, std=0.001)
for m in self.fc_branch.modules():
if isinstance(m, nn.Linear):
xavier_init(m, distribution='uniform')
def forward(self, x_cls, x_reg):
# conv head
x_conv = self.res_block(x_reg)
for conv in self.conv_branch:
x_conv = conv(x_conv)
if self.with_avg_pool:
x_conv = self.avg_pool(x_conv)
x_conv = x_conv.view(x_conv.size(0), -1)
bbox_pred = self.fc_reg(x_conv)
# fc head
x_fc = x_cls.view(x_cls.size(0), -1)
for fc in self.fc_branch:
x_fc = self.relu(fc(x_fc))
cls_score = self.fc_cls(x_fc)
return cls_score, bbox_pred
# Source: Cream/CDARTS/CDARTS_detection/mmdet/models/bbox_heads/double_bbox_head.py
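A quick shape check for BasicResBlock above (the full double head also needs RoI extraction, so only the residual block is exercised here). It assumes this mmdet fork is installed so ConvModule resolves:

import torch

block = BasicResBlock(in_channels=256, out_channels=1024)
x = torch.rand(2, 256, 7, 7)     # typical 7x7 RoI features
print(block(x).shape)            # torch.Size([2, 1024, 7, 7])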
from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_proposals,
merge_aug_bboxes, merge_aug_masks, multiclass_nms)
class RPNTestMixin(object):
def simple_test_rpn(self, x, img_meta, rpn_test_cfg):
rpn_outs = self.rpn_head(x)
proposal_inputs = rpn_outs + (img_meta, rpn_test_cfg)
proposal_list = self.rpn_head.get_bboxes(*proposal_inputs)
return proposal_list
def aug_test_rpn(self, feats, img_metas, rpn_test_cfg):
imgs_per_gpu = len(img_metas[0])
aug_proposals = [[] for _ in range(imgs_per_gpu)]
for x, img_meta in zip(feats, img_metas):
proposal_list = self.simple_test_rpn(x, img_meta, rpn_test_cfg)
for i, proposals in enumerate(proposal_list):
aug_proposals[i].append(proposals)
# reorganize the order of 'img_metas' to match the dimensions
# of 'aug_proposals'
aug_img_metas = []
for i in range(imgs_per_gpu):
aug_img_meta = []
for j in range(len(img_metas)):
aug_img_meta.append(img_metas[j][i])
aug_img_metas.append(aug_img_meta)
# after merging, proposals will be rescaled to the original image size
merged_proposals = [
merge_aug_proposals(proposals, aug_img_meta, rpn_test_cfg)
for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
]
return merged_proposals
class BBoxTestMixin(object):
def simple_test_bboxes(self,
x,
img_meta,
proposals,
rcnn_test_cfg,
rescale=False):
"""Test only det bboxes without augmentation."""
rois = bbox2roi(proposals)
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
out_list = self.bbox_head(roi_feats)
cls_score = out_list[0]
bbox_pred = out_list[1]
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
det_bboxes, det_labels = self.bbox_head.get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
# TODO more flexible
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip)
rois = bbox2roi([proposals])
# recompute feature maps to save GPU memory
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
cls_score, bbox_pred = self.bbox_head(roi_feats)
bboxes, scores = self.bbox_head.get_det_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(
merged_bboxes, merged_scores, rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms, rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels
class MaskTestMixin(object):
def simple_test_mask(self,
x,
img_meta,
det_bboxes,
det_labels,
rescale=False):
# image shape of the first image in the batch (only one)
ori_shape = img_meta[0]['ori_shape']
scale_factor = img_meta[0]['scale_factor']
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
_bboxes = (
det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head(mask_feats)
segm_result = self.mask_head.get_seg_masks(
mask_pred, _bboxes, det_labels, self.test_cfg.rcnn, ori_shape,
scale_factor, rescale)
return segm_result
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes - 1)]
else:
aug_masks = []
for x, img_meta in zip(feats, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)],
mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
mask_pred = self.mask_head(mask_feats)
# convert to numpy array to save memory
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas,
self.test_cfg.rcnn)
ori_shape = img_metas[0][0]['ori_shape']
segm_result = self.mask_head.get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
self.test_cfg.rcnn,
ori_shape,
scale_factor=1.0,
rescale=False)
return segm_result
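# --- Hedged illustration (added for clarity, not part of the original mixins) ---
# aug_test_rpn transposes img_metas from per-augmentation lists into per-image lists,
# so aug_img_metas[i][j] is the meta of image i under augmentation j. A standalone
# sketch of that reordering with plain strings:
def _aug_meta_reorder_example():
    img_metas = [['img0_aug0', 'img1_aug0'],   # augmentation 0
                 ['img0_aug1', 'img1_aug1']]   # augmentation 1
    imgs_per_gpu = len(img_metas[0])
    aug_img_metas = [[img_metas[j][i] for j in range(len(img_metas))]
                     for i in range(imgs_per_gpu)]
    assert aug_img_metas == [['img0_aug0', 'img0_aug1'],
                             ['img1_aug0', 'img1_aug1']]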
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/test_mixins.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/test_mixins.py",
"repo_id": "Cream",
"token_count": 3836
}
| 278 |
from .fcn_mask_head import FCNMaskHead
from ..registry import HEADS
from ..utils import ConvModule
@HEADS.register_module
class HTCMaskHead(FCNMaskHead):
def __init__(self, *args, **kwargs):
super(HTCMaskHead, self).__init__(*args, **kwargs)
self.conv_res = ConvModule(
self.conv_out_channels,
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def init_weights(self):
super(HTCMaskHead, self).init_weights()
self.conv_res.init_weights()
def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
if res_feat is not None:
res_feat = self.conv_res(res_feat)
x = x + res_feat
for conv in self.convs:
x = conv(x)
res_feat = x
outs = []
if return_logits:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
outs.append(mask_pred)
if return_feat:
outs.append(res_feat)
return outs if len(outs) > 1 else outs[0]
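# --- Hedged usage sketch (added for illustration, not part of the original head) ---
# Keyword arguments follow the usual FCNMaskHead interface; the default mask loss is
# assumed to resolve through this repo's LOSSES registry. Shapes are illustrative.
def _htc_mask_head_usage_sketch():
    import torch
    head = HTCMaskHead(num_convs=4, in_channels=256, conv_out_channels=256,
                       num_classes=81)
    feats = torch.randn(2, 256, 14, 14)                        # 2 RoIs
    mask_pred, res_feat = head(feats)                          # logits + residual feature
    mask_pred_only = head(feats, res_feat=res_feat, return_feat=False)
    return mask_pred.shape, mask_pred_only.shape               # roughly (2, 81, 28, 28)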
|
Cream/CDARTS/CDARTS_detection/mmdet/models/mask_heads/htc_mask_head.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/mask_heads/htc_mask_head.py",
"repo_id": "Cream",
"token_count": 595
}
| 279 |
from mmdet.utils import Registry
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
ROI_EXTRACTORS = Registry('roi_extractor')
SHARED_HEADS = Registry('shared_head')
HEADS = Registry('head')
LOSSES = Registry('loss')
DETECTORS = Registry('detector')
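# --- Hedged usage sketch (added for illustration, not part of the original registry) ---
# Modules elsewhere in the codebase register themselves with the decorator and are
# later looked up by name when building from config dicts. `ToyBackbone` below is a
# hypothetical class, not one defined in this repo.
if __name__ == '__main__':
    @BACKBONES.register_module
    class ToyBackbone(object):
        def __init__(self, depth=50):
            self.depth = depth
    assert BACKBONES.get('ToyBackbone') is ToyBackbone
    print(BACKBONES)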
|
Cream/CDARTS/CDARTS_detection/mmdet/models/registry.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/registry.py",
"repo_id": "Cream",
"token_count": 86
}
| 280 |
import torch
from torch.autograd import Function
from .. import deform_pool_cuda
class DeformRoIPoolingFunction(Function):
@staticmethod
def forward(ctx,
data,
rois,
offset,
spatial_scale,
out_size,
out_channels,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0):
ctx.spatial_scale = spatial_scale
ctx.out_size = out_size
ctx.out_channels = out_channels
ctx.no_trans = no_trans
ctx.group_size = group_size
ctx.part_size = out_size if part_size is None else part_size
ctx.sample_per_part = sample_per_part
ctx.trans_std = trans_std
assert 0.0 <= ctx.trans_std <= 1.0
if not data.is_cuda:
raise NotImplementedError
n = rois.shape[0]
output = data.new_empty(n, out_channels, out_size, out_size)
output_count = data.new_empty(n, out_channels, out_size, out_size)
deform_pool_cuda.deform_psroi_pooling_cuda_forward(
data, rois, offset, output, output_count, ctx.no_trans,
ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
ctx.part_size, ctx.sample_per_part, ctx.trans_std)
if data.requires_grad or rois.requires_grad or offset.requires_grad:
ctx.save_for_backward(data, rois, offset)
ctx.output_count = output_count
return output
@staticmethod
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError
data, rois, offset = ctx.saved_tensors
output_count = ctx.output_count
grad_input = torch.zeros_like(data)
grad_rois = None
grad_offset = torch.zeros_like(offset)
deform_pool_cuda.deform_psroi_pooling_cuda_backward(
grad_output, data, rois, offset, output_count, grad_input,
grad_offset, ctx.no_trans, ctx.spatial_scale, ctx.out_channels,
ctx.group_size, ctx.out_size, ctx.part_size, ctx.sample_per_part,
ctx.trans_std)
return (grad_input, grad_rois, grad_offset, None, None, None, None,
None, None, None, None)
deform_roi_pooling = DeformRoIPoolingFunction.apply
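# --- Hedged usage sketch (added for illustration, not part of the original op) ---
# The op requires CUDA tensors and the compiled deform_pool_cuda extension; rois use
# the (batch_idx, x1, y1, x2, y2) convention. With no_trans=True an empty offset
# tensor is passed, mirroring how the DeformRoIPooling module calls this function.
def _deform_roi_pooling_usage_sketch():
    feat = torch.randn(1, 16, 32, 32, device='cuda')
    rois = torch.tensor([[0., 4., 4., 20., 20.]], device='cuda')
    offset = feat.new_empty(0)
    # positional args: spatial_scale=1.0, out_size=7, out_channels=16, no_trans=True
    out = deform_roi_pooling(feat, rois, offset, 1.0, 7, 16, True)
    return out.shape    # expected (1, 16, 7, 7)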
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/functions/deform_pool.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/functions/deform_pool.py",
"repo_id": "Cream",
"token_count": 1195
}
| 281 |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='masked_conv2d_cuda',
ext_modules=[
CUDAExtension('masked_conv2d_cuda', [
'src/masked_conv2d_cuda.cpp',
'src/masked_conv2d_kernel.cu',
]),
],
cmdclass={'build_ext': BuildExtension})
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/masked_conv/setup.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/masked_conv/setup.py",
"repo_id": "Cream",
"token_count": 162
}
| 282 |
from torch.nn.modules.module import Module
from ..functions.roi_align import RoIAlignFunction
class RoIAlign(Module):
def __init__(self, out_size, spatial_scale, sample_num=0):
super(RoIAlign, self).__init__()
self.out_size = out_size
self.spatial_scale = float(spatial_scale)
self.sample_num = int(sample_num)
def forward(self, features, rois):
return RoIAlignFunction.apply(features, rois, self.out_size,
self.spatial_scale, self.sample_num)
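# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Requires the compiled roi_align CUDA extension; rois follow the
# (batch_idx, x1, y1, x2, y2) convention in input-image coordinates, mapped onto the
# feature map through spatial_scale.
def _roi_align_usage_sketch():
    import torch
    roi_align = RoIAlign(out_size=7, spatial_scale=1.0 / 16, sample_num=2)
    feats = torch.randn(1, 256, 50, 50, device='cuda')
    rois = torch.tensor([[0., 0., 0., 320., 320.]], device='cuda')
    return roi_align(feats, rois).shape    # expected (1, 256, 7, 7)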
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/modules/roi_align.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/modules/roi_align.py",
"repo_id": "Cream",
"token_count": 235
}
| 283 |
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from .. import sigmoid_focal_loss_cuda
class SigmoidFocalLossFunction(Function):
@staticmethod
def forward(ctx, input, target, gamma=2.0, alpha=0.25):
ctx.save_for_backward(input, target)
num_classes = input.shape[1]
ctx.num_classes = num_classes
ctx.gamma = gamma
ctx.alpha = alpha
loss = sigmoid_focal_loss_cuda.forward(input, target, num_classes,
gamma, alpha)
return loss
@staticmethod
@once_differentiable
def backward(ctx, d_loss):
input, target = ctx.saved_tensors
num_classes = ctx.num_classes
gamma = ctx.gamma
alpha = ctx.alpha
d_loss = d_loss.contiguous()
d_input = sigmoid_focal_loss_cuda.backward(input, target, d_loss,
num_classes, gamma, alpha)
return d_input, None, None, None, None
sigmoid_focal_loss = SigmoidFocalLossFunction.apply
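# --- Hedged usage sketch (added for illustration, not part of the original op) ---
# Assumes the CUDA extension is built. The kernel takes (N, num_classes) logits and
# integer targets; following mmdet's FocalLoss usage, targets are taken to be in
# 1..num_classes with 0 reserved for background, and the op returns an element-wise
# loss that the caller still has to reduce.
def _sigmoid_focal_loss_usage_sketch():
    import torch
    logits = torch.randn(4, 80, device='cuda', requires_grad=True)
    targets = torch.randint(1, 81, (4,), device='cuda')
    loss = sigmoid_focal_loss(logits, targets, 2.0, 0.25).mean()
    loss.backward()
    return loss.item()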
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/functions/sigmoid_focal_loss.py",
"repo_id": "Cream",
"token_count": 516
}
| 284 |
import os
import argparse
import os.path as osp
import torch
import torch.distributed as dist
import shutil
import tempfile
import mmcv
from mmcv.runner import load_checkpoint, get_dist_info
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmdet.apis import init_dist
from mmdet.core import results2json, coco_eval, wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
def single_gpu_test(model, data_loader, show=False):
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=not show, **data)
results.append(result)
if show:
model.module.show_result(data, result, dataset.img_norm_cfg)
batch_size = data['img'][0].size(0)
for _ in range(0, batch_size, 100):
prog_bar.update()
return results
def multi_gpu_test(model, data_loader, tmpdir=None):
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
results.append(result)
if rank == 0:
batch_size = data['img'][0].size(0)
for _ in range(0, batch_size * world_size, 500):
prog_bar.update()
# collect results from all ranks
results = collect_results(results, len(dataset), tmpdir)
return results
def collect_results(result_part, size, tmpdir=None):
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
tmpdir = tempfile.mkdtemp()
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
else:
mmcv.mkdir_or_exist(tmpdir)
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
dist.barrier()
# collect all parts
if rank != 0:
return None
else:
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
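# --- Hedged illustration (added for clarity, not part of the original script) ---
# The ordering step above assumes the distributed sampler hands out samples to ranks
# in a round-robin fashion, so interleaving the per-rank lists restores dataset order.
# A standalone sketch of that interleave:
def _interleave_example():
    part_list = [[0, 2, 4], [1, 3, 5]]   # partial results from rank 0 and rank 1
    ordered = []
    for res in zip(*part_list):
        ordered.extend(list(res))
    assert ordered == [0, 1, 2, 3, 4, 5]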
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--eval',
type=str,
nargs='+',
choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
help='eval types')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args, unparsed = parser.parse_known_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
imgs_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints; this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show)
else:
model = MMDistributedDataParallel(model.cuda())
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
rank, _ = get_dist_info()
if args.out and rank == 0:
print('\nwriting results to {}'.format(args.out))
mmcv.dump(outputs, args.out)
eval_types = args.eval
if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
if eval_types == ['proposal_fast']:
result_file = args.out
coco_eval(result_file, eval_types, dataset.coco)
else:
if not isinstance(outputs[0], dict):
result_files = results2json(dataset, outputs, args.out)
coco_eval(result_files, eval_types, dataset.coco)
else:
for name in outputs[0]:
print('\nEvaluating {}'.format(name))
outputs_ = [out[name] for out in outputs]
result_file = args.out + '.{}'.format(name)
result_files = results2json(dataset, outputs_,
result_file)
coco_eval(result_files, eval_types, dataset.coco)
if __name__ == '__main__':
main()
|
Cream/CDARTS/CDARTS_detection/test.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/test.py",
"repo_id": "Cream",
"token_count": 3205
}
| 285 |
import math
import torch
import random
import numpy as np
import torch.nn as nn
from numpy import int64 as int64
import torchvision.transforms as transforms
from PIL import Image, ImageOps, ImageFilter
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation.
Args:
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""
def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
self.mean = mean
self.std = std
def __call__(self, sample):
img = sample['image']
mask = sample['label']
img = np.array(img).astype(np.float32)
mask = np.array(mask).astype(np.float32)
img /= 255.0
img -= self.mean
img /= self.std
return {'image': img,
'label': mask}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
img = sample['image']
mask = sample['label']
img = np.array(img).astype(np.float32).transpose((2, 0, 1))
mask = np.array(mask).astype(np.float32)
img = torch.from_numpy(img).float()
mask = torch.from_numpy(mask).float()
return {'image': img,
'label': mask}
class RandomHorizontalFlip(object):
def __call__(self, sample):
img = sample['image']
mask = sample['label']
if random.random() < 0.5:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
return {'image': img,
'label': mask}
class RandomRotate(object):
def __init__(self, degree):
self.degree = degree
def __call__(self, sample):
img = sample['image']
mask = sample['label']
rotate_degree = random.uniform(-1 * self.degree, self.degree)
img = img.rotate(rotate_degree, Image.BILINEAR)
mask = mask.rotate(rotate_degree, Image.NEAREST)
return {'image': img,
'label': mask}
class RandomGaussianBlur(object):
def __call__(self, sample):
img = sample['image']
mask = sample['label']
if random.random() < 0.5:
img = img.filter(ImageFilter.GaussianBlur(
radius=random.random()))
return {'image': img,
'label': mask}
class RandomScaleCrop(object):
def __init__(self, base_size, crop_size, fill=0):
self.base_size = base_size
self.crop_size = crop_size
self.fill = fill
def __call__(self, sample):
img = sample['image']
mask = sample['label']
# random scale (short edge)
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = img.size
if h > w:
ow = short_size
oh = int(1.0 * h * ow / w)
else:
oh = short_size
ow = int(1.0 * w * oh / h)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# pad crop
if short_size < self.crop_size:
padh = self.crop_size - oh if oh < self.crop_size else 0
padw = self.crop_size - ow if ow < self.crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - self.crop_size)
y1 = random.randint(0, h - self.crop_size)
img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
return {'image': img,
'label': mask}
class FixScaleCrop(object):
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, sample):
img = sample['image']
mask = sample['label']
w, h = img.size
if w > h:
oh = self.crop_size
ow = int(1.0 * w * oh / h)
else:
ow = self.crop_size
oh = int(1.0 * h * ow / w)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
# center crop
w, h = img.size
x1 = int(round((w - self.crop_size) / 2.))
y1 = int(round((h - self.crop_size) / 2.))
img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
return {'image': img,
'label': mask}
# resize the short edge to `resize` (e.g. 512, so a 1:2 image becomes 512x1024)
class FixedResize(object):
"""change the short edge length to size"""
def __init__(self, resize=512):
self.size1 = resize # size= 512
def __call__(self, sample):
img = sample['image']
mask = sample['label']
assert img.size == mask.size
w, h = img.size
if w > h:
oh = self.size1
ow = int(1.0 * w * oh / h)
else:
ow = self.size1
oh = int(1.0 * h * ow / w)
img = img.resize((ow, oh), Image.BILINEAR)
mask = mask.resize((ow, oh), Image.NEAREST)
return {'image': img,
'label': mask}
# random crop to a crop_size x crop_size patch (e.g. 321x321)
class RandomCrop(object):
def __init__(self, crop_size=320):
self.crop_size = crop_size
def __call__(self, sample):
img = sample['image']
mask = sample['label']
w, h = img.size
x1 = random.randint(0, w - self.crop_size)
y1 = random.randint(0, h - self.crop_size)
img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
return {'image': img,
'label': mask}
class RandomScale(object):
def __init__(self, scales=(1,)):
self.scales = scales
def __call__(self, sample):
img = sample['image']
mask = sample['label']
w, h = img.size
scale = random.choice(self.scales)
        w, h = int(w * scale), int(h * scale)
        # resize image and mask with the sampled scale
        img = img.resize((w, h), Image.BILINEAR)
        mask = mask.resize((w, h), Image.NEAREST)
        return {'image': img,
                'label': mask}
class Retrain_Preprocess(object):
def __init__(self, flip_prob, scale_range, crop, mean, std):
self.flip_prob = flip_prob
self.scale_range = scale_range
self.crop = crop
self.data_transforms = transforms.Compose([transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)])
def __call__(self, sample):
if self.flip_prob is not None and random.random() < self.flip_prob:
sample['image'] = sample['image'].transpose(Image.FLIP_LEFT_RIGHT)
sample['label'] = sample['label'].transpose(Image.FLIP_LEFT_RIGHT)
if self.scale_range is not None:
w, h = sample['image'].size
rand_log_scale = math.log(self.scale_range[0], 2) + random.random() * \
(math.log(self.scale_range[1], 2) - math.log(self.scale_range[0], 2))
random_scale = math.pow(2, rand_log_scale)
new_size = (int(round(w * random_scale)), int(round(h * random_scale)))
sample['image'] = sample['image'].resize(new_size, Image.ANTIALIAS)
sample['label'] = sample['label'].resize(new_size, Image.NEAREST)
sample['image'] = self.data_transforms(sample['image'])
sample['label'] = torch.LongTensor(np.array(sample['label']).astype(int64))
if self.crop:
image, mask = sample['image'], sample['label']
h, w = image.shape[1], image.shape[2]
pad_tb = max(0, self.crop[0] - h)
pad_lr = max(0, self.crop[1] - w)
image = nn.ZeroPad2d((0, pad_lr, 0, pad_tb))(image)
mask = nn.ConstantPad2d((0, pad_lr, 0, pad_tb), 255)(mask)
h, w = image.shape[1], image.shape[2]
i = random.randint(0, h - self.crop[0])
j = random.randint(0, w - self.crop[1])
sample['image'] = image[:, i:i + self.crop[0], j:j + self.crop[1]]
sample['label'] = mask[i:i + self.crop[0], j:j + self.crop[1]]
return sample
class transform_tr(object):
def __init__(self, args, mean, std):
if args.multi_scale is None:
self.composed_transforms = transforms.Compose([
FixedResize(resize=args.resize),
RandomCrop(crop_size=args.crop_size),
# tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
# tr.RandomGaussianBlur(),
Normalize(mean, std),
ToTensor()])
else:
self.composed_transforms = transforms.Compose([
FixedResize(resize=args.resize),
RandomScale(scales=args.multi_scale),
RandomCrop(crop_size=args.crop_size),
# tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
# tr.RandomGaussianBlur(),
Normalize(mean, std),
ToTensor()])
def __call__(self, sample):
return self.composed_transforms(sample)
class transform_val(object):
def __init__(self, args, mean, std):
self.composed_transforms = transforms.Compose([
FixedResize(resize=args.resize),
FixScaleCrop(crop_size=args.crop_size), # TODO:CHECK THIS
Normalize(mean, std),
ToTensor()])
def __call__(self, sample):
return self.composed_transforms(sample)
# NOTE: this second transform_val shadows the one defined above, so only this
# version (a fixed resize to crop_size, without FixScaleCrop) takes effect.
class transform_val(object):
def __init__(self, args, mean, std):
self.composed_transforms = transforms.Compose([
FixedResize(resize=args.crop_size),
Normalize(mean, std),
ToTensor()])
def __call__(self, sample):
return self.composed_transforms(sample)
class transform_ts(object):
def __init__(self, args, mean, std):
self.composed_transforms = transforms.Compose([
FixedResize(resize=args.crop_size),
Normalize(mean, std),
ToTensor()])
def __call__(self, sample):
return self.composed_transforms(sample)
class transform_retr(object):
def __init__(self, args, mean, std):
crop_size = (args.crop_size, args.crop_size) if isinstance(args.crop_size, int) else args.crop_size
self.composed_transforms = Retrain_Preprocess(0.5, (0.5, 2), crop_size, mean, std)
def __call__(self, sample):
return self.composed_transforms(sample)
class transform_reval(object):  # multi-scale evaluation happens in evaluate.py, so no resize is needed in the dataset
def __init__(self, args, mean, std):
self.composed_transforms = Retrain_Preprocess(None, None, None, mean, std)
def __call__(self, sample):
return self.composed_transforms(sample)
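# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Runs the single-scale training pipeline on a random dummy sample; the args fields
# mirror the attributes accessed above, and the mean/std values are illustrative
# rather than real dataset statistics.
if __name__ == '__main__':
    from argparse import Namespace
    dummy_img = Image.fromarray(np.random.randint(0, 255, (256, 512, 3), dtype=np.uint8))
    dummy_mask = Image.fromarray(np.random.randint(0, 19, (256, 512), dtype=np.uint8))
    args = Namespace(multi_scale=None, resize=512, crop_size=321)
    pipeline = transform_tr(args, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    out = pipeline({'image': dummy_img, 'label': dummy_mask})
    print(out['image'].shape, out['label'].shape)   # (3, 321, 321) and (321, 321)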
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/custom_transforms.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/custom_transforms.py",
"repo_id": "Cream",
"token_count": 5277
}
| 286 |
# ------------------------------------------------------------------------------
# Builds transformation for both image and labels.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from . import transforms as T
def build_transforms(dataset, is_train=True):
if is_train:
min_scale = dataset.min_scale
max_scale = dataset.max_scale
scale_step_size = dataset.scale_step_size
crop_h = dataset.crop_h
crop_w = dataset.crop_w
pad_value = dataset.pad_value
ignore_label = dataset.label_pad_value
flip_prob = 0.5 if dataset.mirror else 0
mean = dataset.mean
std = dataset.std
else:
# no data augmentation
min_scale = 1
max_scale = 1
scale_step_size = 0
flip_prob = 0
crop_h = dataset.crop_h
crop_w = dataset.crop_w
pad_value = dataset.pad_value
ignore_label = dataset.label_pad_value
mean = dataset.mean
std = dataset.std
transforms = T.Compose(
[
T.RandomScale(
min_scale,
max_scale,
scale_step_size
),
T.RandomCrop(
crop_h,
crop_w,
pad_value,
ignore_label,
random_pad=is_train
),
T.RandomHorizontalFlip(flip_prob),
T.ToTensor(),
T.Normalize(
mean,
std
)
]
)
return transforms
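# --- Hedged usage sketch (added for illustration, not part of the original builder) ---
# build_transforms only reads attributes from the dataset object, so a simple
# namespace with the same fields is enough to exercise it; the values below are
# illustrative and do not come from a real dataset config.
def _build_transforms_usage_sketch():
    from types import SimpleNamespace
    dummy_dataset = SimpleNamespace(
        min_scale=0.5, max_scale=2.0, scale_step_size=0.1,
        crop_h=513, crop_w=513,
        pad_value=(123, 116, 103), label_pad_value=(0,),
        mirror=True,
        mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
    return build_transforms(dummy_dataset, is_train=True)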
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/transforms/build.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/transforms/build.py",
"repo_id": "Cream",
"token_count": 789
}
| 287 |
from .distributed_sampler import TrainingSampler, InferenceSampler
|
Cream/CDARTS/CDARTS_segmentation/segmentation/data/samplers/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/data/samplers/__init__.py",
"repo_id": "Cream",
"token_count": 17
}
| 288 |
# ------------------------------------------------------------------------------
# Reference: https://github.com/pytorch/vision/blob/master/torchvision/models/mnasnet.py
# Modified by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import math
import warnings
import torch
import torch.nn as nn
from torchvision.models.utils import load_state_dict_from_url
__all__ = ['MNASNet', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3']
_MODEL_URLS = {
"mnasnet0_5":
"https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth",
"mnasnet0_75": None,
"mnasnet1_0":
"https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth",
"mnasnet1_3": None
}
# Paper suggests 0.9997 momentum, for TensorFlow. Equivalent PyTorch momentum is
# 1.0 - tensorflow.
_BN_MOMENTUM = 1 - 0.9997
class _InvertedResidual(nn.Module):
def __init__(self, in_ch, out_ch, kernel_size, stride, expansion_factor,
bn_momentum=0.1):
super(_InvertedResidual, self).__init__()
assert stride in [1, 2]
assert kernel_size in [3, 5]
mid_ch = in_ch * expansion_factor
self.apply_residual = (in_ch == out_ch and stride == 1)
self.layers = nn.Sequential(
# Pointwise
nn.Conv2d(in_ch, mid_ch, 1, bias=False),
nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
nn.ReLU(inplace=True),
# Depthwise
nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2,
stride=stride, groups=mid_ch, bias=False),
nn.BatchNorm2d(mid_ch, momentum=bn_momentum),
nn.ReLU(inplace=True),
# Linear pointwise. Note that there's no activation.
nn.Conv2d(mid_ch, out_ch, 1, bias=False),
nn.BatchNorm2d(out_ch, momentum=bn_momentum))
def forward(self, input):
if self.apply_residual:
return self.layers(input) + input
else:
return self.layers(input)
def _stack(in_ch, out_ch, kernel_size, stride, exp_factor, repeats,
bn_momentum):
""" Creates a stack of inverted residuals. """
assert repeats >= 1
# First one has no skip, because feature map size changes.
first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor,
bn_momentum=bn_momentum)
remaining = []
for _ in range(1, repeats):
remaining.append(
_InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor,
bn_momentum=bn_momentum))
return nn.Sequential(first, *remaining)
def _round_to_multiple_of(val, divisor, round_up_bias=0.9):
""" Asymmetric rounding to make `val` divisible by `divisor`. With default
bias, will round up, unless the number is no more than 10% greater than the
smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. """
assert 0.0 < round_up_bias < 1.0
new_val = max(divisor, int(val + divisor / 2) // divisor * divisor)
return new_val if new_val >= round_up_bias * val else new_val + divisor
def _get_depths(alpha):
""" Scales tensor depths as in reference MobileNet code, prefers rouding up
rather than down. """
depths = [32, 16, 24, 40, 80, 96, 192, 320]
return [_round_to_multiple_of(depth * alpha, 8) for depth in depths]
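# --- Hedged sanity check (added for illustration, not part of the original file) ---
# Every scaled depth is snapped to a multiple of 8 and never drops below the divisor,
# which is the property the asymmetric rounding above guarantees.
if __name__ == '__main__':
    for _alpha in (0.5, 0.75, 1.0, 1.3):
        _depths = _get_depths(_alpha)
        assert all(d % 8 == 0 and d >= 8 for d in _depths), _depths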
class MNASNet(torch.nn.Module):
""" MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. This
implements the B1 variant of the model.
>>> model = MNASNet(1000, 1.0)
>>> x = torch.rand(1, 3, 224, 224)
>>> y = model(x)
>>> y.dim()
1
>>> y.nelement()
1000
"""
# Version 2 adds depth scaling in the initial stages of the network.
_version = 2
def __init__(self, alpha, num_classes=1000, dropout=0.2):
super(MNASNet, self).__init__()
assert alpha > 0.0
self.alpha = alpha
self.num_classes = num_classes
depths = _get_depths(alpha)
layers = [
# First layer: regular conv.
nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
# Depthwise separable, no skip.
nn.Conv2d(depths[0], depths[0], 3, padding=1, stride=1,
groups=depths[0], bias=False),
nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1, bias=False),
nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM),
# MNASNet blocks: stacks of inverted residuals.
_stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
_stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM),
_stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM),
_stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM),
_stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM),
_stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM),
# Final mapping to classifier input.
# nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False),
# nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM),
# nn.ReLU(inplace=True),
]
self.layers = nn.Sequential(*layers)
# self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True),
# nn.Linear(1280, num_classes))
self._initialize_weights()
def forward(self, x):
outputs = {}
for i, module in enumerate(self.layers):
x = module(x)
outputs['layer_%d' % (i + 1)] = x
return outputs
# x = self.layers(x)
# # Equivalent to global avgpool and removing H and W dimensions.
# x = x.mean([2, 3])
# return self.classifier(x)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out",
nonlinearity="relu")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, nn.BatchNorm2d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight, mode="fan_out",
nonlinearity="sigmoid")
nn.init.zeros_(m.bias)
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
version = local_metadata.get("version", None)
assert version in [1, 2]
if version == 1 and not self.alpha == 1.0:
# In the initial version of the model (v1), stem was fixed-size.
# All other layer configurations were the same. This will patch
# the model so that it's identical to v1. Model with alpha 1.0 is
# unaffected.
depths = _get_depths(self.alpha)
v1_stem = [
nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False),
nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32,
bias=False),
nn.BatchNorm2d(32, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True),
nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False),
nn.BatchNorm2d(16, momentum=_BN_MOMENTUM),
_stack(16, depths[2], 3, 2, 3, 3, _BN_MOMENTUM),
]
for idx, layer in enumerate(v1_stem):
self.layers[idx] = layer
# The model is now identical to v1, and must be saved as such.
self._version = 1
warnings.warn(
"A new version of MNASNet model has been implemented. "
"Your checkpoint was saved using the previous version. "
"This checkpoint will load and work as before, but "
"you may want to upgrade by training a newer model or "
"transfer learning from an updated ImageNet checkpoint.",
UserWarning)
super(MNASNet, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys,
unexpected_keys, error_msgs)
def _load_pretrained(model_name, model, progress):
if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None:
raise ValueError(
"No checkpoint is available for model type {}".format(model_name))
checkpoint_url = _MODEL_URLS[model_name]
model.load_state_dict(
load_state_dict_from_url(checkpoint_url, progress=progress), strict=False)
def mnasnet0_5(pretrained=False, progress=True, **kwargs):
"""MNASNet with depth multiplier of 0.5 from
`"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
<https://arxiv.org/pdf/1807.11626.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = MNASNet(0.5, **kwargs)
if pretrained:
_load_pretrained("mnasnet0_5", model, progress)
return model
def mnasnet0_75(pretrained=False, progress=True, **kwargs):
"""MNASNet with depth multiplier of 0.75 from
`"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
<https://arxiv.org/pdf/1807.11626.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = MNASNet(0.75, **kwargs)
if pretrained:
_load_pretrained("mnasnet0_75", model, progress)
return model
def mnasnet1_0(pretrained=False, progress=True, **kwargs):
"""MNASNet with depth multiplier of 1.0 from
`"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
<https://arxiv.org/pdf/1807.11626.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = MNASNet(1.0, **kwargs)
if pretrained:
_load_pretrained("mnasnet1_0", model, progress)
return model
def mnasnet1_3(pretrained=False, progress=True, **kwargs):
"""MNASNet with depth multiplier of 1.3 from
`"MnasNet: Platform-Aware Neural Architecture Search for Mobile"
<https://arxiv.org/pdf/1807.11626.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = MNASNet(1.3, **kwargs)
if pretrained:
_load_pretrained("mnasnet1_3", model, progress)
return model
if __name__ == '__main__':
import torch
model = mnasnet0_5(pretrained=False)
print(model)
data = torch.zeros(1, 3, 224, 224)
results = model.forward(data)
for key in results.keys():
print(key, results[key].size())
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/backbone/mnasnet.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/backbone/mnasnet.py",
"repo_id": "Cream",
"token_count": 5326
}
| 289 |
# ------------------------------------------------------------------------------
# DeepLabV3+ meta architecture.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from collections import OrderedDict
import torch
from torch import nn
from .base import BaseSegmentationModel
from segmentation.model.decoder import DeepLabV3PlusDecoder
from segmentation.utils import AverageMeter
__all__ = ["DeepLabV3Plus"]
class DeepLabV3Plus(BaseSegmentationModel):
"""
Implements DeepLabV3+ model from
`"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1802.02611>`_.
Arguments:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"out" for the last feature map used, and "aux" if an auxiliary classifier
is used.
in_channels (int): number of input channels from the backbone
feature_key (str): name of input feature from backbone
low_level_channels (int): channels of low-level features
low_level_key (str): name of low-level features used in decoder
low_level_channels_project (int): channels of low-level features after projection in decoder
decoder_channels (int): number of channels in decoder
atrous_rates (tuple): atrous rates for ASPP
num_classes (int): number of classes
semantic_loss (nn.Module): loss function
semantic_loss_weight (float): loss weight
"""
def __init__(self, backbone, in_channels, feature_key, low_level_channels, low_level_key,
low_level_channels_project, decoder_channels, atrous_rates, num_classes,
semantic_loss, semantic_loss_weight, **kwargs):
decoder = DeepLabV3PlusDecoder(in_channels, feature_key, low_level_channels, low_level_key,
low_level_channels_project, decoder_channels, atrous_rates, num_classes)
super(DeepLabV3Plus, self).__init__(backbone, decoder)
self.semantic_loss = semantic_loss
self.semantic_loss_weight = semantic_loss_weight
self.loss_meter_dict = OrderedDict()
self.loss_meter_dict['Loss'] = AverageMeter()
# Initialize parameters.
self._init_params()
def loss(self, results, targets=None):
batch_size = results['semantic'].size(0)
if targets is not None:
semantic_loss = self.semantic_loss(results['semantic'], targets['semantic']) * self.semantic_loss_weight
self.loss_meter_dict['Loss'].update(semantic_loss.detach().cpu().item(), batch_size)
results['loss'] = semantic_loss
return results
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3plus.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/meta_arch/deeplabv3plus.py",
"repo_id": "Cream",
"token_count": 1035
}
| 290 |
# ------------------------------------------------------------------------------
# Saves output to png image for visualization.
# Reference: https://github.com/tensorflow/models/blob/master/research/deeplab/utils/save_annotation.py
# Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import numpy as np
import PIL.Image as img
from PIL import ImageDraw
from .flow_vis import flow_compute_color
# Refence: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L14
_COLORS = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32).reshape(-1, 3)
def random_color(rgb=False, maximum=255):
"""
Reference: https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/colormap.py#L111
Args:
rgb (bool): whether to return RGB colors or BGR colors.
maximum (int): either 255 or 1
Returns:
ndarray: a vector of 3 numbers
"""
idx = np.random.randint(0, len(_COLORS))
ret = _COLORS[idx] * maximum
if not rgb:
ret = ret[::-1]
return ret
def save_annotation(label,
save_dir,
filename,
add_colormap=True,
normalize_to_unit_values=False,
scale_values=False,
colormap=None,
image=None):
"""Saves the given label to image on disk.
Args:
label: The numpy array to be saved. The data will be converted
to uint8 and saved as png image.
save_dir: String, the directory to which the results will be saved.
filename: String, the image filename.
add_colormap: Boolean, add color map to the label or not.
normalize_to_unit_values: Boolean, normalize the input values to [0, 1].
scale_values: Boolean, scale the input values to [0, 255] for visualization.
colormap: A colormap for visualizing segmentation results.
image: merge label with image if provided
"""
# Add colormap for visualizing the prediction.
if add_colormap:
colored_label = label_to_color_image(label, colormap)
else:
colored_label = label
if normalize_to_unit_values:
min_value = np.amin(colored_label)
max_value = np.amax(colored_label)
range_value = max_value - min_value
if range_value != 0:
colored_label = (colored_label - min_value) / range_value
if scale_values:
colored_label = 255. * colored_label
if image is not None:
colored_label = 0.5 * colored_label + 0.5 * image
pil_image = img.fromarray(colored_label.astype(dtype=np.uint8))
with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
pil_image.save(f, 'PNG')
def label_to_color_image(label, colormap=None):
"""Adds color defined by the dataset colormap to the label.
Args:
label: A 2D array with integer type, storing the segmentation label.
colormap: A colormap for visualizing segmentation results.
Returns:
result: A 2D array with floating type. The element of the array
is the color indexed by the corresponding element in the input label
to the dataset color map.
Raises:
ValueError: If label is not of rank 2 or its value is larger than color
map maximum entry.
"""
if label.ndim != 2:
raise ValueError('Expect 2-D input label. Got {}'.format(label.shape))
if colormap is None:
raise ValueError('Expect a valid colormap.')
return colormap[label]
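# --- Hedged worked example (added for clarity, not part of the original utilities) ---
# label_to_color_image is a plain lookup: every label id indexes one row of a
# (num_colors, 3) colormap.
def _colormap_lookup_example():
    colormap = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0]], dtype=np.uint8)
    label = np.array([[0, 1], [2, 1]])
    colored = label_to_color_image(label, colormap)
    assert colored.shape == (2, 2, 3)
    assert (colored[0, 1] == np.array([255, 0, 0])).all()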
def save_instance_annotation(label,
save_dir,
filename,
stuff_id=0,
image=None):
"""Saves the given label to image on disk.
Args:
label: The numpy array to be saved. The data will be converted
to uint8 and saved as png image.
save_dir: String, the directory to which the results will be saved.
filename: String, the image filename.
stuff_id: Integer, id that not want to plot.
image: merge label with image if provided
"""
# Add colormap for visualizing the prediction.
ids = np.unique(label)
num_colors = len(ids)
colormap = np.zeros((num_colors, 3), dtype=np.uint8)
# Maps label to continuous value.
for i in range(num_colors):
label[label == ids[i]] = i
colormap[i, :] = random_color(rgb=True, maximum=255)
if ids[i] == stuff_id:
colormap[i, :] = np.array([0, 0, 0])
colored_label = colormap[label]
if image is not None:
colored_label = 0.5 * colored_label + 0.5 * image
pil_image = img.fromarray(colored_label.astype(dtype=np.uint8))
with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
pil_image.save(f, 'PNG')
def save_panoptic_annotation(label,
save_dir,
filename,
label_divisor,
colormap=None,
image=None):
"""Saves the given label to image on disk.
Args:
label: The numpy array to be saved. The data will be converted
to uint8 and saved as png image.
save_dir: String, the directory to which the results will be saved.
filename: String, the image filename.
label_divisor: An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
colormap: A colormap for visualizing segmentation results.
image: merge label with image if provided
"""
if colormap is None:
raise ValueError('Expect a valid colormap.')
# Add colormap to label.
colored_label = np.zeros((label.shape[0], label.shape[1], 3), dtype=np.uint8)
    taken_colors = set([(0, 0, 0)])  # colors are stored as RGB tuples; black is reserved
def _random_color(base, max_dist=30):
new_color = base + np.random.randint(low=-max_dist,
high=max_dist + 1,
size=3)
return tuple(np.maximum(0, np.minimum(255, new_color)))
for lab in np.unique(label):
mask = label == lab
base_color = colormap[lab // label_divisor]
if tuple(base_color) not in taken_colors:
taken_colors.add(tuple(base_color))
color = base_color
else:
while True:
color = _random_color(base_color)
if color not in taken_colors:
taken_colors.add(color)
break
colored_label[mask] = color
if image is not None:
colored_label = 0.5 * colored_label + 0.5 * image
pil_image = img.fromarray(colored_label.astype(dtype=np.uint8))
with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
pil_image.save(f, 'PNG')
def save_center_image(image,
center_points,
save_dir,
filename,
radius=3):
"""Saves image with center points.
Args:
image: The image.
center_points: List of tuple [(y, x)], center point coordinates.
save_dir: String, the directory to which the results will be saved.
filename: String, the image filename.
radius: Int, radius of the center point.
"""
pil_image = img.fromarray(image.astype(dtype=np.uint8))
draw = ImageDraw.Draw(pil_image)
r = radius
assigned_colors = [list(random_color(rgb=True, maximum=255)) + [255] for _ in range(len(center_points))]
for i, point in enumerate(center_points):
leftUpPoint = (point[1] - r, point[0] - r)
rightDownPoint = (point[1] + r, point[0] + r)
twoPointList = [leftUpPoint, rightDownPoint]
draw.ellipse(twoPointList, fill=tuple(assigned_colors[i]))
with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
pil_image.save(f, 'PNG')
def save_heatmap_image(image,
center_heatmap,
save_dir,
filename,
ratio=0.5):
"""Saves image with heatmap.
Args:
image: The image.
center_heatmap: Ndarray, center heatmap.
save_dir: String, the directory to which the results will be saved.
filename: String, the image filename.
        ratio: Float, ratio to mix heatmap and image, out = ratio * heatmap + (1 - ratio) * image.
"""
center_heatmap = center_heatmap[:, :, None] * np.array([255, 0, 0]).reshape((1, 1, 3))
center_heatmap = center_heatmap.clip(0, 255)
image = ratio * center_heatmap + (1 - ratio) * image
pil_image = img.fromarray(image.astype(dtype=np.uint8))
with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
pil_image.save(f, 'PNG')
def save_heatmap_and_center_image(image,
center_heatmap,
center_points,
save_dir,
filename,
ratio=0.5,
radius=25,
binarize_heatmap=True):
"""Saves image with non-negative heatmap and center radius.
Args:
image: The image.
center_heatmap: Ndarray, center heatmap.
center_points: List of tuple [(y, x)], center point coordinates.
save_dir: String, the directory to which the results will be saved.
filename: String, the image filename.
        ratio: Float, ratio to mix heatmap and image, out = ratio * heatmap + (1 - ratio) * image.
radius: Int, radius of the center point.
"""
if binarize_heatmap:
center_heatmap = (center_heatmap[:, :, None] > 0) * np.array([255, 0, 0]).reshape((1, 1, 3))
else:
center_heatmap = center_heatmap[:, :, None] * np.array([255, 0, 0]).reshape((1, 1, 3))
center_heatmap = center_heatmap.clip(0, 255)
image = ratio * center_heatmap + (1 - ratio) * image
pil_image = img.fromarray(image.astype(dtype=np.uint8))
draw = ImageDraw.Draw(pil_image)
r = radius
assigned_colors = [list(random_color(rgb=True, maximum=255)) + [255] for _ in range(len(center_points))]
for i, point in enumerate(center_points):
leftUpPoint = (point[1] - r, point[0] - r)
rightDownPoint = (point[1] + r, point[0] + r)
twoPointList = [leftUpPoint, rightDownPoint]
if binarize_heatmap:
draw.ellipse(twoPointList, outline='blue')
else:
draw.ellipse(twoPointList, fill=tuple(assigned_colors[i]))
with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
pil_image.save(f, 'PNG')
def save_offset_image(offset,
save_dir,
filename):
"""Saves image with heatmap.
Args:
image: The offset to save.
save_dir: String, the directory to which the results will be saved.
filename: String, the image filename.
"""
offset_image = flow_compute_color(offset[:, :, 1], offset[:, :, 0])
pil_image = img.fromarray(offset_image.astype(dtype=np.uint8))
with open('%s/%s.png' % (save_dir, filename), mode='wb') as f:
pil_image.save(f, 'PNG')
|
Cream/CDARTS/CDARTS_segmentation/segmentation/utils/save_annotation.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/utils/save_annotation.py",
"repo_id": "Cream",
"token_count": 6684
}
| 291 |
import numpy as np
class Evaluator(object):
def __init__(self, num_class):
self.num_class = num_class
self.confusion_matrix = np.zeros((self.num_class,)*2)
def Pixel_Accuracy(self):
Acc = np.diag(self.confusion_matrix).sum() / self.confusion_matrix.sum()
return Acc
def Pixel_Accuracy_Class(self):
Acc = np.diag(self.confusion_matrix) / self.confusion_matrix.sum(axis=1)
Acc = np.nanmean(Acc)
return Acc
def Mean_Intersection_over_Union(self):
MIoU = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
MIoU = np.nanmean(MIoU)
return MIoU
def Frequency_Weighted_Intersection_over_Union(self):
freq = np.sum(self.confusion_matrix, axis=1) / np.sum(self.confusion_matrix)
iu = np.diag(self.confusion_matrix) / (
np.sum(self.confusion_matrix, axis=1) + np.sum(self.confusion_matrix, axis=0) -
np.diag(self.confusion_matrix))
FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
return FWIoU
def _generate_matrix(self, gt_image, pre_image):
mask = (gt_image >= 0) & (gt_image < self.num_class)
label = self.num_class * gt_image[mask].astype('int') + pre_image[mask]
count = np.bincount(label, minlength=self.num_class**2)
confusion_matrix = count.reshape(self.num_class, self.num_class)
return confusion_matrix
def add_batch(self, gt_image, pre_image):
assert gt_image.shape == pre_image.shape
self.confusion_matrix += self._generate_matrix(gt_image, pre_image)
def reset(self):
self.confusion_matrix = np.zeros((self.num_class,) * 2)
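# --- Hedged usage example (added for illustration, not part of the original metrics) ---
# With two classes and a single mispredicted pixel, pixel accuracy is 3/4; the
# confusion matrix keeps accumulating over successive add_batch calls until reset().
if __name__ == '__main__':
    evaluator = Evaluator(num_class=2)
    gt = np.array([[0, 0], [1, 1]])
    pred = np.array([[0, 1], [1, 1]])
    evaluator.add_batch(gt, pred)
    assert evaluator.Pixel_Accuracy() == 0.75
    print('mIoU:', evaluator.Mean_Intersection_over_Union())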
|
Cream/CDARTS/CDARTS_segmentation/tools/utils/metrics.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/utils/metrics.py",
"repo_id": "Cream",
"token_count": 865
}
| 292 |
_BASE_: Base-PanopticDeepLab-OS16.yaml
MODEL:
WEIGHTS: "detectron2://DeepLab/R-52.pkl"
PIXEL_MEAN: [123.675, 116.280, 103.530]
PIXEL_STD: [58.395, 57.120, 57.375]
BACKBONE:
NAME: "build_resnet_deeplab_backbone"
RESNETS:
DEPTH: 50
NORM: "SyncBN"
RES5_MULTI_GRID: [1, 2, 4]
STEM_TYPE: "deeplab"
STEM_OUT_CHANNELS: 128
STRIDE_IN_1X1: False
SOLVER:
MAX_ITER: 90000
INPUT:
FORMAT: "RGB"
CROP:
SIZE: (512, 1024)
|
Cream/CDARTS/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/configs/Cityscapes-PanopticSegmentation/panoptic_deeplab_R_52_os16_mg124_poly_90k_bs32_crop_512_1024.yaml",
"repo_id": "Cream",
"token_count": 244
}
| 293 |
from __future__ import division
import os
import sys
import time
import glob
import json
import logging
import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.utils
import torch.nn.functional as F
import torch.optim as optim
import torch.distributed as dist
from tensorboardX import SummaryWriter
import numpy as np
import _init_paths
from ptflops import get_model_complexity_info
from dataloader import get_train_loader, CyclicIterator
from datasets import Cityscapes
import dataloaders
from utils.init_func import init_weight
from utils.lr_scheduler import Iter_LR_Scheduler
from seg_opr.loss_opr import ProbOhemCrossEntropy2d
from eval import SegEvaluator
from test import SegTester
from utils.darts_utils import create_exp_dir, save, plot_op, plot_path_width, objective_acc_lat
from utils.dist_utils import reduce_tensor, ModelEma
from cydas import CyDASseg as Network
import seg_metrics
import yaml
import timm
from timm.optim import create_optimizer
from utils.pyt_utils import AverageMeter, to_cuda, get_loss_info_str, compute_hist, compute_hist_np, load_pretrain
from detectron2.config import get_cfg
from detectron2.engine import launch, default_setup, default_argument_parser
import detectron2.data.transforms as T
from detectron2.structures import BitMasks, ImageList, Instances
from detectron2.data import MetadataCatalog, DatasetMapper, build_detection_train_loader, build_detection_test_loader
from detectron2.config import configurable
from detectron2.data.build import _test_loader_from_config, trivial_batch_collator
from detectron2.data.samplers import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler
from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, MapDataset
from detectron2.projects.panoptic_deeplab import (
PanopticDeeplabDatasetMapper,
add_panoptic_deeplab_config,
)
## dist train
try:
import apex
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
from torch.nn.parallel import DistributedDataParallel as DDP
has_apex = False
def adjust_learning_rate(base_lr, power, optimizer, epoch, total_epoch):
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * power
# The first arg parser parses out only the --config argument, this argument is used to
# load a yaml file containing key-values that override the defaults for the main parser below
config_parser = parser = argparse.ArgumentParser(description='Training Config', add_help=False)
parser.add_argument('-c', '--config', default='../configs/ade/cydas.yaml', type=str, metavar='FILE',
help='YAML config file specifying default arguments')
parser = argparse.ArgumentParser(description='PyTorch Training')
parser.add_argument('--det2_cfg', type=str, default='configs/ADE20K/base.yaml', help='')
parser.add_argument('--save', type=str, default='../OUTPUT/train_', help='')
parser.add_argument('--exp_name', type=str, default='ade20k', help='')
parser.add_argument('--pretrain', type=str, default=None, help='resume path')
parser.add_argument('--resume', type=str, default='../OUTPUT/train/', help='resume path')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--num_classes", default=150, type=int)
parser.add_argument("--max_iteration", default=160000, type=int)
parser.add_argument("--world_size", default=1, type=int)
parser.add_argument("--eval_height", default=1025, type=int, help='train height')
parser.add_argument("--eval_width", default=2049, type=int, help='train width')
parser.add_argument("--test_epoch", default=250, type=int, help='Epochs for test')
parser.add_argument("--batch_size", default=12, type=int, help='batch size')
parser.add_argument("--Fch", default=12, type=int, help='Fch')
parser.add_argument('--stem_head_width', type=float, default=1.0, help='base learning rate')
## new retrain ###
parser.add_argument('--sched', default='step', type=str, metavar='SCHEDULER',
                    help='LR scheduler (default: "step")')
parser.add_argument('--epochs', type=int, default=4000, help='num of training epochs')
parser.add_argument('--dataset', type=str, default='cityscapes', help='pascal or cityscapes')
parser.add_argument('--base_lr', type=float, default=0.05, help='base learning rate')
parser.add_argument('--warmup_start_lr', type=float, default=5e-6, help='warm up learning rate')
parser.add_argument('--lr-step', type=float, default=None)
parser.add_argument('--warmup-iters', type=int, default=1000)
parser.add_argument('--min-lr', type=float, default=None)
parser.add_argument('--layers', type=int, default=20, help='layers')
parser.add_argument('--size_divisibility', type=int, default=32, help='size_divisibility')
parser.add_argument('--crop_size', type=int, default=769, help='image crop size')
parser.add_argument('--resize', type=int, default=769, help='image resize size')
parser.add_argument("--image_height", default=513, type=int, help='train height')
parser.add_argument("--image_width", default=1025, type=int, help='train width')
parser.add_argument('--workers', type=int, default=4, help='number of data loading workers')
parser.add_argument('--dist', type=bool, default=True)
parser.add_argument('--autodeeplab', type=str, default='train_seg')
parser.add_argument('--max-iteration', default=1000000, type=int)
parser.add_argument('--mode', default='poly', type=str, help='how lr decline')
parser.add_argument('--train_mode', type=str, default='iter', choices=['iter', 'epoch'])
parser.add_argument("--data_path", default='/home/hongyuan/data/cityscapes', type=str, help='If specified, replace config.load_path')
parser.add_argument("--load_path", default='', type=str, help='If specified, replace config.load_path')
parser.add_argument("--json_file", default='jsons/0.json', type=str, help='model_arch')
parser.add_argument("--seed", default=12345, type=int, help="random seed")
parser.add_argument('--sync_bn', action='store_false',
help='Enable NVIDIA Apex or Torch synchronized BatchNorm.')
parser.add_argument('--random_sample', action='store_true',
help='Random sample path.')
parser.add_argument('--drop_path_prob', type=float, default=0.0, help='drop path prob')
# Optimizer parameters
parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER',
                    help='Optimizer (default: "sgd")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.0001,
help='weight decay (default: 0.0001)')
# Model Exponential Moving Average
parser.add_argument('--model-ema', action='store_true', default=False,
help='Enable tracking moving average of model weights')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False,
help='Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.')
parser.add_argument('--model-ema-decay', type=float, default=0.9998,
help='decay factor for model weights moving average (default: 0.9998)')
# train val
parser.add_argument('--bn_eps', type=float, default=1e-5, help='bn eps')
parser.add_argument('--bn_momentum', type=float, default=0.01, help='bn momentum')
parser.add_argument('--ignore', type=int, default=255, help='semantic ignore')
parser.add_argument('--eval_flip', action='store_true', default=False,
help='semantic eval flip')
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, 'r') as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
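# Example (hypothetical values) of how the two-stage parsing resolves precedence: with a
# YAML file containing `batch_size: 16`, running
#   python train_ade20k_cydas.py -c ../configs/ade/cydas.yaml --batch_size 8
# first injects 16 as the new default via parser.set_defaults(**cfg), then the explicit
# command-line flag wins, so args.batch_size ends up as 8.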
def build_sem_seg_train_aug(cfg):
augs = [
T.ResizeShortestEdge(
cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
)
]
if cfg.INPUT.CROP.ENABLED:
augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
augs.append(T.RandomFlip())
return augs
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_panoptic_deeplab_config(cfg)
cfg.merge_from_file(args.config_file)
# cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
@configurable(from_config=_test_loader_from_config)
def build_batch_test_loader(dataset, *, mapper, sampler=None, num_workers=0):
"""
    Similar to `build_detection_train_loader`, but uses a small fixed batch size (4 here),
and :class:`InferenceSampler`. This sampler coordinates all workers to
produce the exact set of all samples.
This interface is experimental.
Args:
dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
or a map-style pytorch dataset. They can be obtained by using
:func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
mapper (callable): a callable which takes a sample (dict) from dataset
and returns the format to be consumed by the model.
When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces
indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
which splits the dataset across all workers.
num_workers (int): number of parallel data loading workers
Returns:
DataLoader: a torch DataLoader, that loads the given detection
dataset, with test-time transformation and batching.
Examples:
::
data_loader = build_detection_test_loader(
DatasetRegistry.get("my_test"),
mapper=DatasetMapper(...))
# or, instantiate with a CfgNode:
data_loader = build_detection_test_loader(cfg, "my_test")
"""
if isinstance(dataset, list):
dataset = DatasetFromList(dataset, copy=False)
if mapper is not None:
dataset = MapDataset(dataset, mapper)
if sampler is None:
sampler = InferenceSampler(len(dataset))
    # The upstream detectron2 helper uses 1 image per worker during inference (the
    # standard when reporting inference time in papers); this variant batches 4 images.
    batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, 4, drop_last=False)
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
)
return data_loader
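# Minimal usage sketch (assumes a dataset name registered in cfg.DATASETS.TEST, as in main()
# below); each yielded batch is a list of detectron2 dataset dicts:
#   val_loader = build_batch_test_loader(cfg, cfg.DATASETS.TEST[0])
#   for batch in val_loader:
#       images = [x["image"] for x in batch]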
def main():
args, args_text = _parse_args()
# dist init
torch.distributed.init_process_group(backend='nccl', init_method='env://')
torch.cuda.set_device(args.local_rank)
args.world_size = torch.distributed.get_world_size()
args.local_rank = torch.distributed.get_rank()
args.save = args.save + args.exp_name
# detectron2 data loader ###########################
# det2_args = default_argument_parser().parse_args()
det2_args = args
det2_args.config_file = args.det2_cfg
cfg = setup(det2_args)
mapper = DatasetMapper(cfg, augmentations=build_sem_seg_train_aug(cfg))
det2_dataset = iter(build_detection_train_loader(cfg, mapper=mapper))
det2_val = build_batch_test_loader(cfg, cfg.DATASETS.TEST[0])
    len_det2_train = 20210 // cfg.SOLVER.IMS_PER_BATCH  # 20210 = number of ADE20K training images
if args.local_rank == 0:
create_exp_dir(args.save, scripts_to_save=glob.glob('*.py')+glob.glob('*.sh'))
logger = SummaryWriter(args.save)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
logging.info("args = %s", str(args))
else:
logger = None
# preparation ################
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# config network and criterion ################
gt_down_sampling = 1
min_kept = int(args.batch_size * args.image_height * args.image_width // (16 * gt_down_sampling ** 2))
ohem_criterion = ProbOhemCrossEntropy2d(ignore_label=255, thresh=0.7, min_kept=min_kept, use_weight=False)
# data loader ###########################
num_classes = args.num_classes
with open(args.json_file, 'r') as f:
# dict_a = json.loads(f, cls=NpEncoder)
model_dict = json.loads(f.read())
width_mult_list = [4./12, 6./12, 8./12, 10./12, 1.,]
model = Network(Fch=args.Fch, num_classes=num_classes, stem_head_width=(args.stem_head_width, args.stem_head_width))
last = model_dict["lasts"]
if args.local_rank == 0:
with torch.cuda.device(0):
macs, params = get_model_complexity_info(model, (3, args.eval_height, args.eval_width), as_strings=True,
print_per_layer_stat=True, verbose=True)
logging.info('{:<30} {:<8}'.format('Computational complexity: ', macs))
logging.info('{:<30} {:<8}'.format('Number of parameters: ', params))
with open(os.path.join(args.save, 'args.yaml'), 'w') as f:
f.write(args_text)
init_weight(model, nn.init.kaiming_normal_, torch.nn.BatchNorm2d, args.bn_eps, args.bn_momentum, mode='fan_in', nonlinearity='relu')
if args.pretrain:
model.backbone = load_pretrain(model.backbone, args.pretrain)
model = model.cuda()
# if args.sync_bn:
# if has_apex:
# model = apex.parallel.convert_syncbn_model(model)
# else:
# model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# Optimizer ###################################
base_lr = args.base_lr
if args.opt == "sgd":
optimizer = torch.optim.SGD(model.parameters(), lr=base_lr, momentum=args.momentum, weight_decay=args.weight_decay)
elif args.opt == "adam":
optimizer = torch.optim.Adam(model.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-08)
elif args.opt == "adamw":
optimizer = torch.optim.AdamW(model.parameters(), lr=base_lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=args.weight_decay)
else:
optimizer = create_optimizer(args, model)
if args.sched == "raw":
        lr_scheduler = None
else:
max_iteration = args.epochs * len_det2_train
lr_scheduler = Iter_LR_Scheduler(args, max_iteration, len_det2_train)
start_epoch = 0
if os.path.exists(os.path.join(args.save, 'last.pth.tar')):
args.resume = os.path.join(args.save, 'last.pth.tar')
if args.resume:
model_state_file = args.resume
if os.path.isfile(model_state_file):
checkpoint = torch.load(model_state_file, map_location=torch.device('cpu'))
start_epoch = checkpoint['start_epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logging.info('Loaded checkpoint (starting from iter {})'.format(checkpoint['start_epoch']))
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume=None)
if model_ema:
eval_model = model_ema.ema
else:
eval_model = model
if has_apex:
model = DDP(model, delay_allreduce=True)
else:
model = DDP(model, device_ids=[args.local_rank])
best_valid_iou = 0.
best_epoch = 0
temp_iou = 0.
avg_loss = -1
logging.info("rank: {} world_size: {}".format(args.local_rank, args.world_size))
for epoch in range(start_epoch, args.epochs):
if args.local_rank == 0:
logging.info(args.load_path)
logging.info(args.save)
logging.info("lr: " + str(optimizer.param_groups[0]['lr']))
# training
drop_prob = args.drop_path_prob * epoch / args.epochs
# model.module.drop_path_prob(drop_prob)
train_mIoU = train(len_det2_train, det2_dataset, model, model_ema, ohem_criterion, num_classes, lr_scheduler, optimizer, logger, epoch, args, cfg)
# torch.cuda.empty_cache()
# if epoch > args.epochs // 3:
if epoch >= 0:
temp_iou, avg_loss = validation(det2_val, eval_model, ohem_criterion, num_classes, args, cfg)
torch.cuda.empty_cache()
if args.local_rank == 0:
logging.info("Epoch: {} train miou: {:.2f}".format(epoch+1, 100*train_mIoU))
if temp_iou > best_valid_iou:
best_valid_iou = temp_iou
best_epoch = epoch
if model_ema is not None:
torch.save({
'start_epoch': epoch + 1,
'state_dict': model_ema.ema.state_dict(),
'optimizer': optimizer.state_dict(),
# 'lr_scheduler': lr_scheduler.state_dict(),
}, os.path.join(args.save, 'best_checkpoint.pth.tar'))
else:
torch.save({
'start_epoch': epoch + 1,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
# 'lr_scheduler': lr_scheduler.state_dict(),
}, os.path.join(args.save, 'best_checkpoint.pth.tar'))
logger.add_scalar("mIoU/val", temp_iou, epoch)
logging.info("[Epoch %d/%d] valid mIoU %.4f eval loss %.4f"%(epoch + 1, args.epochs, temp_iou, avg_loss))
logging.info("Best valid mIoU %.4f Epoch %d"%(best_valid_iou, best_epoch))
if model_ema is not None:
torch.save({
'start_epoch': epoch + 1,
'state_dict': model_ema.ema.state_dict(),
'optimizer': optimizer.state_dict(),
# 'lr_scheduler': lr_scheduler.state_dict(),
}, os.path.join(args.save, 'last.pth.tar'))
else:
torch.save({
'start_epoch': epoch + 1,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
# 'lr_scheduler': lr_scheduler.state_dict(),
}, os.path.join(args.save, 'last.pth.tar'))
def train(len_det2_train, det2_dataset, model, model_ema, criterion, num_classes, lr_scheduler, optimizer, logger, epoch, args, cfg):
model.train()
pixel_mean = cfg.MODEL.PIXEL_MEAN
pixel_std = cfg.MODEL.PIXEL_STD
pixel_mean = torch.Tensor(pixel_mean).view(-1, 1, 1).cuda()
pixel_std = torch.Tensor(pixel_std).view(-1, 1, 1).cuda()
metric = seg_metrics.Seg_Metrics(n_classes=num_classes)
lamb = 0.2
# for i, sample in enumerate(train_loader):
for i in range(len_det2_train):
cur_iter = epoch * len_det2_train + i
lr_scheduler(optimizer, cur_iter)
det2_data = next(det2_dataset)
det2_inputs = [x["image"].cuda(non_blocking=True) for x in det2_data]
det2_inputs = [(x - pixel_mean) / pixel_std for x in det2_inputs]
det2_inputs = ImageList.from_tensors(det2_inputs, args.size_divisibility).tensor
b, c, h, w = det2_inputs.shape
if h % 32 != 0 or w % 32 != 0:
logging.info("pass bad data!")
continue
det2_targets = [x["sem_seg"].cuda(non_blocking=True) for x in det2_data]
det2_targets = ImageList.from_tensors(det2_targets, args.size_divisibility, args.ignore).tensor
N = det2_inputs.size(0)
loss = 0
description = ""
logits8, logits16, logits32 = model(det2_inputs)
loss = loss + criterion(logits8, det2_targets)
if logits16 is not None:
loss = loss + lamb * criterion(logits16, det2_targets)
if logits32 is not None:
loss = loss + lamb * criterion(logits32, det2_targets)
inter, union = seg_metrics.batch_intersection_union(logits8.data, det2_targets, num_classes)
inter = reduce_tensor(torch.FloatTensor(inter).cuda(), args.world_size)
union = reduce_tensor(torch.FloatTensor(union).cuda(), args.world_size)
metric.update(inter.cpu().numpy(), union.cpu().numpy(), N)
if args.local_rank == 0:
description += "[mIoU%d: %.3f]"%(0, metric.get_scores())
torch.cuda.synchronize()
reduced_loss = loss
reduced_loss = reduce_tensor(reduced_loss.data, args.world_size)
if args.local_rank == 0 and i % 20 == 0:
logger.add_scalar('loss/train', reduced_loss, epoch*len_det2_train+i)
logging.info('epoch: {0}\t''iter: {1}/{2}\t''lr: {3:.6f}\t''loss: {4:.4f}'.format(
epoch + 1, i + 1, len_det2_train, lr_scheduler.get_lr(optimizer), reduced_loss))
loss.backward()
optimizer.step()
optimizer.zero_grad()
torch.cuda.synchronize()
if model_ema is not None:
model_ema.update(model)
return metric.get_scores()
def validation(val_loader, model, criterion, n_classes, args, cfg):
device = torch.device('cuda:{}'.format(args.local_rank))
pixel_mean = cfg.MODEL.PIXEL_MEAN
pixel_std = cfg.MODEL.PIXEL_STD
pixel_mean = torch.Tensor(pixel_mean).view(-1, 1, 1).cuda()
pixel_std = torch.Tensor(pixel_std).view(-1, 1, 1).cuda()
model.eval()
test_loss = 0.0
hist_size = (n_classes, n_classes)
hist = torch.zeros(hist_size, dtype=torch.float32).cuda()
for i, sample in enumerate(val_loader):
image = [x["image"].cuda(non_blocking=True) for x in sample]
image = [(x - pixel_mean) / pixel_std for x in image]
image = ImageList.from_tensors(image, args.size_divisibility).tensor
target = [x["sem_seg"].cuda(non_blocking=True) for x in sample]
target = ImageList.from_tensors(target, args.size_divisibility, args.ignore).tensor
N, H, W = target.shape
probs = torch.zeros((N, n_classes, H, W)).cuda()
probs.requires_grad = False
torch.cuda.synchronize()
if args.local_rank==0:
logging.info("Evaluation [{}/{}]".format(i+1, len(val_loader)))
with torch.no_grad():
output = model(image)
prob = F.softmax(output, 1)
probs += prob
loss = criterion(output, target).detach().data
dist.all_reduce(loss, dist.ReduceOp.SUM)
test_loss += loss
if args.eval_flip:
output = model(torch.flip(image, dims=(3,)))
output = torch.flip(output, dims=(3,))
prob = F.softmax(output, 1)
probs += prob
loss = criterion(output, target).detach().data
dist.all_reduce(loss, dist.ReduceOp.SUM)
test_loss += loss
preds = torch.argmax(probs, dim=1)
hist_once = compute_hist(preds, target, n_classes, args.ignore)
hist = hist + hist_once
torch.cuda.synchronize()
if args.eval_flip:
        avg_loss = test_loss / (2 * len(val_loader))  # both the normal and the flipped pass contribute a loss term
else:
avg_loss = test_loss / len(val_loader)
dist.all_reduce(hist, dist.ReduceOp.SUM)
hist = hist.cpu().numpy().astype(np.float32)
IOUs = np.diag(hist) / (np.sum(hist, axis=0) + np.sum(hist, axis=1) - np.diag(hist))
mIOU = np.mean(IOUs)
return mIOU*100, avg_loss
if __name__ == '__main__':
main()
|
Cream/CDARTS/CDARTS_segmentation/train/train_ade20k_cydas.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/train_ade20k_cydas.py",
"repo_id": "Cream",
"token_count": 10511
}
| 294 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
import copy
from models.search_cells import SearchCell
from models.augment_cells import InferCell
from models.aux_head import DistillHeadCIFAR
from models.ops import ResNetBasicblock, OPS, NAS_BENCH_201
from utils.genotypes import Structure
class CDARTSController(nn.Module):
""" CDARTS Controller"""
def __init__(self, config, criterion, n_nodes=4, stem_multiplier=3, track_running_stats=True):
"""
args:
"""
super(CDARTSController, self).__init__()
# some settings
self.n_nodes = n_nodes
self.criterion = criterion
self.layer_num = config.layer_num
self.c_in = config.input_channels
self.num_classes = config.n_classes
# cifar10 or imagenet
self.model_type = config.model_type
self.stem_multiplier = stem_multiplier
self.init_channel = config.init_channels
self.ensemble_sum = config.ensemble_sum
self.use_ensemble_param = config.ensemble_param
self.bn_affine = config.bn_affine
self.fix_head = config.fix_head
self.share_fc = config.share_fc
self.layers = [6, 6, 5]
self.layers_reduction = [True, True, False]
self.augment_layers = [6, 6, 5]
self.num_edge = None
self.edge2index = None
self.nas_genotype = None
self.cell_connects = {}
self.search_space = NAS_BENCH_201
self.op_names = copy.deepcopy(self.search_space)
self.track_running_stats = track_running_stats
self.fc_super = None
self.fc_nas = None
self.distill_aux_c1 = None
self.distill_aux_c2 = None
self.feature_extractor = None
self.gap = nn.AdaptiveAvgPool2d(1)
self.nas_layers = nn.ModuleList([None, None, None])
self.super_layers = nn.ModuleList()
self.super_layers_arch = nn.ModuleList()
self.super_layers_pool = nn.ModuleList()
self.super_layers_pool_arch = nn.ModuleList()
self.model_main = None
self.build_init_model()
######################## ---------------------------- ########################
######################## Functions for update modules ########################
######################## ---------------------------- ########################
def build_init_model(self):
self.extractor_grad = True
if self.model_type == 'cifar':
self.feature_extractor = self.cifar_stem(self.init_channel * self.stem_multiplier)
else:
raise Exception("error! not support now!")
c_p = self.init_channel * self.stem_multiplier
c_cur = self.init_channel
for layer_idx in range(self.layer_num):
reduction = self.layers_reduction[layer_idx]
super_layer = self.add_super_layer(c_cur, c_p, reduction, self.layers[layer_idx])
super_layer_pool = self.add_super_layer(c_cur, c_p, reduction, self.augment_layers[layer_idx])
self.super_layers.append(super_layer)
self.super_layers_pool.append(super_layer_pool)
if reduction:
c_cur = c_cur * 2
else:
c_cur = c_cur
c_p = c_cur
if layer_idx == self.layer_num-3:
self.distill_aux_c1 = c_p
if layer_idx == self.layer_num-2:
self.distill_aux_c2 = c_p
self.fc_super = nn.Linear(c_p, self.num_classes)
if self.share_fc:
self.fc_nas = self.fc_super
else:
self.fc_nas = nn.Linear(c_p, self.num_classes)
if self.use_ensemble_param:
self.ensemble_param = nn.Parameter(0.333*torch.rand(3), requires_grad=True)
else:
self.ensemble_param = nn.Parameter(0.333*torch.ones(3), requires_grad=False)
if self.model_type == 'cifar':
self.distill_aux_head1 = DistillHeadCIFAR(self.distill_aux_c1, 6, self.num_classes, bn_affine=self.bn_affine)
self.distill_aux_head2 = DistillHeadCIFAR(self.distill_aux_c2, 6, self.num_classes, bn_affine=self.bn_affine)
else:
raise Exception("error! not support now!")
self._arch_parameters = nn.Parameter( 1e-3*torch.randn(self.num_edge, len(self.search_space)) )
self.fix_structure()
def fix_structure(self):
if self.fix_head:
for n, p in self.distill_aux_head1.named_parameters():
p.requires_grad = False
for n, p in self.distill_aux_head2.named_parameters():
p.requires_grad = False
def build_nas_model(self, genotype):
c_p = self.init_channel * self.stem_multiplier
c_cur = self.init_channel
for i in range(self.layer_num):
reduction = self.layers_reduction[i]
self.nas_layers[i] = self.add_nas_layer(c_cur, c_p, reduction, genotype, self.augment_layers[i])
if reduction:
c_cur = c_cur * 2
else:
c_cur = c_cur
c_p = c_cur
def param_copy_plus(self, target_model, model):
if model:
for target_param, param in zip(target_model.parameters(), model.parameters()):
target_param.data.copy_(param.data)
def param_copy_plus1(self, target_model, model):
model_dict_keys = model.state_dict().keys()
for n, p in target_model.named_parameters():
if n in model_dict_keys:
p.data.copy_(model.state_dict()[n])
def copy_params_from_super_layer(self):
for layer_idx in range(self.layer_num):
super_layer = self.super_layers_pool[layer_idx]
nas_layer = self.nas_layers[layer_idx]
for super_cell, nas_cell in zip(super_layer, nas_layer):
if isinstance(super_cell, ResNetBasicblock) and isinstance(nas_cell, ResNetBasicblock):
self.param_copy_plus(nas_cell, super_cell)
else:
for edge_key, nas_op in zip(super_cell._modules['edges'].keys(), nas_cell._modules['layers']):
self.param_copy_plus(nas_op, super_cell._modules['edges'][edge_key][self.cell_connects[edge_key]])
def copy_params_from_nas_layer(self):
for layer_idx in range(self.layer_num):
super_layer = self.super_layers_pool[layer_idx]
nas_layer = self.nas_layers[layer_idx]
for super_cell, nas_cell in zip(super_layer, nas_layer):
if isinstance(super_cell, ResNetBasicblock) and isinstance(nas_cell, ResNetBasicblock):
self.param_copy_plus(super_cell, nas_cell)
else:
for edge_key, nas_op in zip(super_cell._modules['edges'].keys(), nas_cell._modules['layers']):
self.param_copy_plus(super_cell._modules['edges'][edge_key][self.cell_connects[edge_key]], nas_op)
######################## -------------------------- ########################
######################## Functions for layer search ########################
######################## -------------------------- ########################
def add_super_layer(self, C_cur, C_p, reduction_cur=False, cell_num=3):
cells = nn.ModuleList()
reduction_idx = cell_num - 1
for i in range(cell_num):
if i == reduction_idx and reduction_cur:
C_cur *= 2
reduction = True
else:
reduction = False
if reduction:
cell = ResNetBasicblock(C_p, C_cur, 2)
else:
cell = SearchCell(C_p, C_cur, 1, self.n_nodes, self.search_space, self.bn_affine, self.track_running_stats)
if self.num_edge is None: self.num_edge, self.edge2index = cell.num_edges, cell.edge2index
else: assert self.num_edge == cell.num_edges and self.edge2index == cell.edge2index, 'invalid {:} vs. {:}.'.format(self.num_edge, cell.num_edges)
cells.append(cell)
C_p = cell.out_dim
return cells
######################## ---------------------------- ########################
######################## Functions for layer generate ########################
######################## ---------------------------- ########################
def add_nas_layer(self, C_cur, C_p, reduction_cur, genotype, cell_num=3):
cells = nn.ModuleList()
reduction_idx = cell_num - 1
for i in range(cell_num):
if i == reduction_idx and reduction_cur:
C_cur *= 2
reduction = True
else:
reduction = False
if reduction:
cell = ResNetBasicblock(C_p, C_cur, 2, True)
else:
cell = InferCell(genotype, C_p, C_cur, 1)
cells.append(cell)
C_p = cell.out_dim
return cells
######################## ---------------------------- ########################
######################## Functions for stem ########################
######################## ---------------------------- ########################
def cifar_stem(self, init_channel):
C_in = self.c_in
C_cur = init_channel
feature_extractor = nn.ModuleList()
stem = nn.Sequential(
nn.Conv2d(C_in, C_cur, 3, 1, 1, bias=False),
nn.BatchNorm2d(C_cur)
)
feature_extractor.append(stem)
return feature_extractor
######################## ---------------------------- ########################
######################## Functions for forward ########################
######################## ---------------------------- ########################
def extract_features(self, im):
# feature_extractor is nn.ModuleList()
if len(self.feature_extractor) == 1:
s0 = self.feature_extractor[0](im)
s1 = s0
return [s0, s1]
elif len(self.feature_extractor) == 2:
s0 = self.feature_extractor[0](im)
s1 = self.feature_extractor[1](s0)
return [s0, s1]
else:
raise NotImplementedError
def get_aux_logits(self, idx, s1):
if idx == self.layer_num-3:
return self.distill_aux_head1(s1)
if idx == self.layer_num-2:
return self.distill_aux_head2(s1)
return None
def forward(self, x, super_flag=True, updateType='alpha'):
if super_flag:
super_layers = self.super_layers
nas_layers_num = 0
super_layers_num = len(self.super_layers)
else:
nas_layers = self.nas_layers
nas_layers_num = len(self.nas_layers)
super_layers_num = 0
outputs = []
s0, s1 = self.extract_features(x)
for i in range(nas_layers_num):
s1 = self.forward_nas_layer(s1, nas_layers[i])
logit = self.get_aux_logits(i, s1)
if logit is not None:
outputs.append(logit)
for j in range(super_layers_num):
k = nas_layers_num + j
s1 = self.forward_super_layer(s1, super_layers[k], updateType)
logit = self.get_aux_logits(k, s1)
if logit is not None:
outputs.append(logit)
out = self.gap(s1)
out = out.view(out.size(0), -1) # flatten
if super_flag:
logits = self.fc_super(out)
else:
logits = self.fc_nas(out)
outputs.append(logits)
logits_output = logits
ensemble_param = F.softmax(self.ensemble_param, dim=0)
if self.ensemble_sum:
em_output = ensemble_param[0] * outputs[0] + ensemble_param[1] * outputs[1] + ensemble_param[2] * outputs[2]
else:
em_output = torch.cat((ensemble_param[0] * outputs[0], ensemble_param[1] * outputs[1], ensemble_param[2] * outputs[2]), 0)
return logits_output, em_output
def forward_super_layer(self, s1, super_layer, updateType='alpha'):
if updateType == 'weight':
alphas = self._arch_parameters
else:
alphas = F.softmax(self._arch_parameters, dim=-1)
for cell in super_layer:
if isinstance(cell, SearchCell):
s1 = cell(s1, alphas)
else:
s1 = cell(s1)
return s1
def forward_nas_layer(self, s1, nas_layer):
for cell in nas_layer:
s1 = cell(s1)
return s1
def loss(self, X, y):
        # forward() returns (logits, ensemble_output); the criterion only needs the main logits
        logits, _ = self.forward(X)
        return self.criterion(logits, y)
def genotype(self):
genotypes = []
for i in range(1, self.n_nodes):
xlist = []
for j in range(i):
node_str = '{:}<-{:}'.format(i, j)
with torch.no_grad():
weights = self._arch_parameters[ self.edge2index[node_str] ]
op_name = self.op_names[ weights.argmax().item() ]
self.cell_connects[node_str] = weights.argmax().item()
xlist.append((op_name, j))
genotypes.append( tuple(xlist) )
self.nas_genotype = Structure(genotypes)
return self.nas_genotype
def show_alphas(self):
with torch.no_grad():
return 'arch-parameters :\n{:}'.format( nn.functional.softmax(self._arch_parameters, dim=-1).cpu())
def get_message(self):
string = self.extra_repr()
for i, cell in enumerate(self.cells):
string += '\n {:02d}/{:02d} :: {:}'.format(i, len(self.cells), cell.extra_repr())
return string
def _save_arch_parameters(self):
self._saved_arch_parameters = self._arch_parameters.clone()
def softmax_arch_parameters(self):
self._save_arch_parameters()
self._arch_parameters.data.copy_(F.softmax(self._arch_parameters, dim=-1))
def restore_arch_parameters(self):
self._arch_parameters.data.copy_(self._saved_arch_parameters)
del self._saved_arch_parameters
def arch_parameters(self):
return [self._arch_parameters]
def l1_loss(self):
return torch.mean(torch.abs(self._arch_parameters[:, 0:1]))
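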
|
Cream/CDARTS/benchmark201/models/cdarts_controller.py/0
|
{
"file_path": "Cream/CDARTS/benchmark201/models/cdarts_controller.py",
"repo_id": "Cream",
"token_count": 6852
}
| 295 |
""" CNN cell for architecture search """
import torch
import torch.nn as nn
from lib.models import ops
class SearchCell(nn.Module):
""" Cell for search
Each edge is mixed and continuous relaxed.
"""
def __init__(self, n_nodes, C_pp, C_p, C, reduction_p, reduction, is_slim=False):
"""
Args:
n_nodes: # of intermediate n_nodes
C_pp: C_out[k-2]
C_p : C_out[k-1]
C : C_in[k] (current)
reduction_p: flag for whether the previous cell is reduction cell or not
            reduction: flag for whether the current cell is reduction cell or not
            is_slim: forwarded to ops.MixedOp when building each mixed edge
        """
super().__init__()
self.reduction = reduction
self.n_nodes = n_nodes
# If previous cell is reduction cell, current input size does not match with
# output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing.
if reduction_p:
self.preproc0 = ops.FactorizedReduce(C_pp, C, affine=False)
else:
self.preproc0 = ops.StdConv(C_pp, C, 1, 1, 0, affine=False)
self.preproc1 = ops.StdConv(C_p, C, 1, 1, 0, affine=False)
# generate dag
self.dag = nn.ModuleList()
for i in range(self.n_nodes):
self.dag.append(nn.ModuleList())
for j in range(2+i): # include 2 input nodes
# reduction should be used only for input node
stride = 2 if reduction and j < 2 else 1
op = ops.MixedOp(C, stride, is_slim)
self.dag[i].append(op)
def forward(self, s0, s1, w_dag, w_edge):
s0 = self.preproc0(s0)
s1 = self.preproc1(s1)
states = [s0, s1]
for edges, w_list, w_edge_list in zip(self.dag, w_dag, w_edge):
s_cur = sum(w_edge_list[i] * edges[i](s, w) for i, (s, w) in enumerate(zip(states, w_list)))
states.append(s_cur)
s_out = torch.cat(states[2:], dim=1)
return s_out
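# Shape sketch (illustrative numbers): with n_nodes=4 and C=16, forward() keeps the two
# preprocessed inputs plus four C-channel intermediate states and concatenates states[2:],
# so s_out has 4 * 16 = 64 channels; in a reduction cell the edges touching the two input
# nodes use stride 2, halving the spatial resolution.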
|
Cream/CDARTS/lib/models/search_cells.py/0
|
{
"file_path": "Cream/CDARTS/lib/models/search_cells.py",
"repo_id": "Cream",
"token_count": 962
}
| 296 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
import torch
import numpy as np
import torch.nn.functional as F
from copy import deepcopy
# Prioritized Path Board
class PrioritizedBoard():
def __init__(self, cfg, CHOICE_NUM=6, sta_num=(4, 4, 4, 4, 4), acc_gap=5):
self.cfg = cfg
self.prioritized_board = []
self.choice_num = CHOICE_NUM
self.sta_num = sta_num
self.acc_gap = acc_gap
# select teacher from prioritized board
def select_teacher(self, model, random_cand):
if self.cfg.SUPERNET.PICK_METHOD == 'top1':
meta_value, teacher_cand = 0.5, sorted(
self.prioritized_board, reverse=True)[0][3]
elif self.cfg.SUPERNET.PICK_METHOD == 'meta':
meta_value, cand_idx, teacher_cand = -1000000000, -1, None
for now_idx, item in enumerate(self.prioritized_board):
inputx = item[4]
output = F.softmax(model(inputx, random_cand), dim=1)
weight = model.module.forward_meta(output - item[5])
if weight > meta_value:
meta_value = weight
cand_idx = now_idx
teacher_cand = self.prioritized_board[cand_idx][3]
assert teacher_cand is not None
meta_value = torch.sigmoid(-weight)
else:
raise ValueError('Method Not supported')
return meta_value, teacher_cand
def board_size(self):
return len(self.prioritized_board)
# get prob from config file
def get_prob(self):
if self.cfg.SUPERNET.HOW_TO_PROB == 'even' or (
self.cfg.SUPERNET.HOW_TO_PROB == 'teacher' and len(self.prioritized_board) == 0):
return None
elif self.cfg.SUPERNET.HOW_TO_PROB == 'pre_prob':
return self.cfg.SUPERNET.PRE_PROB
elif self.cfg.SUPERNET.HOW_TO_PROB == 'teacher':
op_dict = {}
for i in range(self.choice_num):
op_dict[i] = 0
for item in self.prioritized_board:
cand = item[3]
for block in cand:
for op in block:
op_dict[op] += 1
sum_op = 0
for i in range(self.choice_num):
sum_op = sum_op + op_dict[i]
prob = []
for i in range(self.choice_num):
prob.append(float(op_dict[i]) / sum_op)
del op_dict, sum_op
return prob
# sample random architecture
def get_cand_with_prob(self, prob=None):
if prob is None:
get_random_cand = [
np.random.choice(
self.choice_num,
item).tolist() for item in self.sta_num]
else:
get_random_cand = [
np.random.choice(
self.choice_num,
item,
prob).tolist() for item in self.sta_num]
return get_random_cand
def isUpdate(self, current_epoch, prec1, flops):
if current_epoch <= self.cfg.SUPERNET.META_STA_EPOCH:
return False
if len(self.prioritized_board) < self.cfg.SUPERNET.POOL_SIZE:
return True
if prec1 > self.prioritized_board[-1][1] + self.acc_gap:
return True
if prec1 > self.prioritized_board[-1][1] and flops < self.prioritized_board[-1][2]:
return True
return False
def update_prioritized_board(self, inputs, teacher_output, outputs, current_epoch, prec1, flops, cand):
if self.isUpdate(current_epoch, prec1, flops):
val_prec1 = prec1
training_data = deepcopy(inputs[:self.cfg.SUPERNET.SLICE].detach())
if len(self.prioritized_board) == 0:
features = deepcopy(outputs[:self.cfg.SUPERNET.SLICE].detach())
else:
features = deepcopy(
teacher_output[:self.cfg.SUPERNET.SLICE].detach())
self.prioritized_board.append(
(val_prec1,
prec1,
flops,
cand,
training_data,
F.softmax(
features,
dim=1)))
self.prioritized_board = sorted(self.prioritized_board, reverse=True)
if len(self.prioritized_board) > self.cfg.SUPERNET.POOL_SIZE:
self.prioritized_board = sorted(self.prioritized_board, reverse=True)
del self.prioritized_board[-1]
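# Usage sketch (hypothetical cfg with the SUPERNET fields referenced above):
#   board = PrioritizedBoard(cfg, CHOICE_NUM=6, sta_num=(4, 4, 4, 4, 4))
#   cand = board.get_cand_with_prob(board.get_prob())            # sample an architecture path
#   meta_value, teacher_cand = board.select_teacher(model, cand) # pick a prioritized teacher
#   board.update_prioritized_board(inputs, teacher_output, outputs, epoch, prec1, flops, cand)
# The board stays sorted by accuracy and is capped at cfg.SUPERNET.POOL_SIZE entries.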
|
Cream/Cream/lib/models/PrioritizedBoard.py/0
|
{
"file_path": "Cream/Cream/lib/models/PrioritizedBoard.py",
"repo_id": "Cream",
"token_count": 2414
}
| 297 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
import os
import shutil
import argparse
import datetime
import _init_paths
from lib.config import cfg
parser = argparse.ArgumentParser(description='Cream of the Crop')
parser.add_argument('mode', type=str, default='train',
help='Mode in ["train", "retrain", "test"]')
parser.add_argument('cfg', type=str,
default='../experiments/configs/baseline.yaml',
help='configuration of cream')
args = parser.parse_args()
cfg.merge_from_file(args.cfg)
def main():
date = datetime.date.today().strftime('%m%d')
save_path = os.path.join(cfg.SAVE_PATH, "{}-{}".format(date, cfg.MODEL))
if not os.path.exists(save_path):
os.mkdir(save_path)
os.system("cp {} {}".format(args.cfg, os.path.join(save_path, 'config.yaml')))
if args.mode == 'train':
os.system("python -m "
"torch.distributed.launch "
"--nproc_per_node={} "
"tools/train.py "
"--cfg {}".format(cfg.NUM_GPU, args.cfg))
elif args.mode == 'retrain':
os.system("python -m "
"torch.distributed.launch "
"--nproc_per_node={} "
"tools/retrain.py "
"--cfg {}".format(cfg.NUM_GPU, args.cfg))
elif args.mode == 'test':
os.system("python -m "
"torch.distributed.launch "
"--nproc_per_node={} "
"tools/test.py "
"--cfg {}".format(cfg.NUM_GPU, args.cfg))
else:
raise ValueError('Mode not supported yet!')
if __name__ == '__main__':
main()
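# Example invocations (paths are illustrative):
#   python tools/main.py train ../experiments/configs/baseline.yaml
#   python tools/main.py test ../experiments/configs/baseline.yaml
# Each mode shells out to torch.distributed.launch with cfg.NUM_GPU processes per node.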
|
Cream/Cream/tools/main.py/0
|
{
"file_path": "Cream/Cream/tools/main.py",
"repo_id": "Cream",
"token_count": 872
}
| 298 |
"""
Implements the knowledge distillation loss, proposed in deit
"""
import torch
from torch.nn import functional as F
class DistillationLoss(torch.nn.Module):
"""
This module wraps a standard criterion and adds an extra knowledge distillation loss by
taking a teacher model prediction and using it as additional supervision.
"""
def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
distillation_type: str, alpha: float, tau: float):
super().__init__()
self.base_criterion = base_criterion
self.teacher_model = teacher_model
assert distillation_type in ['none', 'soft', 'hard']
self.distillation_type = distillation_type
self.alpha = alpha
self.tau = tau
def forward(self, inputs, outputs, labels):
"""
Args:
inputs: The original inputs that are feed to the teacher model
outputs: the outputs of the model to be trained. It is expected to be
either a Tensor, or a Tuple[Tensor, Tensor], with the original output
in the first position and the distillation predictions as the second output
labels: the labels for the base criterion
"""
outputs_kd = None
if not isinstance(outputs, torch.Tensor):
# assume that the model outputs a tuple of [outputs, outputs_kd]
outputs, outputs_kd = outputs
base_loss = self.base_criterion(outputs, labels)
if self.distillation_type == 'none':
return base_loss
if outputs_kd is None:
raise ValueError("When knowledge distillation is enabled, the model is "
"expected to return a Tuple[Tensor, Tensor] with the output of the "
"class_token and the dist_token")
        # don't backprop through the teacher
with torch.no_grad():
teacher_outputs = self.teacher_model(inputs)
if self.distillation_type == 'soft':
T = self.tau
# taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
# with slight modifications
distillation_loss = F.kl_div(
F.log_softmax(outputs_kd / T, dim=1),
F.log_softmax(teacher_outputs / T, dim=1),
reduction='sum',
log_target=True
) * (T * T) / outputs_kd.numel()
elif self.distillation_type == 'hard':
distillation_loss = F.cross_entropy(
outputs_kd, teacher_outputs.argmax(dim=1))
loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha
return loss
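# Minimal self-contained sketch (toy linear models, not the EfficientViT training setup)
# showing how the wrapper combines the base loss with soft distillation:
if __name__ == '__main__':
    import torch.nn as nn
    student, teacher = nn.Linear(8, 10), nn.Linear(8, 10)
    criterion = DistillationLoss(nn.CrossEntropyLoss(), teacher.eval(),
                                 distillation_type='soft', alpha=0.5, tau=1.0)
    x, y = torch.randn(4, 8), torch.randint(0, 10, (4,))
    out = student(x)
    # the trained model is expected to return (outputs, outputs_kd); reuse the same logits here
    loss = criterion(x, (out, out), y)
    print(loss.item())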
|
Cream/EfficientViT/classification/losses.py/0
|
{
"file_path": "Cream/EfficientViT/classification/losses.py",
"repo_id": "Cream",
"token_count": 1171
}
| 299 |
_base_ = 'coco_instance.py'
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
_delete_=True,
type='ClassBalancedDataset',
oversample_thr=1e-3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_train.json',
img_prefix=data_root)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/lvis_v1_val.json',
img_prefix=data_root))
evaluation = dict(metric=['bbox', 'segm'])
|
Cream/EfficientViT/downstream/configs/_base_/datasets/lvis_v1_instance.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/configs/_base_/datasets/lvis_v1_instance.py",
"repo_id": "Cream",
"token_count": 375
}
| 300 |
# model settings
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
type='RepPointsV2Detector',
pretrained=None,
backbone=dict(
type='SwinTransformer',
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4.,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
ape=False,
patch_norm=True,
out_indices=(1, 2, 3),
use_checkpoint=False),
neck=dict(
type='BiFPN',
in_channels=[192, 384, 768],
out_channels=256,
start_level=0,
add_extra_convs=False,
num_outs=5,
no_norm_on_lateral=False,
num_repeat=2,
norm_cfg=norm_cfg
),
bbox_head=dict(
type='RepPointsV2Head',
num_classes=80,
in_channels=256,
feat_channels=256,
point_feat_channels=256,
stacked_convs=3,
shared_stacked_convs=1,
first_kernel_size=3,
kernel_size=1,
corner_dim=64,
num_points=9,
gradient_mul=0.1,
point_strides=[8, 16, 32, 64, 128],
point_base_scale=4,
norm_cfg=norm_cfg,
loss_cls=dict(
type='RPDQualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox_init=dict(type='RPDGIoULoss', loss_weight=1.0),
loss_bbox_refine=dict(type='RPDGIoULoss', loss_weight=2.0),
loss_heatmap=dict(
type='GaussianFocalLoss',
alpha=2.0,
gamma=4.0,
loss_weight=0.25),
loss_offset=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
loss_sem=dict(
type='SEPFocalLoss',
gamma=2.0,
alpha=0.25,
loss_weight=0.1),
transform_method='exact_minmax'),
# training and testing settings
train_cfg = dict(
init=dict(
assigner=dict(type='PointAssignerV2', scale=4, pos_num=1),
allowed_border=-1,
pos_weight=-1,
debug=False),
heatmap=dict(
assigner=dict(type='PointHMAssigner', gaussian_bump=True, gaussian_iou=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False),
refine=dict(
assigner=dict(type='ATSSAssignerV2', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False)),
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100),
)
|
Cream/EfficientViT/downstream/configs/_base_/models/reppointsv2_swin_bifpn.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/configs/_base_/models/reppointsv2_swin_bifpn.py",
"repo_id": "Cream",
"token_count": 1538
}
| 301 |
# Copyright (c) Open-MMLab. All rights reserved.
import io
import os
import os.path as osp
import pkgutil
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory
import torch
import torchvision
from torch.optim import Optimizer
from torch.utils import model_zoo
from torch.nn import functional as F
import mmcv
from mmcv.fileio import FileClient
from mmcv.fileio import load as load_file
from mmcv.parallel import is_module_wrapper
from mmcv.utils import mkdir_or_exist
from mmcv.runner import get_dist_info
ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
def _get_mmcv_home():
mmcv_home = os.path.expanduser(
os.getenv(
ENV_MMCV_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
mkdir_or_exist(mmcv_home)
return mmcv_home
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
all_missing_keys = []
err_msg = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# use _load_from_state_dict to enable checkpoint version control
def load(module, prefix=''):
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True,
all_missing_keys, unexpected_keys,
err_msg)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(module)
load = None # break load->load reference cycle
# ignore "num_batches_tracked" of BN layers
missing_keys = [
key for key in all_missing_keys if 'num_batches_tracked' not in key
]
if unexpected_keys:
err_msg.append('unexpected key in source '
f'state_dict: {", ".join(unexpected_keys)}\n')
if missing_keys:
err_msg.append(
f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(
0, 'The model and loaded state dict do not match exactly\n')
err_msg = '\n'.join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
else:
print(err_msg)
def load_url_dist(url, model_dir=None):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir)
return checkpoint
def load_pavimodel_dist(model_path, map_location=None):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
try:
from pavi import modelcloud
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(downloaded_file, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(
downloaded_file, map_location=map_location)
return checkpoint
def load_fileclient_dist(filename, backend, map_location):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
allowed_backends = ['ceph']
if backend not in allowed_backends:
raise ValueError(f'Load from Backend {backend} is not supported.')
if rank == 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
return checkpoint
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module(f'torchvision.models.{name}')
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls
def get_mmcls_models():
mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
mmcls_urls = load_file(mmcls_json_path)
return mmcls_urls
def get_deprecated_model_names():
deprecate_json_path = osp.join(mmcv.__path__[0],
'model_zoo/deprecated.json')
deprecate_urls = load_file(deprecate_json_path)
assert isinstance(deprecate_urls, dict)
return deprecate_urls
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
def _load_checkpoint(filename, map_location=None):
"""Load checkpoint from somewhere (modelzoo, file, url).
Args:
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str | None): Same as :func:`torch.load`. Default: None.
Returns:
dict | OrderedDict: The loaded checkpoint. It can be either an
OrderedDict storing model weights or a dict containing other
information, which depends on the checkpoint.
"""
if filename.startswith('modelzoo://'):
warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
'use "torchvision://" instead')
model_urls = get_torchvision_models()
model_name = filename[11:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('torchvision://'):
model_urls = get_torchvision_models()
model_name = filename[14:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('open-mmlab://'):
model_urls = get_external_models()
model_name = filename[13:]
deprecated_urls = get_deprecated_model_names()
if model_name in deprecated_urls:
warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '
f'of open-mmlab://{deprecated_urls[model_name]}')
model_name = deprecated_urls[model_name]
model_url = model_urls[model_name]
# check if is url
if model_url.startswith(('http://', 'https://')):
checkpoint = load_url_dist(model_url)
else:
filename = osp.join(_get_mmcv_home(), model_url)
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
elif filename.startswith('mmcls://'):
model_urls = get_mmcls_models()
model_name = filename[8:]
checkpoint = load_url_dist(model_urls[model_name])
checkpoint = _process_mmcls_checkpoint(checkpoint)
elif filename.startswith(('http://', 'https://')):
checkpoint = load_url_dist(filename)
elif filename.startswith('pavi://'):
model_path = filename[7:]
checkpoint = load_pavimodel_dist(model_path, map_location=map_location)
elif filename.startswith('s3://'):
checkpoint = load_fileclient_dist(
filename, backend='ceph', map_location=map_location)
else:
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
def load_checkpoint(model,
filename,
map_location='cpu',
strict=False,
logger=None):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location)
# OrderedDict is a subclass of dict
if not isinstance(checkpoint, dict):
raise RuntimeError(
f'No state_dict found in checkpoint file {filename}')
# get state_dict from checkpoint
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
else:
state_dict = checkpoint
# strip prefix of state_dict
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# for MoBY, load model of online branch
if sorted(list(state_dict.keys()))[0].startswith('encoder'):
state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}
# reshape absolute position embedding
if state_dict.get('absolute_pos_embed') is not None:
absolute_pos_embed = state_dict['absolute_pos_embed']
N1, L, C1 = absolute_pos_embed.size()
N2, C2, H, W = model.absolute_pos_embed.size()
if N1 != N2 or C1 != C2 or L != H*W:
logger.warning("Error in loading absolute_pos_embed, pass")
else:
state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)
# interpolate position bias table if needed
relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
for table_key in relative_position_bias_table_keys:
table_pretrained = state_dict[table_key]
table_current = model.state_dict()[table_key]
L1, nH1 = table_pretrained.size()
L2, nH2 = table_current.size()
if nH1 != nH2:
logger.warning(f"Error in loading {table_key}, pass")
else:
if L1 != L2:
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
table_pretrained_resized = F.interpolate(
table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
size=(S2, S2), mode='bicubic')
state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)
# load state_dict
load_state_dict(model, state_dict, strict, logger)
return checkpoint
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
        OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
return state_dict_cpu
def _save_to_state_dict(module, destination, prefix, keep_vars):
"""Saves module state to `destination` dictionary.
This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
Args:
module (nn.Module): The module to generate state_dict.
destination (dict): A dict where state will be stored.
prefix (str): The prefix for parameters and buffers used in this
module.
"""
for name, param in module._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in module._buffers.items():
# remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
if buf is not None:
destination[prefix + name] = buf if keep_vars else buf.detach()
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
"""Returns a dictionary containing a whole state of the module.
Both parameters and persistent buffers (e.g. running averages) are
included. Keys are corresponding parameter and buffer names.
This method is modified from :meth:`torch.nn.Module.state_dict` to
recursively check parallel module in case that the model has a complicated
structure, e.g., nn.Module(nn.Module(DDP)).
Args:
module (nn.Module): The module to generate state_dict.
destination (OrderedDict): Returned dict for the state of the
module.
prefix (str): Prefix of the key.
keep_vars (bool): Whether to keep the variable property of the
parameters. Default: False.
Returns:
dict: A dictionary containing a whole state of the module.
"""
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
# below is the same as torch.nn.Module.state_dict()
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
destination._metadata[prefix[:-1]] = local_metadata = dict(
version=module._version)
_save_to_state_dict(module, destination, prefix, keep_vars)
for name, child in module._modules.items():
if child is not None:
get_state_dict(
child, destination, prefix + name + '.', keep_vars=keep_vars)
for hook in module._state_dict_hooks.values():
hook_result = hook(module, destination, prefix, local_metadata)
if hook_result is not None:
destination = hook_result
return destination
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
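# Minimal round-trip sketch (toy model and temporary path, not from the mmcv documentation):
if __name__ == '__main__':
    toy_model = torch.nn.Linear(4, 2)
    with TemporaryDirectory() as tmp_dir:
        ckpt_path = osp.join(tmp_dir, 'toy.pth')
        save_checkpoint(toy_model, ckpt_path, meta=dict(epoch=1))
        # strict=True is safe here because the saved and target state_dict keys match exactly
        load_checkpoint(toy_model, ckpt_path, map_location='cpu', strict=True)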
|
Cream/EfficientViT/downstream/mmcv_custom/checkpoint.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/mmcv_custom/checkpoint.py",
"repo_id": "Cream",
"token_count": 7949
}
| 302 |
"""
Implements the knowledge distillation loss
"""
import torch
from torch.nn import functional as F
class DistillationLoss(torch.nn.Module):
"""
This module wraps a standard criterion and adds an extra knowledge distillation loss by
taking a teacher model prediction and using it as additional supervision.
"""
def __init__(self, base_criterion: torch.nn.Module, teacher_model: torch.nn.Module,
distillation_type: str, alpha: float, tau: float):
super().__init__()
self.base_criterion = base_criterion
self.teacher_model = teacher_model
assert distillation_type in ['none', 'soft', 'hard']
self.distillation_type = distillation_type
self.alpha = alpha
self.tau = tau
def forward(self, inputs, outputs, labels):
"""
Args:
            inputs: The original inputs that are fed to the teacher model
outputs: the outputs of the model to be trained. It is expected to be
either a Tensor, or a Tuple[Tensor, Tensor], with the original output
in the first position and the distillation predictions as the second output
labels: the labels for the base criterion
"""
outputs_kd = None
if not isinstance(outputs, torch.Tensor):
# assume that the model outputs a tuple of [outputs, outputs_kd]
outputs, outputs_kd = outputs
base_loss = self.base_criterion(outputs, labels)
if self.distillation_type == 'none':
return base_loss
if outputs_kd is None:
outputs_kd = outputs
        # don't backprop through the teacher
with torch.no_grad():
teacher_outputs = self.teacher_model(inputs)
if self.distillation_type == 'soft':
T = self.tau
# taken from https://github.com/peterliht/knowledge-distillation-pytorch/blob/master/model/net.py#L100
# with slight modifications
distillation_loss = F.kl_div(
F.log_softmax(outputs_kd / T, dim=1),
F.log_softmax(teacher_outputs / T, dim=1),
reduction='sum',
log_target=True
) * (T * T) / outputs_kd.numel()
            # We divide by outputs_kd.numel() to keep the legacy PyTorch behavior.
            # Dividing by outputs_kd.size(0) was also experimented with;
            # see issue 61 (https://github.com/facebookresearch/deit/issues/61) for more details.
elif self.distillation_type == 'hard':
distillation_loss = F.cross_entropy(outputs_kd, teacher_outputs.argmax(dim=1))
loss = base_loss * (1 - self.alpha) + distillation_loss * self.alpha
return loss
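# A minimal, self-contained usage sketch (not part of the original file): the tiny
# linear "models" and random tensors below are hypothetical stand-ins, used only to
# illustrate the call signature DistillationLoss.forward(inputs, outputs, labels).
if __name__ == "__main__":
    teacher = torch.nn.Linear(8, 4)
    student = torch.nn.Linear(8, 4)
    criterion = DistillationLoss(
        base_criterion=torch.nn.CrossEntropyLoss(),
        teacher_model=teacher,
        distillation_type="soft",
        alpha=0.5,
        tau=1.0,
    )
    inputs = torch.randn(2, 8)      # original inputs, also fed to the teacher
    outputs = student(inputs)       # a plain Tensor -> used for both base and KD loss
    labels = torch.tensor([0, 3])   # targets for the base criterion
    print(criterion(inputs, outputs, labels))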
|
Cream/MiniViT/Mini-DeiT/losses.py/0
|
{
"file_path": "Cream/MiniViT/Mini-DeiT/losses.py",
"repo_id": "Cream",
"token_count": 1151
}
| 303 |
# only for evaluation
DATA:
IMG_SIZE: 384
MODEL:
TYPE: swin
NAME: swin_base_patch4_window12_384
SWIN:
EMBED_DIM: 128
DEPTHS: [ 2, 2, 18, 2 ]
NUM_HEADS: [ 4, 8, 16, 32 ]
WINDOW_SIZE: 12
TEST:
CROP: False
|
Cream/MiniViT/Mini-Swin/configs/swin_base_patch4_window12_384.yaml/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/configs/swin_base_patch4_window12_384.yaml",
"repo_id": "Cream",
"token_count": 115
}
| 304 |
import torch
from timm.scheduler.cosine_lr import CosineLRScheduler
from timm.scheduler.step_lr import StepLRScheduler
from timm.scheduler.scheduler import Scheduler
def build_scheduler(config, optimizer, n_iter_per_epoch):
num_steps = int(config.TRAIN.EPOCHS * n_iter_per_epoch)
warmup_steps = int(config.TRAIN.WARMUP_EPOCHS * n_iter_per_epoch)
decay_steps = int(config.TRAIN.LR_SCHEDULER.DECAY_EPOCHS * n_iter_per_epoch)
lr_scheduler = None
if config.TRAIN.LR_SCHEDULER.NAME == 'cosine':
lr_scheduler = CosineLRScheduler(
optimizer,
t_initial=num_steps,
t_mul=1.,
lr_min=config.TRAIN.MIN_LR,
warmup_lr_init=config.TRAIN.WARMUP_LR,
warmup_t=warmup_steps,
cycle_limit=1,
t_in_epochs=False,
)
elif config.TRAIN.LR_SCHEDULER.NAME == 'linear':
lr_scheduler = LinearLRScheduler(
optimizer,
t_initial=num_steps,
lr_min_rate=0.01,
warmup_lr_init=config.TRAIN.WARMUP_LR,
warmup_t=warmup_steps,
t_in_epochs=False,
)
elif config.TRAIN.LR_SCHEDULER.NAME == 'step':
lr_scheduler = StepLRScheduler(
optimizer,
decay_t=decay_steps,
decay_rate=config.TRAIN.LR_SCHEDULER.DECAY_RATE,
warmup_lr_init=config.TRAIN.WARMUP_LR,
warmup_t=warmup_steps,
t_in_epochs=False,
)
return lr_scheduler
class LinearLRScheduler(Scheduler):
def __init__(self,
optimizer: torch.optim.Optimizer,
t_initial: int,
lr_min_rate: float,
warmup_t=0,
warmup_lr_init=0.,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
super().__init__(
optimizer, param_group_field="lr",
noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed,
initialize=initialize)
self.t_initial = t_initial
self.lr_min_rate = lr_min_rate
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.t_in_epochs = t_in_epochs
if self.warmup_t:
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t):
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
t = t - self.warmup_t
total_t = self.t_initial - self.warmup_t
lrs = [v - ((v - v * self.lr_min_rate) * (t / total_t)) for v in self.base_values]
return lrs
def get_epoch_values(self, epoch: int):
if self.t_in_epochs:
return self._get_lr(epoch)
else:
return None
def get_update_values(self, num_updates: int):
if not self.t_in_epochs:
return self._get_lr(num_updates)
else:
return None
|
Cream/MiniViT/Mini-Swin/lr_scheduler.py/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/lr_scheduler.py",
"repo_id": "Cream",
"token_count": 1849
}
| 305 |
MODEL:
NAME: TinyViT-21M-224to384
TYPE: tiny_vit
DROP_PATH_RATE: 0.1
TINY_VIT:
DEPTHS: [ 2, 2, 6, 2 ]
NUM_HEADS: [ 3, 6, 12, 18 ]
WINDOW_SIZES: [ 12, 12, 24, 12 ]
EMBED_DIMS: [96, 192, 384, 576]
DATA:
IMG_SIZE: 384
TRAIN:
EPOCHS: 30
WARMUP_EPOCHS: 5
WEIGHT_DECAY: 1e-8
BASE_LR: 2e-05
WARMUP_LR: 2e-08
MIN_LR: 2e-07
EVAL_BN_WHEN_TRAINING: True
TEST:
CROP: False
AUG:
MIXUP: 0.0
CUTMIX: 0.0
|
Cream/TinyViT/configs/higher_resolution/tiny_vit_21m_224to384.yaml/0
|
{
"file_path": "Cream/TinyViT/configs/higher_resolution/tiny_vit_21m_224to384.yaml",
"repo_id": "Cream",
"token_count": 258
}
| 306 |
import os
import multiprocessing
import tempfile
from typing import Tuple
class _Writer:
def __init__(self, path, rank):
self.msg_queue = multiprocessing.Queue()
self.worker = multiprocessing.Process(
target=self._async_manager_worker_fn,
args=(self.msg_queue, path, rank),
)
self.worker.start()
def write(self, key: str, value: bytes) -> bool:
self.msg_queue.put((key, value))
return True
class _WORKER_MSG:
KILL = 4
def _async_manager_worker_fn(self, msg_queue, path, rank):
# path: xxx/logits_top100_epoch0
rank_name = f'rank{rank}'
# logits_top100_epoch0_rank0
basename = os.path.basename(path) + f'_{rank_name}'
tmp_handle = tempfile.TemporaryDirectory(prefix='tinyvit_' + basename)
# tmp_dir/tinyvit_logits_top100_epoch0_rank0
temp_dirname = tmp_handle.name
tmp_filename = os.path.join(temp_dirname, rank_name)
# tmp_dir/tinyvit_logits_top100_epoch0_rank0/rank0-keys.txt
keys_fname = tmp_filename + '-keys.txt'
values_fname = tmp_filename + '-values.bin'
keys_file = open(keys_fname, 'w')
values_file = open(values_fname, 'wb')
keys = dict()
while 1:
item = msg_queue.get()
if item == _Writer._WORKER_MSG.KILL:
break
key, value = item
if key in keys:
continue
idx = len(keys)
keys[key] = idx
keys_file.write(key + '\n')
values_file.write(value)
keys_file.close()
values_file.close()
os.makedirs(path, exist_ok=True)
os.system(f'mv {temp_dirname}/* {path}/')
print(f"Save logits over: {path}")
def __del__(self):
if self.worker is not None:
self.msg_queue.put(_Writer._WORKER_MSG.KILL)
self.worker.join()
class _Reader:
def __init__(self, path: str, item_size: int, rank: int):
self.rank = rank
self.item_size = item_size
self.packages = self.search_packages(path)
self.packages_visited = [False] * len(self.packages)
# key -> package idx
self.keys = dict()
def read(self, key: str) -> bytes:
pkg_idx, value_idx = self.keys.get(key, (None, None))
if pkg_idx is None:
pkg_idx, value_idx = self.find_item_in_packages(key)
return self.packages[pkg_idx][value_idx]
    def find_item_in_packages(self, key: str) -> Tuple[int, int]:
for pkg_idx, pkg in enumerate(self.packages):
if not self.packages_visited[pkg_idx]:
self.packages_visited[pkg_idx] = True
# load keys
keys_fname = pkg.name + '-keys.txt'
with open(keys_fname, 'r') as keys_file:
for i, k in enumerate(keys_file.readlines()):
k = k.strip()
self.keys[k] = (pkg_idx, i)
if key in self.keys:
return self.keys[key]
raise KeyError(key)
def search_packages(self, path):
assert os.path.isdir(path), f'[Error] Reading logits fails. Path {path} not found.'
names = self.search_packages_names(path)
return [_Reader._PackageReader(name, self.item_size) for name in names]
def search_packages_names(self, path):
names = []
VALUES_POSTFIX = '-values.bin'
for name in os.listdir(path):
if name.endswith(VALUES_POSTFIX):
names.append(name[:-len(VALUES_POSTFIX)])
num_packages = len(names)
def rank_key_fn(name):
r = int(name[4:])
return (r - self.rank) % num_packages
# move the rankx-keys.txt to the front
names.sort(key=rank_key_fn)
names = list(map(lambda x: os.path.join(path, x), names))
return names
class _PackageReader:
def __init__(self, name, item_size):
self.name = name
self.item_size = item_size
# delay to create handle
self.values_file = None
def __getitem__(self, idx: int):
self._ensure_handle_created()
self.values_file.seek(self.item_size * idx)
return self.values_file.read(self.item_size)
def _ensure_handle_created(self):
if self.values_file is None:
values_fname = self.name + '-values.bin'
self.values_file = open(values_fname, 'rb')
class TxtManager:
def __init__(self, path: str, item_size: int, rank: int):
self.path = path
self.writer = None
self.reader = None
self.item_size = item_size
self.rank = rank
def write(self, key: str, value: bytes) -> bool:
if self.writer is None:
self.writer = _Writer(self.path, self.rank)
return self.writer.write(key, value)
def read(self, key: str) -> bytes:
if self.reader is None:
self.reader = _Reader(self.path, self.item_size, self.rank)
return self.reader.read(key)
|
Cream/TinyViT/data/augmentation/manager.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/manager.py",
"repo_id": "Cream",
"token_count": 2518
}
| 307 |
# --------------------------------------------------------
# TinyViT Data Builder
# Copyright (c) 2022 Microsoft
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# Adapted for TinyVIT
# --------------------------------------------------------
import os
import torch
import numpy as np
import torch.distributed as dist
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data import Mixup
from timm.data import create_transform
from .augmentation import create_transform as create_transform_record
from .augmentation.mixup import Mixup as Mixup_record
from .augmentation.dataset_wrapper import DatasetWrapper
from .imagenet22k_dataset import IN22KDataset
from .sampler import MyDistributedSampler
try:
from timm.data import TimmDatasetTar
except ImportError:
# for higher version of timm
from timm.data import ImageDataset as TimmDatasetTar
try:
from torchvision.transforms import InterpolationMode
def _pil_interp(method):
if method == 'bicubic':
return InterpolationMode.BICUBIC
elif method == 'lanczos':
return InterpolationMode.LANCZOS
elif method == 'hamming':
return InterpolationMode.HAMMING
else:
# default bilinear, do we want to allow nearest?
return InterpolationMode.BILINEAR
except ImportError:
from timm.data.transforms import _pil_interp
def build_loader(config):
config.defrost()
dataset_train, config.MODEL.NUM_CLASSES = build_dataset(
is_train=True, config=config)
config.freeze()
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build train dataset")
dataset_val, _ = build_dataset(is_train=False, config=config)
print(
f"local rank {config.LOCAL_RANK} / global rank {dist.get_rank()} successfully build val dataset")
mixup_active = config.AUG.MIXUP > 0 or config.AUG.CUTMIX > 0. or config.AUG.CUTMIX_MINMAX is not None
sampler_train = MyDistributedSampler(
dataset_train, shuffle=True,
drop_last=False, padding=True, pair=mixup_active and config.DISTILL.ENABLED,
)
sampler_val = MyDistributedSampler(
dataset_val, shuffle=False,
drop_last=False, padding=False, pair=False,
)
# TinyViT Dataset Wrapper
if config.DISTILL.ENABLED:
dataset_train = DatasetWrapper(dataset_train,
logits_path=config.DISTILL.TEACHER_LOGITS_PATH,
topk=config.DISTILL.LOGITS_TOPK,
write=config.DISTILL.SAVE_TEACHER_LOGITS,
)
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=config.DATA.BATCH_SIZE,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
# modified for TinyViT, we save logits of all samples
drop_last=not config.DISTILL.SAVE_TEACHER_LOGITS,
)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=config.DATA.BATCH_SIZE,
shuffle=False,
num_workers=config.DATA.NUM_WORKERS,
pin_memory=config.DATA.PIN_MEMORY,
drop_last=False
)
# setup mixup / cutmix
mixup_fn = None
if mixup_active:
mixup_t = Mixup if not config.DISTILL.ENABLED else Mixup_record
if config.DISTILL.ENABLED and config.AUG.MIXUP_MODE != "pair2":
# change to pair2 mode for saving logits
config.defrost()
config.AUG.MIXUP_MODE = 'pair2'
config.freeze()
mixup_fn = mixup_t(
mixup_alpha=config.AUG.MIXUP, cutmix_alpha=config.AUG.CUTMIX, cutmix_minmax=config.AUG.CUTMIX_MINMAX,
prob=config.AUG.MIXUP_PROB, switch_prob=config.AUG.MIXUP_SWITCH_PROB, mode=config.AUG.MIXUP_MODE,
label_smoothing=config.MODEL.LABEL_SMOOTHING, num_classes=config.MODEL.NUM_CLASSES)
return dataset_train, dataset_val, data_loader_train, data_loader_val, mixup_fn
def build_dataset(is_train, config):
transform = build_transform(is_train, config)
dataset_tar_t = TimmDatasetTar
if config.DATA.DATASET == 'imagenet':
prefix = 'train' if is_train else 'val'
# load tar dataset
data_dir = os.path.join(config.DATA.DATA_PATH, f'{prefix}.tar')
if os.path.exists(data_dir):
dataset = dataset_tar_t(data_dir, transform=transform)
else:
root = os.path.join(config.DATA.DATA_PATH, prefix)
dataset = datasets.ImageFolder(root, transform=transform)
nb_classes = 1000
elif config.DATA.DATASET == 'imagenet22k':
if is_train:
dataset = IN22KDataset(data_root=config.DATA.DATA_PATH, transform=transform,
fname_format=config.DATA.FNAME_FORMAT, debug=config.DATA.DEBUG)
nb_classes = 21841
else:
# load ImageNet-1k validation set
'''
datasets/
├── ImageNet-22k/ # the folder of IN-22k
└── ImageNet/ # the folder of IN-1k
'''
old_data_path = config.DATA.DATA_PATH
config.defrost()
config.DATA.DATA_PATH = os.path.normpath(
os.path.join(old_data_path, '../ImageNet'))
config.DATA.DATASET = 'imagenet'
dataset, nb_classes = build_dataset(is_train=False, config=config)
config.DATA.DATA_PATH = old_data_path
config.DATA.DATASET = 'imagenet22k'
config.freeze()
else:
raise NotImplementedError("We only support ImageNet Now.")
return dataset, nb_classes
def build_transform(is_train, config):
resize_im = config.DATA.IMG_SIZE > 32
# RGB: mean, std
rgbs = dict(
default=(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
inception=(IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD),
clip=((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711)),
)
mean, std = rgbs[config.DATA.MEAN_AND_STD_TYPE]
if is_train:
# this should always dispatch to transforms_imagenet_train
create_transform_t = create_transform if not config.DISTILL.ENABLED else create_transform_record
transform = create_transform_t(
input_size=config.DATA.IMG_SIZE,
is_training=True,
color_jitter=config.AUG.COLOR_JITTER if config.AUG.COLOR_JITTER > 0 else None,
auto_augment=config.AUG.AUTO_AUGMENT if config.AUG.AUTO_AUGMENT != 'none' else None,
re_prob=config.AUG.REPROB,
re_mode=config.AUG.REMODE,
re_count=config.AUG.RECOUNT,
interpolation=config.DATA.INTERPOLATION,
mean=mean,
std=std,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
config.DATA.IMG_SIZE, padding=4)
return transform
t = []
if resize_im:
if config.TEST.CROP:
size = int((256 / 224) * config.DATA.IMG_SIZE)
t.append(
transforms.Resize(size, interpolation=_pil_interp(
config.DATA.INTERPOLATION)),
# to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(config.DATA.IMG_SIZE))
else:
t.append(
transforms.Resize((config.DATA.IMG_SIZE, config.DATA.IMG_SIZE),
interpolation=_pil_interp(config.DATA.INTERPOLATION))
)
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
transform = transforms.Compose(t)
return transform
|
Cream/TinyViT/data/build.py/0
|
{
"file_path": "Cream/TinyViT/data/build.py",
"repo_id": "Cream",
"token_count": 3717
}
| 308 |
# --------------------------------------------------------
# TinyViT Utils
# Copyright (c) 2022 Microsoft
# --------------------------------------------------------
import torch
from torch import nn
class RemapLayer(nn.Module):
def __init__(self, fname):
super().__init__()
with open(fname) as fin:
self.mapping = torch.Tensor(
list(map(int, fin.readlines()))).to(torch.long)
def forward(self, x):
'''
x: [batch_size, class]
'''
B = len(x)
dummy_cls = x.new_zeros((B, 1))
expand_x = torch.cat([x, dummy_cls], dim=1)
return expand_x[:, self.mapping]
|
Cream/TinyViT/models/remap_layer.py/0
|
{
"file_path": "Cream/TinyViT/models/remap_layer.py",
"repo_id": "Cream",
"token_count": 279
}
| 309 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import json
import os
import util.misc as utils
try:
from panopticapi.evaluation import pq_compute
except ImportError:
pass
class PanopticEvaluator(object):
def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"):
self.gt_json = ann_file
self.gt_folder = ann_folder
if utils.is_main_process():
if not os.path.exists(output_dir):
os.mkdir(output_dir)
self.output_dir = output_dir
self.predictions = []
def update(self, predictions):
for p in predictions:
with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f:
f.write(p.pop("png_string"))
self.predictions += predictions
def synchronize_between_processes(self):
all_predictions = utils.all_gather(self.predictions)
merged_predictions = []
for p in all_predictions:
merged_predictions += p
self.predictions = merged_predictions
def summarize(self):
if utils.is_main_process():
json_data = {"annotations": self.predictions}
predictions_json = os.path.join(self.output_dir, "predictions.json")
with open(predictions_json, "w") as f:
f.write(json.dumps(json_data))
return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir)
return None
|
Cream/iRPE/DETR-with-iRPE/datasets/panoptic_eval.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/datasets/panoptic_eval.py",
"repo_id": "Cream",
"token_count": 653
}
| 310 |
# 2D RPE Operators
## Build iRPE operators implemented in CUDA
Although iRPE can be implemented with native PyTorch functions, the backward pass of the PyTorch index function is very slow. We implement CUDA operators for more efficient training and recommend building them. `nvcc` is necessary to build the CUDA operators.
```bash
cd rpe_ops/
python setup.py install --user
```
## rpe\_index
The function [`rpe_index`](./rpe_index.py#L5) is equivalent to
```python
def rpe_index(input, index):
'''Y[b, h, i, j] = input[b, h, i, index[i, j]]
Parameters
----------
input: torch.Tensor, float32
The shape is (B, H, L_query, num_buckets)
index: torch.Tensor, int32
The shape is (L_query, L_key)
where B is the batch size, and H is the number of attention heads.
Returns
-------
Y: torch.Tensor, float32
The shape is (B, H, L_query, L_key)
'''
L_query, L_key = index.shape
num_buckets = input.size(-1)
B = len(input)
offset = torch.arange(0, L_query * num_buckets, num_buckets).view(-1, 1)
return input.flatten(2)[:, :, (index + offset).flatten()].view(B, -1, L_query, L_key)
```
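A small usage sketch (shapes and values here are illustrative, not taken from the repository), assuming the pure-PyTorch definition above:
```python
import torch

B, H, L_query, L_key, num_buckets = 2, 4, 9, 9, 5
x = torch.randn(B, H, L_query, num_buckets)
index = torch.randint(num_buckets, (L_query, L_key))

y = rpe_index(x, index)
assert y.shape == (B, H, L_query, L_key)
```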
|
Cream/iRPE/DETR-with-iRPE/rpe_ops/README.md/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/rpe_ops/README.md",
"repo_id": "Cream",
"token_count": 447
}
| 311 |
# How to equip iRPE ?
The implementation of iRPE (image relative position encoding) consists of two parts, namely the Python part `irpe.py` and the C++/CUDA part `rpe_ops`. The Python code `irpe.py` is the basic part that implements the four kinds of relative position encoding mappings, and the C++/CUDA code `rpe_ops` accelerates the forward and backward procedures. Copy both parts into the project directory that needs iRPE.
The current implementation supports variable input resolution and non-square input.
- Step. 1 - Copy the iRPE files
Copy the file `irpe.py` and the directory `rpe_ops` to the project directory.
- Step. 2 - Create the configuration of RPE
- [Example in DeiT: rpe\_models.py#L14-L21](./DeiT-with-iRPE/rpe_models.py#L14-L21)
- [Example in DETR: models/transformer.py#L63-L69](./DETR-with-iRPE/models/transformer.py#L63-L69)
```python
from irpe import get_rpe_config
rpe_config = get_rpe_config(
ratio=1.9,
method="product",
mode='ctx',
shared_head=True,
skip=1,
rpe_on='k',
)
```
The meaning of the arguments can be found in [`help(get_rpe_config)`](./DeiT-with-iRPE/irpe.py#L823-L855).
- Step. 3 - Build the instance of RPE modules
- [Example in DeiT: rpe\_vision\_transformer.py#L63-L66](./DeiT-with-iRPE/rpe_vision_transformer.py#L63-L66)
- [Example in DETR: models/rpe\_attention/multi\_head\_attention.py#L94-L97](./DETR-with-iRPE/models/rpe_attention/multi_head_attention.py#L94-L97)
```python
from irpe import build_rpe
def __init__(self, ...):
...
# image relative position encoding
self.rpe_q, self.rpe_k, self.rpe_v = \
build_rpe(rpe_config,
head_dim=head_dim,
num_heads=num_heads)
```
`build_rpe` should be called in the `__init__` function of an `nn.Module`.
- Step. 4 - Add RPE on keys, queries and values
- [Example in DeiT: rpe\_vision\_transformer.py#L77-L92](./DeiT-with-iRPE/rpe_vision_transformer.py#L77-L92)
- [Example in DETR: rpe\_vision\_transformer.py#L327-L376](./DETR-with-iRPE/models/rpe_attention/rpe_attention_function.py#L327-L376)
In the `forward` function, we consider relative position encodings as a bias on `attn` and `attn @ v`.
```python
def forward(self, ...):
...
attn = (q @ k.transpose(-2, -1))
# image relative position on keys
if self.rpe_k is not None:
attn += self.rpe_k(q)
# image relative position on queries
if self.rpe_q is not None:
attn += self.rpe_q(k * self.scale).transpose(2, 3)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
out = attn @ v
# image relative position on values
if self.rpe_v is not None:
out += self.rpe_v(attn)
x = out.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
```
Notice that the shapes of `q`, `k` and `v` are all `(B, H, L, head_dim)`, where `B` is the batch size, `H` is the number of heads, and `L` is the sequence length, equal to `height * width` (+1 if a class token exists). `head_dim` is the dimension of each head. A quick shape sanity check is given after Step 5.
- Step. 5 [Optional, Recommended] - Build C++/CUDA operators for iRPE
Although iRPE can be implemented with native PyTorch functions, the backward pass of the PyTorch index function is very slow. We implement CUDA operators for more efficient training and recommend building them.
`nvcc` is necessary to build the CUDA operators.
```bash
cd rpe_ops/
python setup.py install --user
```
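A quick shape sanity check for the snippet in Step 4 (a standalone, hypothetical example, independent of any model code):
```python
import torch

B, H, L, head_dim = 2, 4, 197, 64   # e.g. 14x14 patches + 1 class token
q = torch.randn(B, H, L, head_dim)
k = torch.randn(B, H, L, head_dim)
attn = q @ k.transpose(-2, -1)      # (B, H, L, L); any RPE bias must broadcast to this shape
assert attn.shape == (B, H, L, L)
```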
|
Cream/iRPE/HOW_TO_EQUIP_iRPE.md/0
|
{
"file_path": "Cream/iRPE/HOW_TO_EQUIP_iRPE.md",
"repo_id": "Cream",
"token_count": 1355
}
| 312 |
from .ra_sampler import RASampler
|
CvT/lib/dataset/samplers/__init__.py/0
|
{
"file_path": "CvT/lib/dataset/samplers/__init__.py",
"repo_id": "CvT",
"token_count": 12
}
| 313 |
include version.py
include setup.py
|
anomalydetector/MANIFEST.in/0
|
{
"file_path": "anomalydetector/MANIFEST.in",
"repo_id": "anomalydetector",
"token_count": 9
}
| 314 |
from msanomalydetector.spectral_residual import SpectralResidual
from msanomalydetector.util import MAX_RATIO, THRESHOLD, MAG_WINDOW, SCORE_WINDOW, DetectMode
__all__ = ['SpectralResidual', 'MAX_RATIO', 'THRESHOLD', 'MAG_WINDOW', 'SCORE_WINDOW', 'DetectMode']
|
anomalydetector/msanomalydetector/__init__.py/0
|
{
"file_path": "anomalydetector/msanomalydetector/__init__.py",
"repo_id": "anomalydetector",
"token_count": 97
}
| 315 |
import unittest
import numpy as np
from msanomalydetector import boundary_utils
class TestBoundaryUnit(unittest.TestCase):
def test_calculate_boundary_unit(self):
data = [139809.0, 139706.0, 140562.0, 140534.0, 140568.0, 139934.0, 139392.0, 141714.0, 144167.0, 147127.0,
147450.0, 147991.0, 151621.0, 154912.0, 158443.0, 160899.0, 164170.0, 164339.0, 165780.0, 167373.0,
167654.0, 168863.0, 169472.0, 169830.0, 169632.0, 169028.0, 165843.0, 162517.0, 159335.0, 156503.0,
151731.0, 151612.0, 151911.0, 157120.0, 157027.0, 159949.0, 160263.0, 160073.0, 160001.0, 159721.0,
160138.0, 160292.0, 160280.0, 159822.0, 159482.0, 159384.0, 159038.0, 158901.0, 158899.0, 156036.0]
is_anomaly = [False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, True, True, True, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False]
expected_output = \
[148560.58510638, 148567.58510638, 148574.58510638, 148576.08510638, 148577.58510638, 148864.08510638,
149150.58510638, 149763.83510638, 150377.08510638, 151857.08510638, 152018.58510638, 152289.08510638,
154104.08510638, 155749.58510638, 157515.08510638, 158743.08510638, 160378.58510638, 160463.08510638,
161183.58510638, 161183.58510638, 161183.58510638, 161183.58510638, 161183.58510638, 161183.58510638,
161183.58510638, 161183.58510638, 161183.58510638, 159552.08510638, 158425.08510638, 158330.08510638,
158294.08510638, 158268.08510638, 158268.08510638, 158268.08510638, 158268.08510638, 158204.58510638,
158154.08510638, 158154.08510638, 158154.08510638, 158154.08510638, 158154.08510638, 158154.08510638,
158179.33510638, 158204.58510638, 158179.33510638, 158154.08510638, 158094.33510638, 158034.58510638,
158010.08510638, 157985.58510638]
actual_output = boundary_utils.calculate_boundary_unit_entire(np.asarray(data, dtype=float), is_anomaly)
for e, v in zip(expected_output, actual_output):
self.assertAlmostEqual(e, v)
expected_last_unit = 156748.27551020408
actual_last_unit = boundary_utils.calculate_boundary_unit_last(np.asarray(data, dtype=float))
self.assertAlmostEqual(expected_last_unit, actual_last_unit)
def test_calculate_boundary_unit_negative(self):
data = [-21901.0, -31123.0, -33203.0, -33236.0, -54681.0, -112808.0, -5368.0, -40021.0, -35.0, -72593.0,
-30880.0, -34597.0, -6210.0, -5508.0, -28892.0, -41091.0, -34916.0, -31941.0, -31084.0, -7379.0,
-4883.0, -32563.0, -29919.0, -33599.0, -33019.0, -35218.0, -9520.0, -4454.0, -39660.0, -29634.0,
-35751.0, -39912.0, -46940.0, -28969.0, -20196.0, -57031.0, -45264.0, -44059.0, -29180.0, -34642.0,
-11041.0, -10455.0, -40181.0, -43345.0, -37045.0, -33232.0, -37800.0, -9240.0, -12108.0, -34654.0]
is_anomaly = [False, False, False, False, False, True, False, False, False, True, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False]
expected_output = [
33250.48958333333, 33258.73958333333, 33250.48958333333, 33258.73958333333, 33250.48958333333,
32730.489583333332, 32210.489583333332, 32730.489583333332, 33250.48958333333, 33250.48958333333,
33250.48958333333, 32619.489583333332, 32190.989583333332, 32190.989583333332, 32088.989583333332,
32190.989583333332, 32190.989583333332, 32619.489583333332, 32190.989583333332, 32190.989583333332,
32190.989583333332, 32190.989583333332, 32619.489583333332, 32930.48958333333, 32930.48958333333,
32619.489583333332, 32190.989583333332, 32930.48958333333, 33158.48958333333, 33448.48958333333,
33448.48958333333, 33969.98958333333, 33969.98958333333, 33969.98958333333, 33969.98958333333,
34524.48958333333, 35171.48958333333, 34524.48958333333, 35171.48958333333, 35171.48958333333,
33969.98958333333, 33969.98958333333, 33972.98958333333, 33975.98958333333, 33972.98958333333,
33969.98958333333, 33617.48958333333, 33969.98958333333, 33620.48958333333, 33975.98958333333]
actual_output = boundary_utils.calculate_boundary_unit_entire(np.asarray(data), is_anomaly)
for e, v in zip(expected_output, actual_output):
self.assertAlmostEqual(e, v)
expected_last_unit = 33197.17346938775
actual_last_unit = boundary_utils.calculate_boundary_unit_last(np.asarray(data))
self.assertAlmostEqual(expected_last_unit, actual_last_unit)
def test_calculate_margin(self):
self.assertAlmostEqual(boundary_utils.calculate_margin(10, 0), 1843316.2871148242)
self.assertAlmostEqual(boundary_utils.calculate_margin(10, 5), 502228.4038287002)
self.assertAlmostEqual(boundary_utils.calculate_margin(10, 25), 3359.7473532360186)
self.assertAlmostEqual(boundary_utils.calculate_margin(10, 95), 0.0014700521929794912)
self.assertAlmostEqual(boundary_utils.calculate_margin(10, 99), 0.00016994687082728675)
self.assertAlmostEqual(boundary_utils.calculate_margin(10, 100), 0.0)
self.assertAlmostEqual(boundary_utils.calculate_margin(345969.3476, 79.7333448252325), 3762.3800000299298)
def test_calculate_anomaly_score(self):
self.assertAlmostEqual(boundary_utils.calculate_anomaly_score(10, 15, 5, False), 0)
self.assertAlmostEqual(boundary_utils.calculate_anomaly_score(10, 15, 5, True), 0.5)
self.assertAlmostEqual(boundary_utils.calculate_anomaly_score(10+1e-5, 10, 1, True), 0.005884191895350754)
self.assertAlmostEqual(boundary_utils.calculate_anomaly_score(10+1e-7, 10, 1, True), 5.884191859812512e-05)
if __name__ == '__main__':
unittest.main()
|
anomalydetector/tests/test_boundary_utils.py/0
|
{
"file_path": "anomalydetector/tests/test_boundary_utils.py",
"repo_id": "anomalydetector",
"token_count": 3115
}
| 316 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Iterable, List, Mapping, OrderedDict
from archai.common import utils
class DelimitedText:
def __init__(self) -> None:
self._data: OrderedDict[str, List[str]] = OrderedDict()
def add_from_file(self, filepath: str, has_header: bool, delimiter: str = "\t") -> None:
filepath = utils.full_path(filepath)
header = None if has_header else []
        with open(filepath, "r") as f:
            line = f.readline()
            while line:
                cols = line.rstrip("\n").split(sep=delimiter)
                if header is None:
                    header = cols
                else:
                    self.add_from_cols(cols, header)
                line = f.readline()
def add_from_text(self, text: str, has_header: bool, delimiter: str = "\t") -> None:
header = None if has_header else []
for line in text.splitlines():
cols = line.rstrip("\n").split(sep=delimiter)
if header is None:
header = cols
else:
self.add_from_cols(cols, header)
def add_from_cols(self, cols: Iterable, header: List[str]) -> None:
for i, col in enumerate(cols):
key = header[i] if len(header) > i else str(i)
if key not in self._data:
self._data[key] = []
self._data[key].append(str(col))
def get_col(self, col_name: str) -> List[str]:
return self._data[col_name]
def set_col(self, col_name: str, vals: List[str]) -> None:
self._data[col_name] = vals
def set_data(self, d: Mapping[str, List[str]]) -> None:
self._data = OrderedDict(d)
def add_from_cols_list(self, cols_list: Iterable[Iterable], header: List[str]) -> None:
for cols in cols_list:
self.add_from_cols(cols, header)
def save(self, filepath: str, has_header=True, delimiter: str = "\t") -> None:
keys = list(self._data.keys())
with open(filepath, "w") as f:
if has_header:
f.write(delimiter.join(keys) + "\n")
for vals in zip(*(self._data[key] for key in keys)):
f.write(delimiter.join(vals) + "\n")
def __len__(self) -> int:
return len(self._data)
|
archai/archai/common/delimited_text.py/0
|
{
"file_path": "archai/archai/common/delimited_text.py",
"repo_id": "archai",
"token_count": 1100
}
| 317 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Any, Callable, Dict, List, Optional, Tuple
import cv2
import lmdb
import msgpack
import numpy as np
import torch
from torch.utils.data import Dataset
from archai.common.ordered_dict_logger import OrderedDictLogger
logger = OrderedDictLogger(source=__name__)
class TensorpackLmdbDataset(Dataset):
"""Tensorpack LMDB dataset."""
def __init__(
self,
lmdb_file_path: str,
img_key: str,
mask_key: Optional[str] = None,
serializer: Optional[str] = "msgpack",
img_size: Optional[Tuple[int, ...]] = None,
img_format: Optional[str] = "numpy",
ones_mask: Optional[bool] = False,
zeroes_mask: Optional[bool] = False,
raise_errors: Optional[bool] = True,
is_bgr: Optional[bool] = True,
valid_resolutions: Optional[List[Tuple]] = None,
augmentation_fn: Optional[Callable] = None,
mask_interpolation_method: int = cv2.INTER_NEAREST,
) -> None:
"""Initialize Tensorpack LMDB dataset.
Args:
lmdb_file_path: Path to the LMDB file.
img_key: Image key in LMDB file.
mask_key: Mask key in LMDB file.
serializer: Serializer used to serialize data in LMDB file.
img_size: Image size.
img_format: Image format.
ones_mask: Whether mask is composed of ones.
zeroes_mask: Whether mask is composed of zeroes.
raise_errors: Whether to raise errors.
is_bgr: Whether image is in BGR format.
valid_resolutions: Valid resolutions.
augmentation_fn: Augmentation function.
mask_interpolation_method: Mask interpolation method.
"""
self.lmdb_file_path = lmdb_file_path
self.db = lmdb.open(
lmdb_file_path,
subdir=False,
readonly=True,
lock=False,
readahead=True,
map_size=1099511627776 * 2,
max_readers=100,
)
self.img_key = img_key
self.mask_key = mask_key
self.txn = self.db.begin()
self.keys = [k for k, _ in self.txn.cursor() if k != b"__keys__"]
self.img_size = img_size
self.serializer = serializer
self.img_format = img_format
self.ones_mask = ones_mask
self.zeroes_mask = zeroes_mask
assert not (self.ones_mask and self.zeroes_mask), "`ones_mask` and `zeroes_mask` are mutually exclusive."
if self.mask_key is None:
assert (
self.ones_mask or self.zeroes_mask
), "`ones_mask` or `zeroes_mask` must be True if `mask_key` is None."
self.is_bgr = is_bgr
self.raise_errors = raise_errors
self.valid_resolutions = valid_resolutions
self.augmentation_fn = augmentation_fn
self.mask_interpolation_method = mask_interpolation_method
def __len__(self) -> int:
"""Return length of the dataset."""
return len(self.keys)
def _get_datapoint(self, idx: int) -> Dict[str, Any]:
"""Get a data point from the dataset.
Args:
idx: Index of the data point.
Returns:
Data point.
"""
key = self.keys[idx]
value = self.txn.get(key)
if self.serializer == "msgpack":
sample = msgpack.loads(value)
else:
raise NotImplementedError(f"Unsupported serializer {self.serializer}")
for d_key in [self.img_key, self.mask_key]:
if d_key and d_key not in sample:
available_keys = sample.keys() if isinstance(sample, dict) else []
raise KeyError(f"{d_key} not found in sample. Available keys: {available_keys}")
if d_key and isinstance(sample[d_key], dict) and b"data" in sample[d_key]:
sample[d_key] = sample[d_key][b"data"]
return sample
def __getitem__(self, idx: int) -> Dict[str, Any]:
"""Get a sample from the dataset.
Args:
idx: Index of the sample.
Returns:
Sample.
"""
try:
sample = self._get_datapoint(idx)
if self.img_format == "numpy":
img = np.frombuffer(sample[self.img_key], dtype=np.uint8).reshape((-1, 1))
img = cv2.imdecode(img, cv2.IMREAD_COLOR)
img = img[..., ::-1].copy() if self.is_bgr else img
if self.ones_mask:
mask = np.ones(img.shape[:2], dtype=np.uint8)
elif self.zeroes_mask or len(sample[self.mask_key]) == 0:
mask = np.zeros(img.shape[:2], dtype=np.uint8)
else:
mask_cv2_buf = np.frombuffer(sample[self.mask_key], dtype=np.uint8).reshape((-1, 1))
mask = cv2.imdecode(mask_cv2_buf, cv2.IMREAD_GRAYSCALE)
sample = {"image": img, "mask": mask}
if self.augmentation_fn:
sample = self.augmentation_fn(**sample)
if self.img_size:
sample["image"] = cv2.resize(sample["image"], self.img_size)
sample["mask"] = cv2.resize(
sample["mask"], self.img_size, interpolation=self.mask_interpolation_method
)
if self.valid_resolutions:
assert img.shape[:2] in self.valid_resolutions
assert mask.shape[:2] in self.valid_resolutions
else:
raise NotImplementedError(f"Unsupported image format: {self.img_format}")
return {
"image": torch.tensor(sample["image"].transpose(2, 0, 1) / 255.0, dtype=torch.float),
"mask": torch.tensor(sample["mask"], dtype=torch.long),
"dataset_path": self.lmdb_file_path,
"key": self.keys[idx],
}
except Exception as e:
if self.raise_errors:
raise e
else:
logger.error(f"Sample {idx} from dataset {self.lmdb_file_path} could not be loaded.")
|
archai/archai/datasets/cv/tensorpack_lmdb_dataset_provider_utils.py/0
|
{
"file_path": "archai/archai/datasets/cv/tensorpack_lmdb_dataset_provider_utils.py",
"repo_id": "archai",
"token_count": 3076
}
| 318 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
from archai.datasets.nlp.tokenizer_utils.bbpe_tokenizer import BbpeTokenizer
class Gpt2Tokenizer(BbpeTokenizer):
"""GPT-2 based tokenizer."""
def __init__(
self,
save_path: str,
vocab_size: Optional[int] = 50257,
pad_vocab_size: Optional[bool] = True,
bos_token: Optional[str] = "<|endoftext|>",
eos_token: Optional[str] = "<|endoftext|>",
unk_token: Optional[str] = "<|unk|>",
pad_token: Optional[str] = None,
min_frequency: Optional[int] = None,
model_max_length: Optional[int] = 1024,
add_prefix_space: Optional[bool] = True,
add_prefix_new_line: Optional[bool] = True,
sorted_vocab: Optional[bool] = False,
) -> None:
"""Define the tokenization pipeline.
Args:
save_path: Path to save the tokenizer.
vocab_size: Maximum size of vocabulary.
pad_vocab_size: Whether vocabulary size should be padded to a multiple of 8.
bos_token: Begin-of-sentence token.
eos_token: End-of-sentence token.
unk_token: Unknown token.
pad_token: Padding token.
min_frequency: Minimum frequency of tokens.
model_max_length: Maximum length of sequence.
add_prefix_space: Whether a prefix space token should be added.
add_prefix_new_line: Whether a prefix new line token should be added.
sorted_vocab: Whether vocabulary should be sorted.
"""
# GPT2Tokenizer
# vocab_size: 50257
# bos = eos = unk = '<|endoftext|>'
# sep_token = None
# max_model_input_sizes: {'gpt2': 1024, 'gpt2-medium': 1024, 'gpt2-large': 1024}
# max_len = max_len_sentence_pair = max_len_single_sentence = 1024
# mask_token = None
# default vocab size for GPT-2 is 50257
super().__init__(
save_path=save_path,
vocab_size=vocab_size,
pad_vocab_size=pad_vocab_size,
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
min_frequency=min_frequency,
model_max_length=model_max_length,
add_prefix_space=add_prefix_space,
add_prefix_new_line=add_prefix_new_line,
sorted_vocab=sorted_vocab,
)
|
archai/archai/datasets/nlp/tokenizer_utils/gpt2_tokenizer.py/0
|
{
"file_path": "archai/archai/datasets/nlp/tokenizer_utils/gpt2_tokenizer.py",
"repo_id": "archai",
"token_count": 1146
}
| 319 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import yaml
from tqdm import tqdm
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import (
AsyncModelEvaluator,
ModelEvaluator,
)
class SearchConstraint:
def __init__(self, name, evaluator, constraint):
self.name = name
self.evaluator = evaluator
self.constraint = constraint
class SearchObjective:
def __init__(self, name, model_evaluator, higher_is_better, compute_intensive, constraint):
self.name = name
self.evaluator = model_evaluator
self.higher_is_better = higher_is_better
self.compute_intensive = compute_intensive
self.constraint = constraint
class SearchObjectives:
"""Search objectives and constraints."""
def __init__(self, cache_objective_evaluation: Optional[bool] = True) -> None:
"""Create, evaluate and cache search objectives and constraints for search algorithms.
Besides objectives, this class also supports registering search constraints,
which are used to filter out candidate architectures that do not meet certain
criteria (e.g., number of parameters, FLOPs). Constraints are typically evaluated
multiple times by search algorithms and should not be computationally expensive to
evaluate.
Args:
cache_objective_evaluation: If `True`, objective evaluations are cached using the
tuple `(obj_name, archid, budget)` as key.
"""
self._cache_objective_evaluation = cache_objective_evaluation
self._objs = {}
self._extra_constraints = {}
# Cache key: (obj_name, archid, budget)
self._cache: Dict[Tuple[str, str, Optional[float]], Optional[float]] = {}
@property
def objective_names(self) -> List[str]:
"""Return a list of all objective names."""
return list(self._objs.keys())
@property
def cheap_objective_names(self) -> List[str]:
"""Return a list of cheap objective names."""
return list(self.cheap_objectives.keys())
@property
def expensive_objective_names(self) -> List[str]:
"""Return a list of expensive objective names."""
return list(self.expensive_objectives.keys())
@property
def objectives(self) -> Dict[str, SearchObjective]:
"""Return a dictionary of all objectives."""
return self._objs
@property
def cheap_objectives(self) -> Dict[str, SearchObjective]:
"""Return a dictionary of cheap objectives."""
return self._filter_objs(self._objs, lambda x: not x.compute_intensive)
@property
def expensive_objectives(self) -> Dict[str, SearchObjective]:
"""Return a dictionary of expensive objectives."""
return self._filter_objs(self._objs, lambda x: x.compute_intensive)
@property
def constraints(self) -> Dict[str, SearchConstraint]:
"""Return a dictionary of all the constraints."""
return self._extra_constraints
def add_objective(
self,
name: str,
model_evaluator: Union[ModelEvaluator, AsyncModelEvaluator],
higher_is_better: bool,
compute_intensive: Optional[bool] = True,
constraint: Optional[Tuple[float, float]] = None,
) -> None:
"""Add an objective function to the `SearchObjectives` object.
Args:
name: The name of the objective.
model_evaluator: The model evaluator responsible for evaluating the objective.
higher_is_better: Whether the objective should be maximized (`True`) or minimized (`False`).
compute_intensive: If `True`, the objective is considered computationally expensive
and will be estimated using surrogate models when possible.
constraint: Objective constraint used to filter out candidate architectures.
Expects `(lower_bound, upper_bound)` tuple. Can only be set if
`compute_intensive` is set to `False`.
"""
assert isinstance(model_evaluator, (ModelEvaluator, AsyncModelEvaluator))
assert name not in self._objs, f"There is already an objective {name}."
assert name not in self._extra_constraints, f"There is already an constraint named {name}."
obj = SearchObjective(name, model_evaluator, higher_is_better, compute_intensive, constraint)
if compute_intensive:
assert constraint is None, "Constraints can only be set for cheap objectives (compute_intensive=False)."
self._objs[name] = obj
def add_constraint(
self, name: str, model_evaluator: Union[ModelEvaluator, AsyncModelEvaluator], constraint: Tuple[float, float]
) -> None:
"""Add a search constraint to the `SearchObjectives` object.
Constraints are typically evaluated multiple times by search algorithms to validate
candidate architectures and should not be computationally expensive to evaluate.
Args:
name: The name of the constraint.
model_evaluator: The model evaluator responsible for evaluating the constraint.
constraint: The valid range of the constraint. Expects a `(lower_bound, upper_bound)`
tuple.
"""
assert isinstance(model_evaluator, (ModelEvaluator, AsyncModelEvaluator))
assert name not in self._objs, f"There is already an objective {name}."
assert name not in self._extra_constraints, f"There is already an constraint named {name}."
self._extra_constraints[name] = SearchConstraint(name, model_evaluator, constraint)
def _filter_objs(self, objs: Dict[str, Dict], query_fn: Callable) -> Dict[str, Dict]:
return {obj_name: obj_dict for obj_name, obj_dict in objs.items() if query_fn(obj_dict)}
def _eval_objs(
self,
objs: Dict[str, Dict],
models: List[ArchaiModel],
budgets: Optional[Dict[str, List[Any]]] = None,
progress_bar: Optional[bool] = False,
) -> Dict[str, np.ndarray]:
if not objs or not models:
return {}
# Sets `None` budget for objectives not specified in `budgets`
budgets = budgets or {}
budgets = {obj_name: budgets.get(obj_name, [None] * len(models)) for obj_name in objs}
# Splits `objs` in sync and async
sync_objs = self._filter_objs(objs, lambda x: isinstance(x.evaluator, ModelEvaluator))
async_objs = self._filter_objs(objs, lambda x: isinstance(x.evaluator, AsyncModelEvaluator))
# Initializes evaluation results with cached results
eval_results = {
obj_name: [
self._cache.get((obj_name, model.archid, budget))
for model, budget in zip(models, budgets[obj_name])
]
for obj_name in objs
}
# Saves model indices that are not in the cache and need to be evaluated
eval_indices = {
obj_name: [i for i, result in enumerate(obj_results) if result is None]
for obj_name, obj_results in eval_results.items()
}
# Dispatches jobs for all async objectives first
for obj_name, obj_d in async_objs.items():
pbar = (
tqdm(eval_indices[obj_name], desc=f'Dispatching jobs for "{obj_name}"...')
if progress_bar
else eval_indices[obj_name]
)
for i in pbar:
obj_d.evaluator.send(models[i], budgets[obj_name][i])
# Calculates synchronous objectives in order
for obj_name, obj_d in sync_objs.items():
pbar = (
tqdm(eval_indices[obj_name], desc=f'Calculating "{obj_name}"...')
if progress_bar
else eval_indices[obj_name]
)
for i in pbar:
eval_results[obj_name][i] = obj_d.evaluator.evaluate(
models[i], budgets[obj_name][i]
)
# Gets results from async objectives
pbar = (
tqdm(async_objs.items(), desc="Gathering results from async objectives...")
if progress_bar
else async_objs.items()
)
for obj_name, obj_d in pbar:
results = obj_d.evaluator.fetch_all()
assert len(eval_indices[obj_name]) == len(results), "Received a different amount of results than expected."
for result_i, eval_i in enumerate(eval_indices[obj_name]):
eval_results[obj_name][eval_i] = results[result_i]
# Updates cache
if self._cache_objective_evaluation:
for obj_name in objs:
for i in eval_indices[obj_name]:
cache_tuple = (
obj_name,
models[i].archid,
budgets[obj_name][i],
)
self._cache[cache_tuple] = eval_results[obj_name][i]
assert len(set(len(r) for r in eval_results.values())) == 1
return {obj_name: np.array(obj_results, dtype=np.float64) for obj_name, obj_results in eval_results.items()}
def _get_valid_arch_indices(
self, objs_or_constraints: Dict[str, Dict], results: Dict[str, np.ndarray]
) -> np.ndarray:
eval_lens = {len(r) for r in results.values()}
assert len(eval_lens) == 1
if list(eval_lens)[0] == 0:
return np.array([])
valid_mask = np.logical_and.reduce(
[
(obj_r >= objs_or_constraints[obj_name].constraint[0])
& (obj_r <= objs_or_constraints[obj_name].constraint[1])
for obj_name, obj_r in results.items()
]
)
return np.where(valid_mask)[0]
def validate_constraints(
self,
models: List[ArchaiModel],
progress_bar: Optional[bool] = False,
) -> Tuple[Dict[str, np.ndarray], np.ndarray]:
"""Evaluate constraints for a list of models and returns the indices of models that
satisfy all constraints.
Args:
models: List of models to evaluate.
progress_bar: Whether to show progress bar.
Returns:
Evaluation results and indices of models that satisfy all constraints.
"""
# Gets all constraints from cheap_objectives that have constraints and extra_constraints
constraints = dict(
self._extra_constraints, **self._filter_objs(self.cheap_objectives, lambda x: x.constraint is not None)
)
if not constraints:
return {}, np.arange(len(models))
eval_results = self._eval_objs(constraints, models, budgets=None, progress_bar=progress_bar)
return eval_results, self._get_valid_arch_indices(constraints, eval_results)
def is_model_valid(self, model: ArchaiModel) -> bool:
"""Check if a model satisfies all constraints.
Args:
model: Model to check.
Returns:
`True` if model is valid, `False` otherwise.
"""
_, idx = self.validate_constraints([model], progress_bar=False)
return len(idx) > 0
def eval_cheap_objs(
self,
models: List[ArchaiModel],
budgets: Optional[Dict[str, List]] = None,
progress_bar: Optional[bool] = False,
) -> Dict[str, np.ndarray]:
"""Evaluate all cheap objectives for a list of models.
Args:
models: List of models to evaluate.
budgets: Budgets for each objective.
progress_bar: Whether to show progress bar.
Returns:
Dictionary with evaluation results.
"""
return self._eval_objs(self.cheap_objectives, models, budgets, progress_bar)
def eval_expensive_objs(
self,
models: List[ArchaiModel],
budgets: Optional[Dict[str, List]] = None,
progress_bar: Optional[bool] = False,
) -> Dict[str, np.ndarray]:
"""Evaluate all expensive objective functions for a list of models.
Args:
models: List of models to evaluate.
budgets: Budgets for each objective.
progress_bar: Whether to show progress bar. Defaults to False.
Returns:
Dictionary with evaluation results.
"""
return self._eval_objs(self.expensive_objectives, models, budgets, progress_bar)
def eval_all_objs(
self,
models: List[ArchaiModel],
budgets: Optional[Dict[str, List]] = None,
progress_bar: Optional[bool] = False,
) -> Dict[str, np.ndarray]:
"""Evaluate all objective functions for a list of models.
Args:
models: List of models to evaluate.
budgets: Budgets for each objective.
progress_bar: Whether to show progress bar.
Returns:
Dictionary with evaluation results.
"""
return self._eval_objs(self._objs, models, budgets, progress_bar)
def save_cache(self, file_path: str) -> None:
"""Save the state of the `SearchObjectives` object to a YAML file.
Args:
file_path: Path to save YAML file.
"""
with open(file_path, "w", encoding="utf-8") as f:
yaml.dump(self._cache, f)
def load_cache(self, file_path: str) -> None:
"""Load the state of the `SearchObjectives` object from a YAML file.
Args:
file_path: Path to YAML file.
"""
with open(file_path, "r", encoding="utf-8") as f:
            self._cache = yaml.load(f, Loader=yaml.Loader)
def lookup_cache(self, obj_name: str, arch_id: str, budget: Optional[int]) -> Optional[float]:
"""Look up the cache for a specific objective, architecture and budget.
Args:
obj_name: Name of objective.
arch_id: Architecture ID.
budget: Budget.
Returns:
Evaluation result if found in cache, `None` otherwise.
"""
return self._cache.get((obj_name, arch_id, budget), None)
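# Minimal usage sketch (the evaluator objects below are hypothetical placeholders; any
# ModelEvaluator / AsyncModelEvaluator implementation can be plugged in):
#
#   objectives = SearchObjectives()
#   objectives.add_objective("accuracy", accuracy_evaluator, higher_is_better=True)
#   objectives.add_objective("n_parameters", params_evaluator, higher_is_better=False,
#                            compute_intensive=False, constraint=(0.0, 1e8))
#   _, valid_idx = objectives.validate_constraints(candidate_models)
#   results = objectives.eval_all_objs([candidate_models[i] for i in valid_idx])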
|
archai/archai/discrete_search/api/search_objectives.py/0
|
{
"file_path": "archai/archai/discrete_search/api/search_objectives.py",
"repo_id": "archai",
"token_count": 5982
}
| 320 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import statistics
from typing import Any, Dict, List, Optional, Union
import torch
from archai.discrete_search.evaluators.pt_profiler_utils.pt_profiler_model import (
ProfilerModel,
)
def profile(
model: torch.nn.Module,
forward_args: Optional[List[Any]] = None,
forward_kwargs: Optional[Dict[str, Any]] = None,
num_warmups: Optional[int] = 1,
num_samples: Optional[int] = 1,
use_cuda: Optional[bool] = False,
use_median: Optional[bool] = False,
ignore_layers: Optional[List[str]] = None,
) -> Dict[str, Union[float, int]]:
"""Profile a PyTorch model.
Outputs FLOPs, MACs, number of parameters, latency and peak memory.
Args:
model: PyTorch model.
        forward_args: `model.forward()` arguments used for profiling.
        forward_kwargs: `model.forward()` keyword arguments used for profiling.
        num_warmups: Number of warmup runs before profiling.
num_samples: Number of runs after warmup.
use_cuda: Whether to use CUDA instead of CPU.
use_median: Whether to use median instead of mean to average memory and latency.
ignore_layers: List of layer names that should be ignored during profiling.
Returns:
FLOPs, MACs, number of parameters, latency (seconds) and peak memory (bytes).
"""
assert isinstance(model, torch.nn.Module), "`model` must be a PyTorch model."
forward_args = forward_args if forward_args is not None else []
forward_args = [forward_args] if isinstance(forward_args, torch.Tensor) else forward_args
forward_kwargs = forward_kwargs or {}
if use_cuda:
# Ensures that model and all inputs are put on CUDA before profiling
model.to("cuda")
forward_args = tuple([arg.to("cuda") for arg in forward_args])
forward_kwargs = {key: value.to("cuda") for key, value in forward_kwargs.items()}
profiler = ProfilerModel(model)
model.eval()
for _ in range(num_warmups):
with torch.no_grad():
_ = model(*forward_args, **forward_kwargs)
result = {"flops": None, "macs": None, "n_parameters": None, "latency": [], "peak_memory": []}
for _ in range(num_samples):
profiler.start(ignore_layers=ignore_layers)
with torch.no_grad():
_ = model(*forward_args, **forward_kwargs)
result.update(
{"flops": profiler.get_flops(), "macs": profiler.get_macs(), "n_parameters": profiler.get_params()}
)
result["latency"].append(profiler.get_latency())
result["peak_memory"].append(profiler.get_peak_memory())
profiler.end()
if use_cuda:
# Ensures that model and all inputs are put on CPU after profiling to avoid
# overloading GPU memory
model.to("cpu")
forward_args = tuple([arg.to("cpu") for arg in forward_args])
forward_kwargs = {key: value.to("cpu") for key, value in forward_kwargs.items()}
stat = statistics.median if use_median else statistics.mean
result["latency"] = stat(result["latency"])
result["peak_memory"] = stat(result["peak_memory"])
return result
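# Minimal usage sketch (the toy nn.Linear model and random input below are hypothetical,
# chosen only to illustrate the expected call signature and returned keys):
if __name__ == "__main__":
    model = torch.nn.Linear(16, 4)
    stats = profile(model, forward_args=[torch.randn(1, 16)], num_samples=2)
    print(stats["flops"], stats["macs"], stats["n_parameters"], stats["latency"], stats["peak_memory"])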
|
archai/archai/discrete_search/evaluators/pt_profiler_utils/pt_profiler_eval.py/0
|
{
"file_path": "archai/archai/discrete_search/evaluators/pt_profiler_utils/pt_profiler_eval.py",
"repo_id": "archai",
"token_count": 1209
}
| 321 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from collections import OrderedDict
from typing import Any, Callable, Dict, Union
from archai.discrete_search.search_spaces.config.discrete_choice import DiscreteChoice
def flatten_dict(odict: Dict[str, Any]) -> dict:
"""Flatten a nested dictionary into a single level dictionary.
Args:
odict: Nested dictionary.
Returns:
Flattened dictionary.
"""
fdict = dict()
def _flatten(prefix: str, d: Dict[str, Any]) -> Dict[str, Any]:
prefix = prefix + "." if prefix else prefix
if isinstance(d, dict):
for k, v in d.items():
flat_v = _flatten(prefix + k, v)
if flat_v is not None:
fdict[prefix + k] = flat_v
else:
return d
_flatten("", odict)
return fdict
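# Example (illustrative): flatten_dict({"a": {"b": 1}, "c": 2}) == {"a.b": 1, "c": 2}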
def order_dict_keys(base_dict: OrderedDict, target_dict: Dict[str, Any]) -> OrderedDict:
"""Order the keys of a target dictionary based on a base dictionary.
Args:
base_dict (OrderedDict[str, Any]): Dictionary with the desired key order.
target_dict (Dict[str, Any]): Dictionary to be ordered.
Returns:
OrderedDict[str, Any]: Ordered version of `target_dict` dictionary.
"""
ordered_dict = OrderedDict()
for k in base_dict:
if k in target_dict:
ordered_dict[k] = target_dict[k]
return ordered_dict
def replace_ptree_choices(
config_tree: Union[Dict, DiscreteChoice], repl_fn: Callable[[DiscreteChoice], Any]
) -> OrderedDict:
"""Replace all DiscreteChoice nodes in a tree with the output of a function.
Args:
config_tree: Tree with DiscreteChoice nodes.
repl_fn: Function to replace DiscreteChoice nodes.
Returns:
Replaced tree.
"""
def _replace_tree_nodes(node, repl_fn, ref_map):
if isinstance(node, dict):
output_tree = OrderedDict()
for param_name, param in node.items():
output_tree[param_name] = _replace_tree_nodes(param, repl_fn, ref_map)
elif isinstance(node, DiscreteChoice):
if id(node) not in ref_map:
ref_map[id(node)] = repl_fn(node)
return ref_map[id(node)]
else:
return node
return output_tree
return _replace_tree_nodes(config_tree, repl_fn, {})
def replace_ptree_pair_choices(
query_tree: Union[Dict, DiscreteChoice], aux_tree: Union[Dict, Any], repl_fn: Callable[[DiscreteChoice, Any], Any]
) -> OrderedDict:
"""Replace all DiscreteChoice nodes in a tree with the output of a function and an auxilary tree.
Args:
query_tree: Tree with DiscreteChoice nodes.
aux_tree: Auxiliary tree with DiscreteChoice nodes.
repl_fn: Function that takes a `query_node` and an `aux_node` and returns a replacement for `query_node`.
Returns:
Replaced tree.
"""
def _replace_tree_nodes(query_node, aux_node, repl_fn, ref_map):
if isinstance(query_node, dict):
output_tree = OrderedDict()
for param_name, param in query_node.items():
assert param_name in aux_node, "`aux_tree` must be identical to `query_tree` apart from terminal nodes"
output_tree[param_name] = _replace_tree_nodes(param, aux_node[param_name], repl_fn, ref_map)
elif isinstance(query_node, DiscreteChoice):
if id(query_node) not in ref_map:
ref_map[id(query_node)] = repl_fn(query_node, aux_node)
return ref_map[id(query_node)]
else:
return query_node
return output_tree
return _replace_tree_nodes(query_tree, aux_tree, repl_fn, {})
|
archai/archai/discrete_search/search_spaces/config/utils.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/config/utils.py",
"repo_id": "archai",
"token_count": 1576
}
| 322 |
import torch
from torch import nn
from typing import Optional
from transformers.models.gpt2.configuration_gpt2 import GPT2Config
from archai.discrete_search.search_spaces.config import ArchConfig
try:
from flash_attn.ops.fused_dense import FusedDense
except ImportError:
FusedDense = None
from .utils import get_optim_flag
from .ops import OPS
class MixedAttentionBlock(nn.Module):
def __init__(self, arch_config: ArchConfig, hf_config: GPT2Config,
hidden_size: int, layer_idx: Optional[int] = None) -> None:
super().__init__()
self.total_heads = arch_config.pick('total_heads')
self.op_allocation = {
op_name: round(self.total_heads * op_prop)
for op_name, op_prop in arch_config.pick('op_allocation')
}
self.hf_config = hf_config
self.hidden_size = hidden_size
self.layer_idx = layer_idx
self.head_size = hidden_size // self.total_heads
assert hidden_size % self.total_heads == 0
assert sum(list(self.op_allocation.values())) == self.total_heads, \
'Invalid allocation'
op_kwargs = {
'hidden_size': self.hidden_size,
'total_heads': self.total_heads,
'hf_config': self.hf_config,
'layer_idx': self.layer_idx
}
self.ops = nn.ModuleList([
OPS[op_name].cls(
arch_config=arch_config.pick(op_name) if OPS[op_name].requires_extra_config else None,
op_heads=self.op_allocation[op_name],
**op_kwargs
) for op_name, op_heads in self.op_allocation.items()
if op_heads > 0
])
self.resid_dropout = nn.Dropout(self.hf_config.resid_pdrop)
self.fused_dense = get_optim_flag(self.hf_config, 'fused_dense')
if self.fused_dense:
assert FusedDense is not None, 'Need to install fused_mlp'
self.out_proj = FusedDense(self.hidden_size, self.hidden_size)
else:
self.out_proj = nn.Linear(self.hidden_size, self.hidden_size)
def forward(self, hidden_states, **kwargs):
# Concatenates outputs from each op in the embedding dim
output = [op(hidden_states, **kwargs)[0] for op in self.ops]
output = torch.cat(output, dim=-1)
return self.out_proj(output), None
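# Illustrative sketch of the head-allocation arithmetic used above (the op names and
# proportions are hypothetical). Each op receives round(total_heads * proportion) heads,
# and the sum must equal `total_heads`, mirroring the assertion in __init__.
if __name__ == "__main__":
    total_heads = 8
    op_proportions = {"causal_self_attn": 0.75, "sep_conv": 0.25}
    op_allocation = {name: round(total_heads * prop) for name, prop in op_proportions.items()}
    assert sum(op_allocation.values()) == total_heads, "Invalid allocation"
    print(op_allocation)  # {'causal_self_attn': 6, 'sep_conv': 2}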
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/mixed_op.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/mixed_op.py",
"repo_id": "archai",
"token_count": 1117
}
| 323 |
# Copied from https://github.com/HazyResearch/state-spaces/blob/06dbbdfd0876501a7f12bf3262121badbc7658af/src/models/hippo/hippo.py
""" Definitions of A and B matrices for various HiPPO operators. """
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from scipy import special as ss
from einops import rearrange, repeat
from opt_einsum import contract
def embed_c2r(A):
A = rearrange(A, '... m n -> ... m () n ()')
A = np.pad(A, ((0, 0), (0, 1), (0, 0), (0, 1))) + \
np.pad(A, ((0, 0), (1, 0), (0, 0), (1,0)))
return rearrange(A, 'm x n y -> (m x) (n y)')
# TODO take in 'torch' option to return torch instead of numpy, and convert the shape of B from (N, 1) to (N)
def transition(measure, N, **measure_args):
""" A, B transition matrices for different measures
measure: the type of measure
legt - Legendre (translated)
legs - Legendre (scaled)
glagt - generalized Laguerre (translated)
lagt, tlagt - previous versions of (tilted) Laguerre with slightly different normalization
"""
# Laguerre (translated)
if measure == 'lagt':
b = measure_args.get('beta', 1.0)
A = np.eye(N) / 2 - np.tril(np.ones((N, N)))
B = b * np.ones((N, 1))
# Generalized Laguerre
# alpha 0, beta small is most stable (limits to the 'lagt' measure)
# alpha 0, beta 1 has transition matrix A = [lower triangular 1]
elif measure == 'glagt':
alpha = measure_args.get('alpha', 0.0)
beta = measure_args.get('beta', 0.01)
A = -np.eye(N) * (1 + beta) / 2 - np.tril(np.ones((N, N)), -1)
B = ss.binom(alpha + np.arange(N), np.arange(N))[:, None]
L = np.exp(.5 * (ss.gammaln(np.arange(N)+alpha+1) - ss.gammaln(np.arange(N)+1)))
A = (1./L[:, None]) * A * L[None, :]
B = (1./L[:, None]) * B * np.exp(-.5 * ss.gammaln(1-alpha)) * beta**((1-alpha)/2)
# Legendre (translated)
elif measure == 'legt':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1) ** .5
j, i = np.meshgrid(Q, Q)
A = R[:, None] * np.where(i < j, (-1.)**(i-j), 1) * R[None, :]
B = R[:, None]
A = -A
# Halve again for timescale correctness
A *= 0.5
B *= 0.5
# LMU: equivalent to LegT up to normalization
elif measure == 'lmu':
Q = np.arange(N, dtype=np.float64)
R = (2*Q + 1)[:, None] # / theta
j, i = np.meshgrid(Q, Q)
A = np.where(i < j, -1, (-1.)**(i-j+1)) * R
B = (-1.)**Q[:, None] * R
# Legendre (scaled)
elif measure == 'legs':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
        B = B.copy() # Otherwise "UserWarning: given NumPy array is not writeable..." after torch.as_tensor(B)
elif measure == 'legsd':
q = np.arange(N, dtype=np.float64)
col, row = np.meshgrid(q, q)
r = 2 * q + 1
M = -(np.where(row >= col, r, 0) - np.diag(q))
T = np.sqrt(np.diag(2 * q + 1))
A = T @ M @ np.linalg.inv(T)
B = np.diag(T)[:, None]
        B = B.copy() # Otherwise "UserWarning: given NumPy array is not writeable..." after torch.as_tensor(B)
A += .5 * B*B[None, :, 0]
B = B / 2.0
elif measure in ['fourier_diag', 'foud']:
freqs = np.arange(N//2)
d = np.stack([freqs, np.zeros(N//2)], axis=-1).reshape(-1)[:-1]
A = 2*np.pi*(-np.diag(d, 1) + np.diag(d, -1))
A = A - .5 * np.eye(N)
B = np.zeros(N)
B[0::2] = 2**.5
B[0] = 1
B = B[:, None]
elif measure in ['fourier', 'fout']:
freqs = np.arange(N//2)
d = np.stack([np.zeros(N//2), freqs], axis=-1).reshape(-1)[1:]
A = np.pi*(-np.diag(d, 1) + np.diag(d, -1))
B = np.zeros(N)
B[0::2] = 2**.5
B[0] = 1
# Subtract off rank correction - this corresponds to the other endpoint u(t-1) in this case
A = A - B[:, None] * B[None, :]
B = B[:, None]
elif measure == 'fourier_decay':
freqs = np.arange(N//2)
d = np.stack([np.zeros(N//2), freqs], axis=-1).reshape(-1)[1:]
A = np.pi*(-np.diag(d, 1) + np.diag(d, -1))
B = np.zeros(N)
B[0::2] = 2**.5
B[0] = 1
# Subtract off rank correction - this corresponds to the other endpoint u(t-1) in this case
A = A - .5 * B[:, None] * B[None, :]
B = .5 * B[:, None]
elif measure == 'fourier2': # Double everything: orthonormal on [0, 1]
freqs = 2*np.arange(N//2)
d = np.stack([np.zeros(N//2), freqs], axis=-1).reshape(-1)[1:]
A = np.pi*(-np.diag(d, 1) + np.diag(d, -1))
B = np.zeros(N)
B[0::2] = 2**.5
B[0] = 1
# Subtract off rank correction - this corresponds to the other endpoint u(t-1) in this case
A = A - B[:, None] * B[None, :] * 2
B = B[:, None] * 2
elif measure == 'random':
A = np.random.randn(N, N) / N
B = np.random.randn(N, 1)
elif measure == 'diagonal':
A = -np.diag(np.exp(np.random.randn(N)))
B = np.random.randn(N, 1)
else:
raise NotImplementedError
return A, B
def rank_correction(measure, N, rank=1, dtype=torch.float):
""" Return low-rank matrix L such that A + L is normal """
if measure == 'legs':
assert rank >= 1
P = torch.sqrt(.5+torch.arange(N, dtype=dtype)).unsqueeze(0) # (1 N)
elif measure == 'legt':
assert rank >= 2
P = torch.sqrt(1+2*torch.arange(N, dtype=dtype)) # (N)
P0 = P.clone()
P0[0::2] = 0.
P1 = P.clone()
P1[1::2] = 0.
P = torch.stack([P0, P1], dim=0) # (2 N)
        P *= 2**(-0.5) # Halve the rank correction just like the original matrix was halved
elif measure == 'lagt':
assert rank >= 1
P = .5**.5 * torch.ones(1, N, dtype=dtype)
elif measure in ['fourier', 'fout']:
P = torch.zeros(N)
P[0::2] = 2**.5
P[0] = 1
P = P.unsqueeze(0)
elif measure == 'fourier_decay':
P = torch.zeros(N)
P[0::2] = 2**.5
P[0] = 1
P = P.unsqueeze(0)
P = P / 2**.5
elif measure == 'fourier2':
P = torch.zeros(N)
P[0::2] = 2**.5
P[0] = 1
P = 2**.5 * P.unsqueeze(0)
elif measure in ['fourier_diag', 'foud', 'legsd']:
P = torch.zeros(1, N, dtype=dtype)
else: raise NotImplementedError
d = P.size(0)
if rank > d:
P = torch.cat([P, torch.zeros(rank-d, N, dtype=dtype)], dim=0) # (rank N)
return P
def initial_C(measure, N, dtype=torch.float):
""" Return C that captures the other endpoint in the HiPPO approximation """
if measure == 'legt':
C = (torch.arange(N, dtype=dtype)*2+1)**.5 * (-1)**torch.arange(N)
elif measure == 'fourier':
C = torch.zeros(N)
C[0::2] = 2**.5
C[0] = 1
else:
C = torch.zeros(N, dtype=dtype) # (N)
return C
def nplr(measure, N, rank=1, dtype=torch.float, diagonalize_precision=True):
""" Return w, p, q, V, B such that
(w - p q^*, B) is unitarily equivalent to the original HiPPO A, B by the matrix V
i.e. A = V[w - p q^*]V^*, B = V B
"""
assert dtype == torch.float or dtype == torch.double
cdtype = torch.cfloat if dtype == torch.float else torch.cdouble
A, B = transition(measure, N)
A = torch.as_tensor(A, dtype=dtype) # (N, N)
B = torch.as_tensor(B, dtype=dtype)[:, 0] # (N,)
P = rank_correction(measure, N, rank=rank, dtype=dtype) # (r N)
AP = A + torch.sum(P.unsqueeze(-2)*P.unsqueeze(-1), dim=-3)
# We require AP to be nearly skew-symmetric
_A = AP + AP.transpose(-1, -2)
err = torch.sum((_A - _A[0,0]*torch.eye(N))**2) / N
if err > 1e-5: # if not torch.allclose(_A - _A[0,0]*torch.eye(N), torch.zeros(N, N), atol=1e-5):
print("WARNING: HiPPO matrix not skew symmetric", err)
# Take advantage of identity + skew-symmetric form to calculate real and imaginary parts separately
# Imaginary part can use eigh instead of eig
w_re = torch.mean(torch.diagonal(AP), -1, keepdim=True)
# Diagonalize in double precision
if diagonalize_precision: AP = AP.to(torch.double)
# w, V = torch.linalg.eig(AP) # (..., N) (..., N, N)
w_im, V = torch.linalg.eigh(AP*-1j) # (..., N) (..., N, N)
if diagonalize_precision: w_im, V = w_im.to(cdtype), V.to(cdtype)
w = w_re + 1j * w_im
# Check: V w V^{-1} = A
# print("check", V @ torch.diag_embed(w) @ V.conj().transpose(-1, -2))
# Only keep half of each conjugate pair
_, idx = torch.sort(w.imag)
w_sorted = w[idx]
V_sorted = V[:, idx]
# There is an edge case when eigenvalues can be 0, which requires some machinery to handle
# We use a huge hack here: Assume only one pair is 0, and that it is the first row/column of A (only happens in Fourier case)
V = V_sorted[:, :N//2]
w = w_sorted[:N//2]
assert w[-2].abs() > 1e-4, "Only 1 zero eigenvalue allowed in diagonal part of A"
if w[-1].abs() < 1e-4:
V[:, -1] = 0.
V[0, -1] = 2**-0.5
V[1, -1] = 2**-0.5 * 1j
_AP = V @ torch.diag_embed(w) @ V.conj().transpose(-1, -2)
err = torch.sum((2*_AP.real-AP)**2)/N
if err > 1e-5:
print("Warning: Diagonalization of A matrix not numerically precise - error", err)
# print("check", V @ torch.diag_embed(w) @ V.conj().transpose(-1, -2))
V_inv = V.conj().transpose(-1, -2)
# C = initial_C(measure, N, dtype=dtype)
B = contract('ij, j -> i', V_inv, B.to(V)) # V^* B
# C = contract('ij, j -> i', V_inv, C.to(V)) # V^* C
P = contract('ij, ...j -> ...i', V_inv, P.to(V)) # V^* P
# return w, P, B, C, V
return w, P, B, V
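# Illustrative sketch (a minimal demo, not an authoritative recipe): builds the HiPPO-LegS
# transition matrices and their normal-plus-low-rank (NPLR) form for a small state size,
# printing only shapes since the exact values depend on the chosen measure.
if __name__ == "__main__":
    N = 8
    A, B = transition('legs', N)
    print('A:', A.shape, 'B:', B.shape)  # (8, 8) (8, 1)
    w, P, B_nplr, V = nplr('legs', N, rank=1)
    print('w:', w.shape, 'P:', P.shape, 'B:', B_nplr.shape, 'V:', V.shape)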
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/hippo.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/hippo.py",
"repo_id": "archai",
"token_count": 4948
}
| 324 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# Copyright (c) 2018, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0.
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
class AdaptiveEmbedding(nn.Module):
def __init__(
self,
vocab_size: int,
d_embed: int,
d_model: int,
cutoffs: Tuple[int],
div_val: Optional[int] = 1,
sample_softmax: Optional[bool] = False,
fp16: Optional[bool] = False,
) -> None:
super().__init__()
self.vocab_size = vocab_size
self.d_embed = d_embed
self.d_model = d_model
self.div_val = div_val
self.cutoffs = cutoffs + [vocab_size]
self.cutoffs_ends = [0] + self.cutoffs
self.n_clusters = len(self.cutoffs) - 1
self.emb_scale = d_model**0.5
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(vocab_size, d_embed, sparse=sample_softmax > 0))
if d_model != d_embed:
self.emb_projs.append(nn.Parameter(torch.zeros(d_model, d_embed)))
else:
for i in range(len(self.cutoffs)):
d_embed_i = d_embed // (div_val**i)
d_out_i = self.cutoffs_ends[i + 1] - self.cutoffs_ends[i]
self.emb_layers.append(nn.Embedding(d_out_i, d_embed_i))
self.emb_projs.append(nn.Parameter(torch.zeros(d_model, d_embed_i)))
if fp16:
self.dtype = torch.float16
else:
self.dtype = torch.float32
def forward(self, inputs: torch.FloatTensor) -> torch.FloatTensor:
if self.div_val == 1:
embed = self.emb_layers[0](inputs)
if self.d_model != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
inputs_flatten = inputs.view(-1)
embed_flatten = torch.zeros(
[inputs_flatten.size(0), self.d_model],
dtype=self.dtype,
device=inputs_flatten.device,
)
# Every cutoff should be considered for calculating final embeddings
for i in range(len(self.cutoffs)):
cutoff_start, cutoff_end = (
self.cutoffs_ends[i],
self.cutoffs_ends[i + 1],
)
# Gathers a mask of valid indexes
mask_i = (inputs_flatten >= cutoff_start) & (inputs_flatten < cutoff_end)
indexes_i = mask_i.nonzero().squeeze()
if indexes_i.numel() == 0:
continue
inputs_i = inputs_flatten.index_select(0, indexes_i) - cutoff_start
embed_i = self.emb_layers[i](inputs_i)
embed_i = F.linear(embed_i, self.emb_projs[i]).to(self.dtype)
embed_flatten.index_copy_(0, indexes_i, embed_i)
embed_shape = inputs.size() + (self.d_model,)
embed = embed_flatten.view(embed_shape)
embed.mul_(self.emb_scale)
return embed
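# Illustrative usage sketch (the vocabulary size and cutoffs below are hypothetical).
# The vocabulary is split at cutoffs [100, 500]; with div_val=2 each successive cluster
# uses a smaller embedding dimension that is projected back up to d_model.
if __name__ == "__main__":
    embedding = AdaptiveEmbedding(vocab_size=1000, d_embed=64, d_model=64,
                                  cutoffs=[100, 500], div_val=2)
    # Deterministic token ids spanning all three clusters, shaped (batch, seq_len)
    tokens = (torch.arange(2 * 16) * 31).reshape(2, 16)
    out = embedding(tokens)
    print(out.shape)  # torch.Size([2, 16, 64])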
|
archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/mem_transformer_utils/adaptive_embedding.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/mem_transformer_utils/adaptive_embedding.py",
"repo_id": "archai",
"token_count": 1644
}
| 325 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from itertools import chain
from typing import Optional
import numpy as np
import torch
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.onnx.config_utils.codegen_onnx_config import CodeGenOnnxConfig
from archai.onnx.config_utils.gpt2_onnx_config import GPT2FlexOnnxConfig, GPT2OnnxConfig
from archai.onnx.config_utils.onnx_config_base import OnnxConfig
from archai.onnx.export_utils import prepare_model_for_onnx, weight_sharing
from archai.onnx.onnx_loader import load_from_onnx
logger = OrderedDictLogger(source=__name__)
AVAILABLE_ONNX_CONFIGS = {"codegen": CodeGenOnnxConfig, "gpt2": GPT2OnnxConfig, "gpt2-flex": GPT2FlexOnnxConfig}
def validate_onnx_outputs(
onnx_config: OnnxConfig,
reference_model: torch.nn.Module,
onnx_model_path: str,
atol: float,
) -> None:
"""Validate the outputs of an ONNX model against a reference PyTorch model.
Args:
onnx_config: Configuration for ONNX model.
reference_model: PyTorch model to use as reference.
onnx_model_path: Path to the ONNX model.
atol: Tolerance value for comparing the model outputs.
Raises:
ValueError: If the shapes or values of the ONNX model outputs do not match
the reference model outputs within the specified tolerance.
"""
logger.info("Validating model ...")
session = load_from_onnx(onnx_model_path)
ref_inputs = onnx_config.generate_dummy_inputs()
ref_outputs = reference_model(**ref_inputs)
# Flattens the reference outputs
ref_outputs_dict = {}
for name, value in ref_outputs.items():
if name == "past_key_values":
name = "present"
elif name == "logits":
name = "probs"
if isinstance(value, (list, tuple)):
for i, v in enumerate(value):
name_with_idx = f"{name}_{i}"
ref_outputs_dict[name_with_idx] = v
else:
ref_outputs_dict[name] = value
# Transforms the inputs into an ONNX compatible format
onnx_inputs = {}
for name, value in ref_inputs.items():
if name == "past_key_values":
name = "past"
if isinstance(value, (list, tuple)):
for i, v in enumerate(value):
name_with_idx = f"{name}_{i}"
onnx_inputs[name_with_idx] = v.numpy()
else:
onnx_inputs[name] = value.numpy()
# Performs the ONNX inference session
onnx_named_outputs = [output for output in onnx_config.get_outputs().keys()]
onnx_outputs = session.run(onnx_named_outputs, onnx_inputs)
# Checks whether subset of ONNX outputs is valid
ref_outputs_set, onnx_outputs_set = set(ref_outputs_dict.keys()), set(onnx_config.get_outputs())
if not onnx_outputs_set.issubset(ref_outputs_set):
error = f"Unmatched outputs: {onnx_outputs_set} (ONNX) and {ref_outputs_set} (reference)"
logger.error(error)
raise ValueError(error)
else:
logger.debug(f"Matched outputs: {onnx_outputs_set}")
# Checks whether shapes and values are within expected tolerance
for name, ort_value in zip(onnx_config.get_outputs(), onnx_outputs):
logger.debug(f"Validating output: {name}")
ref_value = ref_outputs_dict[name].detach().numpy()
if not ort_value.shape == ref_value.shape:
error = f"Unmatched shape: {ort_value.shape} (ONNX) and {ref_value.shape} (reference)"
logger.error(error)
raise ValueError(error)
else:
logger.debug(f"Matched shape: {ort_value.shape} (ONNX) and {ref_value.shape} (reference)")
diff = np.amax(np.abs(ref_value - ort_value))
if not np.allclose(ref_value, ort_value, atol=atol):
error = f"Unmatched difference: {diff:.4e} > {atol}"
logger.error(error)
raise ValueError(error)
else:
logger.debug(f"Matched difference: {diff:.4e} < {atol}")
def export_to_onnx(
model: torch.nn.Module,
output_model_path: str,
task: Optional[str] = "causal-lm",
use_past: Optional[bool] = True,
validate: Optional[bool] = True,
share_weights: Optional[bool] = True,
opset: Optional[int] = 11,
atol: Optional[float] = 1e-4,
) -> OnnxConfig:
"""Export a pre-trained PyTorch model to ONNX format.
Args:
model: Instance of the PyTorch model to be exported.
output_model_path: Path to save the exported ONNX model.
task: Task identifier to use proper inputs/outputs.
use_past: Whether to include past key/values in the model.
validate: Whether to validate the exported model.
share_weights: Whether to share the embedding and softmax weights.
opset: Set of operations to use with ONNX.
atol: Tolerance between input and exported model.
Returns:
ONNX configuration of the model that was exported.
"""
logger.info(f"Exporting model: {output_model_path}")
model_type = model.config.model_type
available_configs = list(AVAILABLE_ONNX_CONFIGS.keys())
assert model_type in available_configs, f"`model_type`: {model_type} is not supported for ONNX export."
onnx_config = AVAILABLE_ONNX_CONFIGS[model_type](
model.config,
task=task,
use_past=use_past,
)
model = prepare_model_for_onnx(model, model_type)
dynamic_axes = {
name: axes for name, axes in chain(onnx_config.get_inputs().items(), onnx_config.get_outputs().items())
}
torch.onnx.export(
model,
(onnx_config.generate_dummy_inputs(),),
f=output_model_path,
export_params=True,
input_names=list(onnx_config.get_inputs().keys()),
output_names=list(onnx_config.get_outputs().keys()),
dynamic_axes=dynamic_axes,
opset_version=opset,
do_constant_folding=True,
)
if validate:
validate_onnx_outputs(onnx_config, model, output_model_path, atol)
if share_weights:
weight_sharing(output_model_path, model_type)
return onnx_config
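# Illustrative usage sketch (assumes the optional `transformers` and `onnxruntime`
# dependencies are installed; downloads the pre-trained GPT-2 weights and writes the
# ONNX file to an arbitrary local path).
if __name__ == "__main__":
    from transformers import GPT2LMHeadModel

    model = GPT2LMHeadModel.from_pretrained("gpt2")
    onnx_config = export_to_onnx(model, "gpt2.onnx", task="causal-lm",
                                 use_past=True, validate=True, opset=11)
    print(f"Exported with outputs: {list(onnx_config.get_outputs().keys())}")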
|
archai/archai/onnx/export.py/0
|
{
"file_path": "archai/archai/onnx/export.py",
"repo_id": "archai",
"token_count": 2647
}
| 326 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import functools
from typing import Any
def rgetattr(obj: Any, attr: str, *args) -> Any:
"""Recursively get an attribute from an object.
This function allows accessing nested attributes by separating each level with a dot (e.g., "attr1.attr2.attr3").
If any attribute along the chain does not exist, the function returns the default value
specified in the `*args` parameter.
Args:
obj: Object from which the attribute will be retrieved.
attr: Name of the attribute to be retrieved, with each level separated by a dot.
Returns:
Attribute from the object.
Example:
>>> obj = MyObject()
>>> rgetattr(obj, "attr1.attr2.attr3")
Reference:
https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-subobjects-chained-properties/31174427#31174427
"""
def _getattr(obj: Any, attr: Any) -> Any:
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split("."))
def rsetattr(obj: Any, attr: str, value: Any) -> None:
"""Recursively set an attribute on an object.
This function allows setting nested attributes by separating each level with a dot (e.g., "attr1.attr2.attr3").
Args:
obj: Object on which the attribute will be set.
attr: Name of the attribute to be set, with each level separated by a dot.
value: New value for the attribute.
Example:
>>> obj = MyObject()
>>> rsetattr(obj, "attr1.attr2.attr3", new_value)
Reference:
https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-subobjects-chained-properties/31174427#31174427
"""
pre_attr, _, post_attr = attr.rpartition(".")
return setattr(rgetattr(obj, pre_attr) if pre_attr else obj, post_attr, value)
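# Illustrative sketch using a simple namespace object in place of the `MyObject`
# placeholder from the docstrings above (the attribute names are hypothetical).
if __name__ == "__main__":
    from types import SimpleNamespace

    obj = SimpleNamespace(model=SimpleNamespace(config=SimpleNamespace(hidden_size=256)))
    print(rgetattr(obj, "model.config.hidden_size"))  # 256
    rsetattr(obj, "model.config.hidden_size", 512)
    print(rgetattr(obj, "model.config.hidden_size"))  # 512
    print(rgetattr(obj, "model.config.missing", None))  # None (falls back to the default)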
|
archai/archai/quantization/quantization_utils.py/0
|
{
"file_path": "archai/archai/quantization/quantization_utils.py",
"repo_id": "archai",
"token_count": 684
}
| 327 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from collections import defaultdict
from typing import Dict, List
import numpy as np
import archai.supergraph.algos.divnas.analyse_activations as aa
from archai.supergraph.nas.cell import Cell
from archai.supergraph.nas.operations import Op, Zero
class Divnas_Cell():
    ''' Wrapper cell class for divnas-specific modifications '''
def __init__(self, cell:Cell):
self._cell = cell
self._collect_activations = False
self._edgeoptype = None
self._sigma = None
self._counter = 0
self.node_covs:Dict[int, np.array] = {}
self.node_num_to_node_op_to_cov_ind:Dict[int, Dict[Op, int]] = {}
def collect_activations(self, edgeoptype, sigma:float)->None:
self._collect_activations = True
self._edgeoptype = edgeoptype
self._sigma = sigma
# collect bookkeeping info
for i, node in enumerate(self._cell.dag):
node_op_to_cov_ind:Dict[Op, int] = {}
counter = 0
for edge in node:
for op, alpha in edge._op.ops():
if isinstance(op, Zero):
continue
node_op_to_cov_ind[op] = counter
counter += 1
self.node_num_to_node_op_to_cov_ind[i] = node_op_to_cov_ind
# go through all edges in the DAG and if they are of edgeoptype
# type then set them to collect activations
for i, node in enumerate(self._cell.dag):
# initialize the covariance matrix for this node
num_ops = 0
for edge in node:
if hasattr(edge._op, 'PRIMITIVES') and type(edge._op) == self._edgeoptype:
num_ops += edge._op.num_primitive_ops - 1
edge._op.collect_activations = True
self.node_covs[id(node)] = np.zeros((num_ops, num_ops))
def update_covs(self):
assert self._collect_activations
for _, node in enumerate(self._cell.dag):
# TODO: convert to explicit ordering
all_activs = []
for j, edge in enumerate(node):
if type(edge._op) == self._edgeoptype:
activs = edge._op.activations
all_activs.append(activs)
# update covariance matrix
activs_converted = self._convert_activations(all_activs)
new_cov = aa.compute_rbf_kernel_covariance(activs_converted, sigma=self._sigma)
updated_cov = (self._counter * self.node_covs[id(node)] + new_cov) / (self._counter + 1)
self.node_covs[id(node)] = updated_cov
def clear_collect_activations(self):
for _, node in enumerate(self._cell.dag):
for edge in node:
if hasattr(edge._op, 'PRIMITIVES') and type(edge._op) == self._edgeoptype:
edge._op.collect_activations = False
self._collect_activations = False
self._edgeoptype = None
self._sigma = None
self._node_covs = {}
def _convert_activations(self, all_activs:List[List[np.array]])->List[np.array]:
''' Converts to the format needed by covariance computing functions
        Input all_activs: List[List[np.array]]. The outer list has one entry per edge (num_edges).
        The inner list has num_ops entries; each element has shape [batch_size, x, y, z]. '''
num_ops = len(all_activs[0])
for activs in all_activs:
assert num_ops == len(activs)
all_edge_list = []
for edge in all_activs:
obsv_dict = defaultdict(list)
# assumption edge_np will be (num_ops, batch_size, x, y, z)
edge_np = np.array(edge)
for op in range(edge_np.shape[0]):
for b in range(edge_np.shape[1]):
feat = edge_np[op][b]
feat = feat.flatten()
obsv_dict[op].append(feat)
feature_list = [*range(num_ops)]
for key in obsv_dict.keys():
feat = np.array(obsv_dict[key])
feature_list[key] = feat
all_edge_list.extend(feature_list)
return all_edge_list
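# Illustrative sketch of the activation layout `_convert_activations` expects:
# `all_activs[edge][op]` holds a batch of feature maps shaped (batch, x, y, z). The
# helper does not touch `self`, so it is called unbound here purely for demonstration;
# the sizes below are hypothetical.
if __name__ == "__main__":
    num_edges, num_ops, batch = 2, 3, 4
    fake_activs = [[np.random.rand(batch, 8, 2, 2) for _ in range(num_ops)]
                   for _ in range(num_edges)]
    converted = Divnas_Cell._convert_activations(None, fake_activs)
    # One flattened (batch, features) array per (edge, op) pair
    print(len(converted), converted[0].shape)  # 6 (4, 32)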
|
archai/archai/supergraph/algos/divnas/divnas_cell.py/0
|
{
"file_path": "archai/archai/supergraph/algos/divnas/divnas_cell.py",
"repo_id": "archai",
"token_count": 2032
}
| 328 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
from overrides import overrides
from archai.supergraph.algos.manual.manual_evaluater import ManualEvaluater
from archai.supergraph.algos.manual.manual_searcher import ManualSearcher
from archai.supergraph.nas.arch_trainer import TArchTrainer
from archai.supergraph.nas.exp_runner import ExperimentRunner
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
class ManualExperimentRunner(ExperimentRunner):
"""Runs manually designed models such as resnet"""
@overrides
def model_desc_builder(self)->Optional[ModelDescBuilder]:
return None
@overrides
def trainer_class(self)->TArchTrainer:
return None # no search trainer
@overrides
def searcher(self)->ManualSearcher:
return ManualSearcher()
@overrides
def evaluater(self)->ManualEvaluater:
return ManualEvaluater()
@overrides
def copy_search_to_eval(self)->None:
pass
|
archai/archai/supergraph/algos/manual/manual_exp_runner.py/0
|
{
"file_path": "archai/archai/supergraph/algos/manual/manual_exp_runner.py",
"repo_id": "archai",
"token_count": 353
}
| 329 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional
import torch
from overrides import overrides
from torch import Tensor
from archai.supergraph.nas.arch_params import ArchParams
from archai.supergraph.nas.model_desc import OpDesc
from archai.supergraph.nas.operations import Op
class NasBench101Op(Op):
def __init__(self, op_desc:OpDesc, arch_params: Optional[ArchParams], affine:bool):
super().__init__()
vertex_op_name = op_desc.params['vertex_op']
proj_first = op_desc.params['proj_first'] # first input needs projection
self._vertex_op = Op.create(OpDesc(vertex_op_name, params=op_desc.params,
in_len=1, trainables=None),
affine=affine, arch_params=None)
self._in_len = op_desc.in_len
self._proj_op = Op.create(OpDesc('convbnrelu_1x1', params=op_desc.params,
in_len=1, trainables=None),
affine=affine, arch_params=None) \
if proj_first else None
@overrides
def forward(self, x:List[Tensor]):
assert not isinstance(x, torch.Tensor)
assert len(x) == self._in_len
        x0 = x[0] if self._proj_op is None else self._proj_op(x[0])
s = sum(x[1:]) + x0
out = self._vertex_op(s)
return out
|
archai/archai/supergraph/algos/nasbench101/nasbench101_op.py/0
|
{
"file_path": "archai/archai/supergraph/algos/nasbench101/nasbench101_op.py",
"repo_id": "archai",
"token_count": 702
}
| 330 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .providers.cifar10_provider import Cifar10Provider
from .providers.cifar100_provider import Cifar100Provider
from .providers.fashion_mnist_provider import FashionMnistProvider
from .providers.flower102_provider import Flower102Provider
from .providers.food101_provider import Food101Provider
from .providers.imagenet_provider import ImagenetProvider
from .providers.mit67_provider import Mit67Provider
from .providers.mnist_provider import MnistProvider
from .providers.sport8_provider import Sport8Provider
from .providers.svhn_provider import SvhnProvider
|
archai/archai/supergraph/datasets/__init__.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/__init__.py",
"repo_id": "archai",
"token_count": 183
}
| 331 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from overrides import overrides
from torchvision import datasets
from torchvision.transforms import transforms
from archai.common import utils
from archai.common.config import Config
from archai.supergraph.datasets.dataset_provider import (
DatasetProvider,
ImgSize,
TrainTestDatasets,
register_dataset_provider,
)
class ImagenetProvider(DatasetProvider):
def __init__(self, conf_dataset:Config):
super().__init__(conf_dataset)
self._dataroot = utils.full_path(conf_dataset['dataroot'])
@overrides
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
trainset, testset = None, None
if load_train:
trainset = datasets.ImageFolder(root=os.path.join(self._dataroot, 'ImageNet', 'train'),
transform=transform_train)
# compatibility with older PyTorch
if not hasattr(trainset, 'targets'):
trainset.targets = [lb for _, lb in trainset.samples]
if load_test:
testset = datasets.ImageFolder(root=os.path.join(self._dataroot, 'ImageNet', 'val'),
transform=transform_test)
return trainset, testset
@overrides
def get_transforms(self, img_size:ImgSize)->tuple:
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
_IMAGENET_PCA = {
'eigval': [0.2175, 0.0188, 0.0045],
'eigvec': [
[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203],
]
}
transform_train, transform_test = None, None
transform_train = transforms.Compose([
transforms.RandomResizedCrop(224,
scale=(0.08, 1.0), # TODO: these two params are normally not specified
interpolation=transforms.InterpolationMode.BICUBIC),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(
brightness=0.4,
contrast=0.4,
saturation=0.4,
hue=0.2
),
transforms.ToTensor(),
# TODO: Lighting is not used in original darts paper
# Lighting(0.1, _IMAGENET_PCA['eigval'], _IMAGENET_PCA['eigvec']),
transforms.Normalize(mean=MEAN, std=STD)
])
transform_test = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=MEAN, std=STD)
])
return transform_train, transform_test
register_dataset_provider('imagenet', ImagenetProvider)
|
archai/archai/supergraph/datasets/providers/imagenet_provider.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/providers/imagenet_provider.py",
"repo_id": "archai",
"token_count": 1349
}
| 332 |
"""
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file are heavily influenced by [2],
which is implemented for ImageNet and doesn't have option A for the identity shortcut.
Moreover, most of the implementations on the web are copy-pasted from
torchvision's resnet and have the wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparison, etc.) have the following
numbers of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4M
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun. Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in your work, please don't forget to mention the
author, Yerlan Idelbayev.
"""
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
__all__ = ['ResNet', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet1202']
def _weights_init(m):
classname = m.__class__.__name__
#print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option='A'):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == 'A':
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(lambda x:
F.pad(x[:, :, ::2, ::2], (0, 0, 0, 0, planes//4, planes//4), "constant", 0))
elif option == 'B':
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 16
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.linear = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def resnet20():
return ResNet(BasicBlock, [3, 3, 3])
def resnet32():
return ResNet(BasicBlock, [5, 5, 5])
def resnet44():
return ResNet(BasicBlock, [7, 7, 7])
def resnet56():
return ResNet(BasicBlock, [9, 9, 9])
def resnet110():
return ResNet(BasicBlock, [18, 18, 18])
def resnet1202():
return ResNet(BasicBlock, [200, 200, 200])
def test(net):
import numpy as np
total_params = 0
for x in filter(lambda p: p.requires_grad, net.parameters()):
total_params += np.prod(x.data.numpy().shape)
print("Total number of params", total_params)
print("Total layers", len(list(filter(lambda p: p.requires_grad and len(p.data.size())>1, net.parameters()))))
if __name__ == "__main__":
for net_name in __all__:
if net_name.startswith('resnet'):
print(net_name)
test(globals()[net_name]())
print()
|
archai/archai/supergraph/models/resnet_paper.py/0
|
{
"file_path": "archai/archai/supergraph/models/resnet_paper.py",
"repo_id": "archai",
"token_count": 2228
}
| 333 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional, Tuple
from overrides import EnforceOverrides
from torch import nn
from archai.supergraph.nas.cell import Cell
from archai.supergraph.nas.model import Model
from archai.supergraph.nas.model_desc import CellDesc, EdgeDesc, ModelDesc, NodeDesc
class Finalizers(EnforceOverrides):
"""Provides base algorithms for finalizing model, cell and edge which can be overriden
For op-level finalize, just put logic in op's finalize.
For model/cell/edge level finalize, you can override the methods in this class to customize the behavior. To override any of these methods, simply create new class in your algos folder, for example, diversity/diversity_finalizers.py. In this file create class that derives from Finalizers. Then in your algos exp_runner.py, return instance of that class in its finalizers() method.
"""
def finalize_model(self, model:Model, to_cpu=True, restore_device=True)->ModelDesc:
# move model to CPU before finalize because each op will serialize
# its parameters and we don't want copy of these parameters hanging on GPU
original = model.device_type()
if to_cpu:
model.cpu()
# finalize will create copy of state and this can overflow GPU RAM
assert model.device_type() == 'cpu'
cell_descs = self.finalize_cells(model)
if restore_device:
model.to(original, non_blocking=True)
return ModelDesc(conf_model_desc=model.desc.conf_model_desc,
model_stems=[op.finalize()[0] for op in model.model_stems],
pool_op=model.pool_op.finalize()[0],
cell_descs=cell_descs,
aux_tower_descs=model.desc.aux_tower_descs,
logits_op=model.logits_op.finalize()[0])
def finalize_cells(self, model:Model)->List[CellDesc]:
return [self.finalize_cell(cell, i, model.desc) \
for i,cell in enumerate(model.cells)]
def finalize_cell(self, cell:Cell, cell_index:int,
model_desc:ModelDesc, *args, **kwargs)->CellDesc:
# first finalize each node, we will need to recreate node desc with final version
max_final_edges = model_desc.max_final_edges
node_descs:List[NodeDesc] = []
for i,node in enumerate(cell.dag):
node_desc = self.finalize_node(node, i, cell.desc.nodes()[i],max_final_edges)
node_descs.append(node_desc)
desc = cell.desc
finalized = CellDesc(
id = desc.id, cell_type=desc.cell_type, conf_cell=desc.conf_cell,
stems=[cell.s0_op.finalize()[0], cell.s1_op.finalize()[0]],
stem_shapes=desc.stem_shapes,
nodes = node_descs, node_shapes=desc.node_shapes,
post_op=cell.post_op.finalize()[0],
out_shape=desc.out_shape,
trainables_from = desc.trainables_from
)
return finalized
def finalize_node(self, node:nn.ModuleList, node_index:int,
node_desc:NodeDesc, max_final_edges:int,
*args, **kwargs)->NodeDesc:
# get edge ranks, if rank is None it is deemed as required
pre_selected, edge_desc_ranks = self.get_edge_ranks(node)
ranked_selected = self.select_edges(edge_desc_ranks, max_final_edges)
selected_edges = pre_selected + ranked_selected
return NodeDesc(selected_edges, node_desc.conv_params)
def select_edges(self, edge_desc_ranks:List[Tuple[EdgeDesc, float]],
max_final_edges:int)->List[EdgeDesc]:
if len(edge_desc_ranks) > max_final_edges:
# sort by rank and pick bottom
edge_desc_ranks.sort(key=lambda d:d[1], reverse=True)
edge_desc_ranks = edge_desc_ranks[:max_final_edges]
return [edr[0] for edr in edge_desc_ranks]
def get_edge_ranks(self, node:nn.ModuleList)\
->Tuple[List[EdgeDesc], List[Tuple[EdgeDesc, float]]]:
selected_edges, edge_desc_ranks = [], []
for edge in node:
edge_desc, rank = self.finalize_edge(edge)
# if rank is None then it is required rank
if rank is None:
selected_edges.append(edge_desc) # required edge
else: # optional edge
edge_desc_ranks.append((edge_desc, rank))
return selected_edges, edge_desc_ranks
def finalize_edge(self, edge)->Tuple[EdgeDesc, Optional[float]]:
op_desc, rank = edge._op.finalize()
return (EdgeDesc(op_desc, edge.input_ids), rank)
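# Illustrative sketch of the customization path described in the class docstring: a
# hypothetical algo-specific subclass that ranks optional edges by an `alpha` attribute
# on the edge op (an assumption; treated as 0.0 when the op has no such attribute).
from overrides import overrides

class ExampleFinalizers(Finalizers):
    @overrides
    def finalize_edge(self, edge)->Tuple[EdgeDesc, Optional[float]]:
        op_desc, _ = edge._op.finalize()
        rank = float(getattr(edge._op, 'alpha', 0.0))  # hypothetical ranking signal
        return (EdgeDesc(op_desc, edge.input_ids), rank)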
|
archai/archai/supergraph/nas/finalizers.py/0
|
{
"file_path": "archai/archai/supergraph/nas/finalizers.py",
"repo_id": "archai",
"token_count": 2019
}
| 334 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Iterator, List, Optional
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from archai.common.utils import zip_eq
class OptimSched:
"""Holds the optimizer and scheduler"""
def __init__(self, optim:Optimizer, sched:Optional[_LRScheduler],
sched_on_epoch:Optional[bool])->None:
self.optim = optim
self.sched = sched
self.sched_on_epoch = sched_on_epoch
class MultiOptim:
def __init__(self) -> None:
self._optim_scheds:List[OptimSched] = []
def append(self, optim_sched:OptimSched)->None:
self._optim_scheds.append(optim_sched)
def zero_grad(self)->None:
for optim_sched in self._optim_scheds:
optim_sched.optim.zero_grad()
def step(self)->None:
for optim_sched in self._optim_scheds:
optim_sched.optim.step()
if optim_sched.sched and not optim_sched.sched_on_epoch:
optim_sched.sched.step(epoch=None)
def epoch(self, epoch:Optional[int]=None)->None:
for optim_sched in self._optim_scheds:
if optim_sched.sched and optim_sched.sched_on_epoch:
optim_sched.sched.step(epoch=epoch)
def get_lr(self, optim_index:int, param_index:int)->float:
return self._optim_scheds[optim_index].optim.param_groups[param_index]['lr']
def state_dict(self)->dict:
optim_states = [optim_sched.optim.state_dict() for optim_sched in self]
sched_states = [optim_sched.sched.state_dict() if optim_sched.sched else None \
for optim_sched in self]
return {'optim_states': optim_states, 'sched_states':sched_states}
def load_state_dict(self, state_dict:dict)->None:
optim_states = state_dict['optim_states']
sched_states = state_dict['sched_states']
for optim_sched, optim_state, sched_state in zip_eq(self, optim_states, sched_states):
optim_sched.optim.load_state_dict(optim_state)
if optim_sched.sched:
assert sched_state is not None
optim_sched.sched.load_state_dict(sched_state)
else:
assert sched_state is None
def __getitem__(self, index)->OptimSched:
return self._optim_scheds[index]
def __len__(self)->int:
return len(self._optim_scheds)
def __iter__(self)->Iterator[OptimSched]:
return iter(self._optim_scheds)
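# Illustrative usage sketch (the two parameter groups below are hypothetical, e.g. model
# weights and architecture parameters). Both optimizers are stepped together; only the
# first group attaches an epoch-level scheduler.
if __name__ == "__main__":
    import torch
    from torch.optim import SGD, Adam
    from torch.optim.lr_scheduler import StepLR

    weights = torch.nn.Linear(4, 2)
    alphas = torch.nn.Parameter(torch.zeros(3))

    multi_optim = MultiOptim()
    w_optim = SGD(weights.parameters(), lr=0.1)
    multi_optim.append(OptimSched(w_optim, StepLR(w_optim, step_size=1), sched_on_epoch=True))
    multi_optim.append(OptimSched(Adam([alphas], lr=0.01), None, None))

    loss = weights(torch.randn(8, 4)).sum() + alphas.sum()
    multi_optim.zero_grad()
    loss.backward()
    multi_optim.step()   # steps both optimizers (no per-step scheduler attached here)
    multi_optim.epoch()  # advances only the epoch-level scheduler
    print(multi_optim.get_lr(0, 0))  # learning rate of the first group after one epoch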
|
archai/archai/supergraph/utils/multi_optim.py/0
|
{
"file_path": "archai/archai/supergraph/utils/multi_optim.py",
"repo_id": "archai",
"token_count": 1135
}
| 335 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from dataclasses import dataclass, field
from transformers.training_args import TrainingArguments
@dataclass
class DistillerTrainingArguments(TrainingArguments):
"""Training arguments for distillation-based training.
This class extends `TrainingArguments` and provides additional arguments
specific to distillation-based training.
Args:
alpha: Weight ratio between the student and KD losses. This should be
a value in the range [0, 1].
temperature: Annealing ratio for the softmax activations. This value
should be greater than 0.
"""
alpha: float = field(default=0.5, metadata={"help": "Weight ratio between student and KD losses."})
temperature: float = field(default=1.0, metadata={"help": "Annealing ratio for the softmax activations."})
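# Illustrative usage sketch (assumes the standard HF training dependencies, e.g.
# `accelerate`, are installed; the output directory is an arbitrary path). The extra
# distillation fields ride alongside the regular `TrainingArguments` fields.
if __name__ == "__main__":
    args = DistillerTrainingArguments("distill-output", alpha=0.3, temperature=2.0)
    print(args.alpha, args.temperature)  # 0.3 2.0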
|
archai/archai/trainers/nlp/hf_training_args.py/0
|
{
"file_path": "archai/archai/trainers/nlp/hf_training_args.py",
"repo_id": "archai",
"token_count": 262
}
| 336 |
__include__: 'darts.yaml' # just use darts defaults
nas:
search:
model_desc:
num_edges_to_sample: 2 # number of edges each node will take input from
eval:
model_desc:
num_edges_to_sample: 2
|
archai/confs/algos/random.yaml/0
|
{
"file_path": "archai/confs/algos/random.yaml",
"repo_id": "archai",
"token_count": 87
}
| 337 |
dataset:
name: svhn
autoaug:
model:
type: wresnet28_10
loader:
aug: fa_reduced_svhn
cutout: 20
batch: 512
epochs: 200
lr_schedule:
type: 'cosine'
warmup:
multiplier: 4
epochs: 5
optimizer:
type: sgd
lr: 0.01
nesterov: True
decay: 0.0005
|
archai/confs/aug/wresnet28x10_svhn_b512.yaml/0
|
{
"file_path": "archai/confs/aug/wresnet28x10_svhn_b512.yaml",
"repo_id": "archai",
"token_count": 157
}
| 338 |
#!/bin/bash
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Runs an interactive bash within the container
docker run --rm \
--gpus all \
--name nvidia22.10-archai \
--shm-size=10g \
--ipc=host \
--ulimit memlock=-1 \
--ulimit stack=67108864 \
-e NCCL_P2P_LEVEL=NVL \
-it nvidia22.10-archai:latest
|
archai/docker/run_container.sh/0
|
{
"file_path": "archai/docker/run_container.sh",
"repo_id": "archai",
"token_count": 149
}
| 339 |
<jupyter_start><jupyter_text>Multi-node SearchThis notebook and accompanying code shows how to run an[Archai](https://github.com/microsoft/archai/) Neural Architecture Search (NAS) using an [AzureMachine Learning Workspace](https://ml.azure.com/) with partial training of models (on a GPUcluster) providing validation accuracies to guide that search. This notebook requires that you havethe [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) installed and loggedin otherwise the create compute cluster cell will fail. You will also need to create an Azure MLworkspace using [https://ml.azure.com] and an Azure storage account. The storage account does notneed to be in the same resource group as the workspace.This notebook also assumes you have a python environment setup using `pip install -e .[aml]` in yourArchai repository root. This example requires a `config.json` file containing the information aboutyour Azure subscription, the Azure ML workspace name and resource group, and the azure storageaccount key and name:```json{ "subscription_id": "...", "resource_group": "...", "workspace_name": "...", "storage_account_key": "...", "storage_account_name": "..."}```See:- [Set up a Python development environment for Azure Machine Learning](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-configure-environmentlocal-and-dsvm-only-create-a-workspace-configuration-file) - [Get your Storage Account keys](https://learn.microsoft.com/en-us/azure/storage/common/storage-account-keys-manage?tabs=azure-portal).<jupyter_code>import os
import json
import archai.common.azureml_helper as aml_helper
import archai.common.notebook_helper as nb_helper
from azure.ai.ml.entities import UserIdentityConfiguration
# locate the code that we will use in this notebook.
scripts_dir = "./scripts"
data_scripts_dir = "./data_prep"
config_file = "../.azureml/config.json"
config = json.load(open(config_file, 'r'))
for required_key in ['subscription_id', 'resource_group', 'workspace_name', 'storage_account_key', 'storage_account_name']:
if not required_key in config:
print(f"### Error: please add a {required_key} to {config_file}")
storage_account_key = config['storage_account_key']
storage_account_name = config['storage_account_name']
workspace_name = config['workspace_name']
print(f'Using workspace {workspace_name} and storage account: {storage_account_name}')
# This is the AML experiment name
experiment_name = 'mnist_test_run'<jupyter_output>Using workspace archai-aml-test and storage account: archaimnistmodels<jupyter_text>Our [search.py](scripts/search.py) will use a ConfigSearchSpace based on the following parameters, the model defined in [model.py](scripts/model.py) will take various configurations and build different shape CNN models for each configuration as shown below. Each time you execute this cell it will generate a new random CNN model.<jupyter_code>from archai.discrete_search.search_spaces.config import ArchParamTree, DiscreteChoice, ArchConfig
from scripts.model import MyModel
arch_param_tree = ArchParamTree({
'nb_layers': DiscreteChoice(list(range(1, 13))),
'kernel_size': DiscreteChoice([1, 3, 5, 7]),
'hidden_dim': DiscreteChoice([16, 32, 64, 128])
})
arch_config = arch_param_tree.sample_config()
print(arch_config)
MyModel(arch_config)
ml_client = aml_helper.get_aml_client_from_file(config_path=config_file)
print(f'Using workspace "{ml_client.workspace_name}" in resource group "{ml_client.resource_group_name}"')<jupyter_output>Found the config file in: ..\.azureml\config.json<jupyter_text>Create the compute clusters that we need. If this cell fails with `DefaultAzureCredential failed to retrieve a token from the included credentials`.then you might need to run `az login` from the command line using the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli).If it also fails with `ResourceNotFoundError: (ParentResourceNotFound)` then you may need to run `az account set --subscription ...` with the subscription id you specified in the above `config.json` file and check that the resource group you specified really does contain the Azure ML workspaceyou specified.<jupyter_code># Create cpu cluster for running the search
cpu_compute_name = "nas-cpu-cluster-D14-v2"
aml_helper.create_compute_cluster(ml_client, cpu_compute_name, size="Standard_D14_v2", location="westus2")
# Create gpu cluster for running the search
gpu_compute_name = "nas-gpu-cluster-NC6"
aml_helper.create_compute_cluster(ml_client, gpu_compute_name, size="Standard_NC6", location="westus2", max_instances=8)<jupyter_output>You already have a cluster named nas-cpu-cluster-D14-v2, we'll reuse it as is.
You already have a cluster named nas-gpu-cluster-NC6, we'll reuse it as is.<jupyter_text>Create the AML Environment from our conda.yaml file. This ensures our conda environment contains the Archai framework that we are using here.<jupyter_code>archai_job_env = aml_helper.create_environment_from_file(ml_client,
image="mcr.microsoft.com/azureml/openmpi3.1.2-ubuntu18.04:latest",
conda_file="conda.yaml",
version='1.0.0')
environment_name = f"{archai_job_env.name}:{archai_job_env.version}"
print(environment_name)<jupyter_output>Environment with name aml-archai is registered to workspace, the environment version is 1.0.0
aml-archai:1.0.0<jupyter_text>Ensure our storage account is setup with a `models` blob store container for storing the final onnx models, a `datasets` blob store for our training dataset and a `status` storage table.<jupyter_code>from archai.common.store import ArchaiStore
# Register the datastore with AML
data_store_name = "datasets"
data_container_name = "datasets"
model_store_name = "models"
model_container_name = "models"
root_folder = experiment_name
# make sure the datasets container exists
store = ArchaiStore(storage_account_name, storage_account_key, blob_container_name=data_container_name)
store.upload_blob(root_folder, config_file)
# make sure the models container exists
store = ArchaiStore(storage_account_name, storage_account_key, blob_container_name=model_container_name)
store.upload_blob("config", config_file)
datastore_path = f'azureml://datastores/{data_store_name}/paths/{root_folder}'
results_path = f'azureml://datastores/{model_store_name}/paths/{root_folder}'<jupyter_output><empty_output><jupyter_text>Register the `models` blob store container in the ML workspace `datastores`<jupyter_code>from azure.ai.ml.entities import AzureBlobDatastore
from azure.ai.ml.entities._credentials import AccountKeyConfiguration
try:
model_store = ml_client.datastores.get(model_store_name)
except:
model_store = AzureBlobDatastore(
name=model_store_name,
description="Datastore pointing to our models blob container.",
account_name=storage_account_name,
container_name=model_container_name,
credentials=AccountKeyConfiguration(
account_key=storage_account_key
),
)
ml_client.create_or_update(model_store)<jupyter_output><empty_output><jupyter_text>Register the `datasets` blob store container in the ML workspace `datastores`<jupyter_code>try:
data_store = ml_client.datastores.get(data_store_name)
except:
data_store = AzureBlobDatastore(
name=data_store_name,
description="Datastore pointing to our dataset container.",
account_name=storage_account_name,
container_name=data_container_name,
credentials=AccountKeyConfiguration(
account_key=storage_account_key
),
)
ml_client.create_or_update(data_store)<jupyter_output><empty_output><jupyter_text>Create a pipeline command that prepares our training dataset using `prep_data_store.py`. This pipeline will write the output do our `datasets` blob store so the training jobs can find the dataset there all ready to go. That way each training job doesn't have to repeat the download and preparation of the dataset.<jupyter_code>from azure.ai.ml import command
from azure.ai.ml import Input, Output
data_prep_component = command(
name="data_prep2",
display_name="Data preparation for training",
description="Downloads the remote dataset to our blob store.",
inputs= {
"name": Input(type='string')
},
outputs= {
"data": Output(type="uri_folder", path=datastore_path, mode="rw_mount")
},
# The source folder of the component
code=data_scripts_dir,
command="""python3 prep_data_store.py \
--path ${{outputs.data}} \
""",
environment=environment_name,
)<jupyter_output><empty_output><jupyter_text>Create a command that kicks off the Archai Search using `search.py`, it will take the dataset as input so it can pass it along to the training jobs later on. It also produces some search output files, .png charts, and json results files which will also go into our `models` blob store under the folder named the same as our `experiment_name`.<jupyter_code>hex_config = bytes(json.dumps(config), encoding='utf-8').hex()
partial_epochs = 0.1
max_unseen_population = 16 # best if this is an even multiple of our GPU cluster size; we'll get much better throughput.
search_iterations = 5 # for quick debugging.
init_num_models = 10
output_path = results_path + '/' + experiment_name
fixed_args = f'--data_dir {datastore_path} ' + \
f'--output_dir {output_path} ' + \
f'--environment "{environment_name}" ' + \
f'--experiment "{experiment_name}" ' + \
f'--compute "{gpu_compute_name}" ' + \
f'--config "{hex_config}" ' + \
f'--search_iterations {search_iterations} ' + \
f'--init_num_models {init_num_models} ' + \
f'--max_unseen_population {max_unseen_population} ' + \
f'--partial_training_epochs {partial_epochs} '
search_component = command(
name="search",
display_name="The Archai NAS search",
description="Runs the NAS search algorithm.",
is_deterministic=False,
inputs= {
"data": Input(type="uri_folder")
},
outputs= {
"results": Output(type="uri_folder", path=output_path, mode="rw_mount")
},
code=scripts_dir,
identity= UserIdentityConfiguration(),
command='python3 search.py --local_output ${{outputs.results}} ' + \
fixed_args,
environment=environment_name,
)<jupyter_output><empty_output><jupyter_text>This creates a command that will do full training on the final list of the best models produced by the above search command. Also creates a monitor command that monitors all the parallel training jobs and gathers the results when they are all complete updating our final `models.json` file.<jupyter_code>from scripts.commands import make_training_pipeline_command, make_monitor_command
full_epochs = 10
timeout = 3600
hex_config = bytes(json.dumps(config), encoding='utf-8').hex()
full_training_component = make_training_pipeline_command(
"Full Training Pipeline", hex_config, scripts_dir, gpu_compute_name,
datastore_path, output_path, experiment_name, environment_name, full_epochs, save_models=True)
keys = ['val_acc']
monitor_component = make_monitor_command(hex_config, scripts_dir, results_path, environment_name, keys, timeout)<jupyter_output><empty_output><jupyter_text>Create an AML pipeline that pipes the output of the data prep to the search component, then when search is finished, starts a full training job of the top models and then waits for all that training to finish.<jupyter_code>from azure.ai.ml import dsl
@dsl.pipeline(
compute=cpu_compute_name,
description="Archai search pipeline",
)
def archai_search_pipeline():
data_prep_job = data_prep_component(
name="MNIST"
)
search_job = search_component(
data=data_prep_job.outputs.data
)
training_job = full_training_component(
models=search_job.outputs.results,
data=data_prep_job.outputs.data
)
monitor_job = monitor_component(
models=search_job.outputs.results,
training_results=training_job.outputs.results
)
return {
"results": monitor_job.outputs.results
}<jupyter_output><empty_output><jupyter_text>Submit the pipeline job so it starts running in your Azure ML workspace.<jupyter_code>pipeline_job = ml_client.jobs.create_or_update(
archai_search_pipeline(),
# Project's name
experiment_name=experiment_name,
)<jupyter_output><empty_output><jupyter_text>Open the pipeline azure ML studio portal in your web browser (this works when you are running this notebook in VS code).<jupyter_code>import webbrowser
webbrowser.open(pipeline_job.services["Studio"].endpoint)
job_name = pipeline_job.name
print(f'Started pipeline: {job_name}')
# you can fetch any pipeline job again if you needed to continue this notebook later:
from azure.ai.ml import dsl, Input, Output
# job_name = 'amusing_monkey_2248p6qm47'
pipeline_job = ml_client.jobs.get(job_name)<jupyter_output><empty_output><jupyter_text>PlotsThis cell can be run multiple times and you will see updates as each iteration finishes.You can even run this later after restarting the jupyter notebook because it is not dependent on variable state it is onlydependent on the persistent 'models' blob store.<jupyter_code>from scripts.utils import get_results, show_results, download_best_models
from archai.common.store import ArchaiStore
store = ArchaiStore(storage_account_name, storage_account_key, blob_container_name=model_container_name)
print(f'Fetching results for {experiment_name}...')
blob_path = root_folder + '/' + experiment_name
output_folder = experiment_name
get_results(store, blob_path, output_folder)
download_best_models(store, experiment_name, output_folder)<jupyter_output><empty_output><jupyter_text>Take a look at the pareto curve plots.<jupyter_code># Now show the png plots produced by the search
show_results(output_folder)
from IPython.display import display
from IPython.core.display import HTML
df = nb_helper.get_search_csv(output_folder)
df.drop(columns=['parent', 'parents'], inplace=True)
df = df[df["AmlTrainingValAccuracy"] > 0.97]
df[df["is_pareto"] == True]<jupyter_output><empty_output><jupyter_text>Test ONNX Runtime Inference on the Best ModelWhen the search pipeline completes you should have a `models.json` file in the experiment_name output folder and you can use that to find the most accurate model and run it through the ONNX runtime to see if the ONNX inference gets the same accuracy.<jupyter_code># find the top model in the json results.
filename = os.path.join(output_folder, "models.json")
best_of_the_best = None
top_accuracy = -1
row = None
if not os.path.isfile(filename):
raise Exception(f"Could not find {filename} file. Please wait for job to finish.")
results = json.load(open(filename, "r"))
models = results['models']
for a in models:
if type(a) is dict and 'val_acc' in a:
val_acc = a['val_acc']
if val_acc > top_accuracy:
top_accuracy = val_acc
best_of_the_best = a['id']
row = a
model = MyModel(ArchConfig(row))
arch = f"nb_layers={model.nb_layers}, kernel_size={model.kernel_size}, hidden_dim={model.hidden_dim}"
print(f"The top model is {best_of_the_best} with accuracy {top_accuracy} and architecture {arch}")
blob_path = root_folder + '/' + best_of_the_best
model_output = os.path.join(output_folder, 'top_model')
get_results(store, blob_path, model_output)
model_path = os.path.join(model_output, 'model.onnx')
# Great, now let's test if this model works as advertised.
from archai.datasets.cv.mnist_dataset_provider import MnistDatasetProvider
import onnxruntime as ort
import numpy as np
dataset_provider = MnistDatasetProvider()
val_data = dataset_provider.get_val_dataset()
count = val_data.data.shape[0]
test = np.random.choice(count, 1)[0]
data = val_data.data[test]
import matplotlib.pyplot as plt
# check what the images look like.
plt.figure(figsize=(2,2))
plt.imshow(data, cmap='gray')
print(f'data has shape: {data.shape}')
plt.axis('off')
plt.show()
# Now run the ONNX runtime on the validation set.
# You can change this to `CUDAExecutionProvider` if you have a GPU and have
# installed the CUDA runtime.
ort_sess = ort.InferenceSession(model_path, providers=['CPUExecutionProvider'])
for i in ort_sess.get_inputs():
print(f'input: {i.name}, {i.shape}, {i.type}')
print(f'Testing {count} rows')
failed = 0
for i in range(val_data.data.shape[0]):
data = val_data.data[i]
expected = int(val_data.train_labels[i])
while len(data.shape) < 4:
data = np.expand_dims(data, axis=0)
outputs = ort_sess.run(None, {'input': data.astype(np.float32) / 255.0})
result = outputs[0]
index = np.argmax(result)
label = val_data.classes[index]
if expected != index:
# print(f'### Failed: {expected} and got {label}')
failed += 1
rate = (count - failed) * 100 / count
print(f"Failed {failed} out of {count} rows")
print(f'Inference pass rate is {rate} %.')
print(f'How does this compare with the training validation accuracy of {top_accuracy}?')
if np.isclose(rate, top_accuracy* 100, atol=0.1):
print('Success! The model is working as expected.')
else:
print('The onnx runtime is giving different results.')<jupyter_output>data has shape: torch.Size([28, 28])
|
archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/multi_node_search.ipynb/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/multi_node_search.ipynb",
"repo_id": "archai",
"token_count": 6162
}
| 340 |
import argparse
from archai.discrete_search.algos.evolution_pareto import EvolutionParetoSearch
from archai.discrete_search.api.search_objectives import SearchObjectives
from archai.discrete_search.evaluators.nlp.parameters import NonEmbeddingParamsProxy
from archai.discrete_search.evaluators.nlp.transformer_flex_latency import (
TransformerFlexOnnxLatency,
)
from archai.discrete_search.evaluators.nlp.transformer_flex_memory import (
TransformerFlexOnnxMemory,
)
from archai.discrete_search.search_spaces.nlp.transformer_flex.search_space import (
TransformerFlexSearchSpace,
)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Searches with Transformer-Flex.")
parser.add_argument(
"-mt",
"--model_type",
type=str,
choices=["gpt2"],
default="gpt2",
help="Type of model.",
)
parser.add_argument("-o", "--output_dir", type=str, default="", help="Output folder.")
parser.add_argument(
"-n",
"--num_iters",
type=int,
default=5,
help="Number of search iterations.",
)
parser.add_argument(
"-inm",
"--init_num_models",
type=int,
default=10,
help="Number of initialization models.",
)
parser.add_argument(
"-nrm",
"--num_random_mix",
type=int,
default=5,
help="Number of random models to mix with the population in each iteration.",
)
parser.add_argument(
"-mup",
"--max_unseen_population",
type=int,
default=100,
help="Maximum number of unseen models in each iteration.",
)
parser.add_argument(
"-mpp",
"--mutations_per_parent",
type=int,
default=1,
help="Number of distinct mutations generated for each Pareto frontier member.",
)
parser.add_argument(
"-nc",
"--num_crossovers",
type=int,
default=5,
help="Total number of crossovers generated per iteration.",
)
parser.add_argument(
"-s",
"--seed",
type=int,
default=1,
help="Random seed.",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
space = TransformerFlexSearchSpace(args.model_type, vocab_size=50257, max_sequence_length=1024)
search_objectives = SearchObjectives()
search_objectives.add_objective(
"non_embedding_params",
NonEmbeddingParamsProxy(),
higher_is_better=True,
compute_intensive=False,
constraint=(1e6, 1e9),
)
search_objectives.add_objective(
"onnx_latency",
TransformerFlexOnnxLatency(space, seq_len=1024, n_trials=5, use_past=False),
higher_is_better=False,
compute_intensive=False,
)
search_objectives.add_objective(
"onnx_memory",
TransformerFlexOnnxMemory(space, use_past=False),
higher_is_better=False,
compute_intensive=False,
)
algo = EvolutionParetoSearch(
space,
search_objectives,
None,
args.output_dir,
num_iters=args.num_iters,
init_num_models=args.init_num_models,
num_random_mix=args.num_random_mix,
max_unseen_population=args.max_unseen_population,
mutations_per_parent=args.mutations_per_parent,
num_crossovers=args.num_crossovers,
save_pareto_model_weights=False,
seed=args.seed,
)
algo.search()
|
archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/src/search.py/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/src/search.py",
"repo_id": "archai",
"token_count": 1577
}
| 341 |
Documentation
=============
The Archai project welcomes contributions to its documentation, which is written with Sphinx and reStructuredText (RST). If you are interested in contributing to the project in this way, please follow these steps:
#. Ensure that Sphinx is installed. You can install it using ``pip install archai[docs]``.
#. Check out the Archai codebase and create a new branch for your changes. This will allow for easy submission of your code as a pull request upon completion.
#. Create an ``.rst`` file in the :github:`docs` directory. For example, if writing API documentation for the :github:`archai/trainers/nlp/hf_training_args.py` file, the corresponding path would be :github:`docs/reference/api/archai.trainers.nlp.rst`.
#. Check the pre-defined format and include the corresponding section:
.. code-block:: rst
Training Arguments
------------------
.. automodule:: archai.trainers.nlp.hf_training_args
:members:
:undoc-members:
#. To build the documentation, run the following command from the root directory of the documentation (i.e. the ``docs`` directory). The HTML files will be created in a ``_build`` directory.
.. tab:: Linux/MacOS
.. code-block:: sh
cd archai/docs
make html
.. tab:: Windows
.. code-block:: bat
cd archai/docs
.\make.bat html
These are just some basic guidelines for writing API documentation with Sphinx. For more information, check the `Sphinx documentation <https://www.sphinx-doc.org/en/master>`_ and the `RST guide <https://docutils.sourceforge.io/docs/user/rst/quickref.html>`_. Additionally, it is recommended to review the Archai documentation style guide before contributing to the documentation to ensure consistency.
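Depending on where the new file lives, it may also need to be listed in the relevant ``toctree`` so that it appears in the navigation. A minimal, hypothetical example (the exact index file depends on the section being documented):
.. code-block:: rst
   .. toctree::
      :maxdepth: 2
      archai.trainers.nlp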
|
archai/docs/contributing/documentation.rst/0
|
{
"file_path": "archai/docs/contributing/documentation.rst",
"repo_id": "archai",
"token_count": 544
}
| 342 |
from typing import List, Optional
from overrides import overrides
import numpy as np
import torch
import re
from torch import nn
from archai.discrete_search.api import ArchaiModel
import json
from random import Random
from archai.discrete_search.api import DiscreteSearchSpace
from model import MyModel
class CNNSearchSpace(DiscreteSearchSpace):
def __init__(self, min_layers: int = 1, max_layers: int = 12,
kernel_list=(1, 3, 5, 7), hidden_list=(16, 32, 64, 128),
seed: int = 1):
self.min_layers = min_layers
self.max_layers = max_layers
self.kernel_list = kernel_list
self.hidden_list = hidden_list
self.rng = Random(seed)
def get_archid(self, model: MyModel) -> str:
return f'L={model.nb_layers}, K={model.kernel_size}, H={model.hidden_dim}'
@overrides
def random_sample(self) -> ArchaiModel:
# Randomly chooses architecture parameters
nb_layers = self.rng.randint(self.min_layers, self.max_layers)
kernel_size = self.rng.choice(self.kernel_list)
hidden_dim = self.rng.choice(self.hidden_list)
model = MyModel(nb_layers, kernel_size, hidden_dim)
# Wraps model into ArchaiModel
return ArchaiModel(arch=model, archid=self.get_archid(model))
@overrides
def save_arch(self, model: ArchaiModel, file: str):
with open(file, 'w') as fp:
json.dump({
'nb_layers': model.arch.nb_layers,
'kernel_size': model.arch.kernel_size,
'hidden_dim': model.arch.hidden_dim
}, fp)
@overrides
def load_arch(self, file: str):
config = json.load(open(file))
model = MyModel(**config)
return ArchaiModel(arch=model, archid=self.get_archid(model))
@overrides
def save_model_weights(self, model: ArchaiModel, file: str):
state_dict = model.arch.get_state_dict()
torch.save(state_dict, file)
@overrides
def load_model_weights(self, model: ArchaiModel, file: str):
model.arch.load_state_dict(torch.load(file))
def from_archid(self, archid: str) -> MyModel:
# parse the format 'L=1, K=5, H=64'
regex = re.compile(r'L=(\d+), K=(\d+), H=(\d+)')
m = regex.match(archid).groups()
if len(m) == 3:
config = {
'nb_layers': int(m[0]),
'kernel_size': int(m[1]),
'hidden_dim': int(m[2])
}
return MyModel(**config)
else:
raise Exception(f"Archid '{archid}' is not in the correct format")
def ensure_model(self, model: ArchaiModel):
if not isinstance(model.arch, MyModel):
model.arch = self.from_archid(model.archid)
from archai.discrete_search.api.search_space import EvolutionarySearchSpace, BayesOptSearchSpace
class CNNSearchSpaceExt(CNNSearchSpace, EvolutionarySearchSpace, BayesOptSearchSpace):
    '''We are subclassing CNNSearchSpace just to save space.'''
@overrides
def mutate(self, model_1: ArchaiModel) -> ArchaiModel:
self.ensure_model(model_1)
config = {
'nb_layers': model_1.arch.nb_layers,
'kernel_size': model_1.arch.kernel_size,
'hidden_dim': model_1.arch.hidden_dim
}
if self.rng.random() < 0.2:
config['nb_layers'] = self.rng.randint(self.min_layers, self.max_layers)
if self.rng.random() < 0.2:
config['kernel_size'] = self.rng.choice(self.kernel_list)
if self.rng.random() < 0.2:
config['hidden_dim'] = self.rng.choice(self.hidden_list)
mutated_model = MyModel(**config)
return ArchaiModel(
arch=mutated_model, archid=self.get_archid(mutated_model)
)
@overrides
def crossover(self, model_list: List[ArchaiModel]) -> ArchaiModel:
for m in model_list:
self.ensure_model(m)
new_config = {
'nb_layers': self.rng.choice([m.arch.nb_layers for m in model_list]),
'kernel_size': self.rng.choice([m.arch.kernel_size for m in model_list]),
'hidden_dim': self.rng.choice([m.arch.hidden_dim for m in model_list]),
}
crossover_model = MyModel(**new_config)
return ArchaiModel(
arch=crossover_model, archid=self.get_archid(crossover_model)
)
@overrides
def encode(self, model: ArchaiModel) -> np.ndarray:
self.ensure_model(model)
return np.array([model.arch.nb_layers, model.arch.kernel_size, model.arch.hidden_dim])
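# Illustrative usage (not part of the original module); assumes `model.MyModel`
# is importable, as in the imports at the top of this file.
if __name__ == '__main__':
    space = CNNSearchSpaceExt(max_layers=6, seed=42)
    sample = space.random_sample()
    print('sampled:', sample.archid)
    mutated = space.mutate(sample)
    print('mutated:', mutated.archid)
    print('encoded:', space.encode(mutated))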
|
archai/docs/getting_started/notebooks/discrete_search/cnn_search_space.py/0
|
{
"file_path": "archai/docs/getting_started/notebooks/discrete_search/cnn_search_space.py",
"repo_id": "archai",
"token_count": 2089
}
| 343 |
<jupyter_start><jupyter_text>Quantizing Models with PyTorchQuantizing an NLP-based model in PyTorch involves reducing the precision of the model's parameters to improve its inference speed and reduce its memory footprint. The process involves converting floating-point parameters to integers and can be implemented by adding a few lines of code. Loading the ModelThe first step is to load any NLP-related model. In this notebook, we will be using a pre-trained GPT-2 model from the Hugging Face's Hub.<jupyter_code>from transformers import GPT2LMHeadModel
model = GPT2LMHeadModel.from_pretrained("gpt2")<jupyter_output><empty_output><jupyter_text>Post-Training Quantization (PTQ)Post-Training Quantization (PTQ) is a technique for quantizing a pre-trained model, where dynamic quantization is used to adjust the quantization levels during runtime to ensure optimal accuracy and performance.Archai offers a wrapper function, denoted as `dynamic_quantization_torch()`, which takes care of dynamically quantizing the pre-trained model.*Note that we set PyTorch's number of threads to 1 because quantized models will only use a single thread.*<jupyter_code>import torch
from archai.quantization.ptq import dynamic_quantization_torch
torch.set_num_threads(1)
model_qnt = dynamic_quantization_torch(model)<jupyter_output>2023-03-21 15:18:12,480 - archai.quantization.ptq — INFO — Quantizing model ...<jupyter_text>Comparing Default and Quantized ModelsFinally, we can compare the size of default and quantized models, as well as the difference between their logits. Nevertheless, please note that if the model has not been pre-trained with Quantization Aware Training (QAT), it might produce different logits and have its performance diminished.<jupyter_code>from archai.common.file_utils import calculate_torch_model_size
print(f"Model: {calculate_torch_model_size(model)}MB")
print(f"Model-QNT: {calculate_torch_model_size(model_qnt)}MB")
inputs = {"input_ids": torch.randint(1, 10, (1, 192))}
logits = model(**inputs).logits
logits_qnt = model_qnt(**inputs).logits
print(f"Difference between logits: {logits_qnt - logits}")<jupyter_output>Model: 510.391647MB
Model-QNT: 431.250044MB
Difference between logits: tensor([[[-0.2147, -0.0618, -0.2794, ..., 1.0471, 1.0807, -0.8749],
[-1.4394, -1.5974, -5.1243, ..., -3.5922, -2.7616, -1.6151],
[-4.1445, -3.5687, -6.8751, ..., -3.9694, -4.0689, -3.0092],
...,
[-2.2967, -4.1277, -9.3187, ..., -1.6556, -3.2380, -1.3445],
[-2.0462, -4.3560, -9.2828, ..., -2.0148, -2.9403, -1.1727],
[-1.5593, -4.3758, -8.6710, ..., -0.7250, -2.5097, -0.7405]]],
grad_fn=<SubBackward0>)
|
archai/docs/getting_started/notebooks/nlp/torch_quantization.ipynb/0
|
{
"file_path": "archai/docs/getting_started/notebooks/nlp/torch_quantization.ipynb",
"repo_id": "archai",
"token_count": 955
}
| 344 |
Natural Language Processing
===========================
Parameters
----------
.. automodule:: archai.discrete_search.evaluators.nlp.parameters
:members:
:undoc-members:
Transformer-Flex Latency
------------------------
.. automodule:: archai.discrete_search.evaluators.nlp.transformer_flex_latency
:members:
:undoc-members:
Transformer-Flex Memory
-----------------------
.. automodule:: archai.discrete_search.evaluators.nlp.transformer_flex_memory
:members:
:undoc-members:
|
archai/docs/reference/api/archai.discrete_search.evaluators.nlp.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.discrete_search.evaluators.nlp.rst",
"repo_id": "archai",
"token_count": 161
}
| 345 |
ONNX
====
.. toctree::
:maxdepth: 2
archai.onnx.config_utils
archai.onnx.optimization_utils
ONNX Forward
------------
.. automodule:: archai.onnx.onnx_forward
:members:
:undoc-members:
ONNX Loader
-----------
.. automodule:: archai.onnx.onnx_loader
:members:
:undoc-members:
Export
------
.. automodule:: archai.onnx.export
:members:
:undoc-members:
Export (Utilities)
------------------
.. automodule:: archai.onnx.export_utils
:members:
:undoc-members:
Optimization
------------
.. automodule:: archai.onnx.optimization
:members:
:undoc-members:
|
archai/docs/reference/api/archai.onnx.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.onnx.rst",
"repo_id": "archai",
"token_count": 248
}
| 346 |
ShakeShake
==========
Shake ResNet
------------
.. automodule:: archai.supergraph.models.shakeshake.shake_resnet
:members:
:undoc-members:
Shake ResNext
-------------
.. automodule:: archai.supergraph.models.shakeshake.shake_resnext
:members:
:undoc-members:
ShakeShake
----------
.. automodule:: archai.supergraph.models.shakeshake.shakeshake
:members:
:undoc-members:
|
archai/docs/reference/api/archai.supergraph.models.shakeshake.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.supergraph.models.shakeshake.rst",
"repo_id": "archai",
"token_count": 149
}
| 347 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import json
from lm_eval.evaluator import make_table
from lm_eval.tasks import ALL_TASKS, TASK_REGISTRY
from lm_eval_harness.lm_eval_evaluator import evaluate_wrapper
from lm_eval_harness.lm_eval_hf_model import HFEvalModel
from lm_eval_harness.tasks.human_eval import HumanEval
from lm_eval_harness.utils.regex import MultiChoice, pattern_match
from transformers import AutoModelForCausalLM, AutoTokenizer
# Ensures additional tasks are loaded and registered
ALL_TASKS.append("human_eval")
TASK_REGISTRY.update({"human_eval": HumanEval})
def parse_args():
parser = argparse.ArgumentParser(description="Evaluates pre-trained models using `lm-eval`.")
parser.add_argument("pre_trained_model_path", type=str, help="Path to the pre-trained model file.")
parser.add_argument(
"hub_tokenizer_path",
type=str,
help="Name or path to the Hugging Face hub's tokenizer.",
)
parser.add_argument(
"-t",
"--tasks",
choices=MultiChoice(ALL_TASKS),
type=str,
default=None,
help="Tasks to be evaluated (separated by comma), e.g., `wsc,cb,copa`.",
)
parser.add_argument(
"-o",
"--output_path",
type=str,
default=None,
help="Path to the saved outputs.",
)
parser.add_argument(
"-ns",
"--n_few_shot_samples",
type=int,
default=0,
help="Number of few-shot samples.",
)
parser.add_argument(
"-ls",
"--limit_samples",
type=int,
default=None,
help="Limit the number of samples.",
)
parser.add_argument(
"-nc",
"--no_cache",
action="store_true",
help="Whether to not store predictions in a cache database.",
)
parser.add_argument(
"-dnp",
"--decontamination_ngrams_path",
type=str,
default=None,
help="Path to the de-contamination n-grams file.",
)
parser.add_argument(
"-ddp",
"--description_dict_path",
type=str,
default=None,
help="Path to the description dictionary file.",
)
parser.add_argument(
"-ci",
"--check_integrity",
action="store_true",
help="Whether to check integrity of tasks.",
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
if args.limit_samples:
print("Warning: --limit_samples should only be used for testing.")
task_names = ALL_TASKS if args.tasks is None else pattern_match(args.tasks.split(","), ALL_TASKS)
print(f"Selected Tasks: {task_names}")
description_dict = {}
if args.description_dict_path:
with open(args.description_dict_path, "r") as f:
description_dict = json.load(f)
model = AutoModelForCausalLM.from_pretrained(args.pre_trained_model_path)
tokenizer = AutoTokenizer.from_pretrained(args.hub_tokenizer_path)
hf_model = HFEvalModel(model, tokenizer)
outputs = evaluate_wrapper(
hf_model,
task_names,
num_fewshot=args.n_few_shot_samples,
no_cache=args.no_cache,
limit=args.limit_samples,
description_dict=description_dict,
check_integrity=args.check_integrity,
decontamination_ngrams_path=args.decontamination_ngrams_path,
)
output_json = json.dumps(outputs, indent=2)
if args.output_path:
with open(args.output_path, "w") as f:
f.write(output_json)
print(make_table(outputs))
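# Illustrative invocation (hypothetical paths; task names must exist in lm-eval's registry):
#   python evaluate_with_lm_eval_harness.py ./checkpoints/gpt2-small gpt2 -t wsc,cb,copa -o results.json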
|
archai/research/lm_eval_harness/evaluate_with_lm_eval_harness.py/0
|
{
"file_path": "archai/research/lm_eval_harness/evaluate_with_lm_eval_harness.py",
"repo_id": "archai",
"token_count": 1551
}
| 348 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import json
import os
import natsort
from lm_eval.evaluator import evaluate
from lm_eval_harness.lm_eval_hf_model import HFEvalModel
from lm_eval_harness.tasks.human_eval import HumanEval
from transformers import AutoTokenizer, CodeGenForCausalLM
from archai.common.file_utils import CHECKPOINT_REGEX
def find_checkpoints(folder_name: str) -> str:
folder_content = os.listdir(folder_name)
checkpoints = [
os.path.join(folder_name, path)
for path in folder_content
if CHECKPOINT_REGEX.search(path) is not None and os.path.isdir(os.path.join(folder_name, path))
]
checkpoints = natsort.natsorted(checkpoints)
return checkpoints
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Evaluate Hugging Face checkpoints on HumanEval.")
parser.add_argument(
"checkpoint_dir",
type=str,
help="Directory containing the checkpoints to evaluate.",
)
parser.add_argument(
"-htn",
"--hub_tokenizer_name",
type=str,
default="Salesforce/codegen-350M-mono",
help="Name of the tokenizer to use (via the Hugging Face Hub).",
)
parser.add_argument(
"-ns",
"--n_samples",
type=int,
default=1,
help="Number of code samples to generate.",
)
parser.add_argument(
"-t",
"--temperature",
type=float,
default=0.01,
help="Temperature for the code generation.",
)
parser.add_argument(
"-pk",
"--pass_at_k",
type=int,
nargs="+",
default=1,
help="Pass at k for the code generation.",
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
if not isinstance(args.pass_at_k, list):
args.pass_at_k = [args.pass_at_k]
tokenizer = AutoTokenizer.from_pretrained(args.hub_tokenizer_name)
for checkpoint in find_checkpoints(args.checkpoint_dir):
print(f"Loading checkpoint: {checkpoint}")
model = CodeGenForCausalLM.from_pretrained(checkpoint)
hf_model = HFEvalModel(model, tokenizer)
print("Evaluating on HumanEval ...")
results = evaluate(
lm=hf_model,
task_dict={
"human_eval": HumanEval(
n_samples=args.n_samples,
temperature=args.temperature,
pass_at_k=args.pass_at_k,
)
},
)
output_json = json.dumps(results, indent=2)
output_json_path = os.path.join(checkpoint, "human_eval.json")
with open(output_json_path, "w") as f:
print(f"Dumping evaluation results: {output_json_path}")
f.write(output_json)
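# Illustrative invocation (hypothetical checkpoint directory):
#   python evaluate_human_eval.py ./codegen_checkpoints -ns 5 -t 0.2 -pk 1 5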
|
archai/scripts/eval/hf/evaluate_human_eval.py/0
|
{
"file_path": "archai/scripts/eval/hf/evaluate_human_eval.py",
"repo_id": "archai",
"token_count": 1280
}
| 349 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import pathlib
from archai.common import utils
from archai.common.config import Config
# To upload a dataset to Azure, tar the folder and use a command like:
# azcopy copy "H:\dataroot_cloud\ImageNet.tar" "https://archai.blob.core.windows.net/phillytools/dataroot/ImageNet.tar"
def _create_ram_disk(req_ram: int, path: str) -> bool:
os.makedirs(path, exist_ok=True)
return True
# tmp_filepath = os.path.join(path,'delete_me.temp')
# disk_speed_command = f'dd if=/dev/zero of="{tmp_filepath}" bs=4k count=100000; rm "{tmp_filepath}"'
# utils.exec_shell_command(disk_speed_command)
# avail_mem = psutil.virtual_memory().available
# print(f'RAM Disk params: req_ram={req_ram}, avail_mem={avail_mem}, path={path}')
# if avail_mem > req_ram:
# utils.exec_shell_command(f'sudo mount -t tmpfs -o size={req_ram} pt_data "{path}"')
# utils.exec_shell_command(f'sudo mount') # display mounts
# utils.exec_shell_command(disk_speed_command)
# return True
# else:
# print('RAM disk is not created because not enough memory')
# return False
def untar_dataset(conf_name: str, pt_data_dir: str, conf_dataset: Config, dataroot: str) -> None:
if "storage_name" not in conf_dataset or not conf_dataset["storage_name"]:
print(f"data config {conf_name} ignored because storage_name key was not found or not set")
return
print(f"Untaring for data config: {conf_name}")
storage_name = conf_dataset["storage_name"] # TODO: rename to file_name
tar_filepath = os.path.join(pt_data_dir, storage_name + ".tar")
if not os.path.isfile(tar_filepath):
raise RuntimeError(f"Tar file for dataset at {tar_filepath} was not found")
tar_size = pathlib.Path(tar_filepath).stat().st_size
print("tar_filepath:", tar_filepath, "tar_size:", tar_size)
local_dataroot = utils.full_path(dataroot)
print("local_dataroot:", local_dataroot)
_create_ram_disk(tar_size, local_dataroot)
# os.makedirs(local_dataroot, exist_ok=True)
command = f'tar --skip-old-files -xf "{tar_filepath}" -C "{local_dataroot}"'
utils.exec_shell_command(command)
print(f"dataset copied from {tar_filepath} to {local_dataroot} successfully")
def _is_pt() -> bool:
    """Is this code running in pt infrastructure?"""
    return os.environ.get("PT_OUTPUT_DIR", "") != ""
def _default_dataroot() -> str:
# the home folder on ITP VMs is super slow so use local temp directory instead
return "/var/tmp/dataroot" if _is_pt() else "~/dataroot"
def main():
parser = argparse.ArgumentParser(description="Archai data install")
parser.add_argument(
"--dataroot",
type=str,
default=_default_dataroot(), # TODO:should read from conf_dataset
help="path to dataroot on local drive",
)
parser.add_argument(
"--dataset",
type=str,
default="cifar10",
        help="Name of the dataset; confs/datasets/<name>.yaml should exist and contain the name of the folder or tar file the dataset resides in",
)
args, extra_args = parser.parse_known_args()
pt_data_dir = os.environ.get("PT_DATA_DIR", "")
if not pt_data_dir:
raise RuntimeError("This script needs PT_DATA_DIR environment variable with path to dataroot on cloud drive")
pt_data_dir = utils.full_path(pt_data_dir)
print("pt_data_dir:", pt_data_dir)
conf_data_filepath = f"confs/datasets/{args.dataset}.yaml"
print("conf_data_filepath:", conf_data_filepath)
conf = Config(
file_path=conf_data_filepath
) # TODO: use common.create_config so env vars and pt stuff taken care of
for dataset_key in ["dataset", "dataset_search", "dataset_eval"]:
if dataset_key in conf:
print(f"dataset_key: {dataset_key}")
conf_dataset = conf[dataset_key]
untar_dataset(dataset_key, pt_data_dir, conf_dataset, args.dataroot)
if __name__ == "__main__":
# for testing comment below line and set destination path on line 62
# os.environ['PT_DATA_DIR'] = r'H:\dataroot_cloud'
main()
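# Illustrative invocation (assumes PT_DATA_DIR points at the cloud dataroot containing <storage_name>.tar):
#   PT_DATA_DIR=/mnt/cloud_dataroot python pt_install.py --dataset cifar10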
|
archai/scripts/supergraph/download_datasets/pt_install.py/0
|
{
"file_path": "archai/scripts/supergraph/download_datasets/pt_install.py",
"repo_id": "archai",
"token_count": 1665
}
| 350 |
import json
import pickle
import tensorflow as tf
from archai.common import utils
dataset_file = utils.full_path("~/dataroot/nasbench_ds/nasbench_only108.tfrecord")
records = []
for serialized_row in tf.python_io.tf_record_iterator(dataset_file):
module_hash, epochs, raw_adjacency, raw_operations, raw_metrics = json.loads(serialized_row.decode("utf-8"))
# dim = int(np.sqrt(len(raw_adjacency)))
# adjacency = np.array([int(e) for e in list(raw_adjacency)], dtype=np.int8)
# adjacency = np.reshape(adjacency, (dim, dim))
# operations = raw_operations.split(',')
# metrics = base64.b64decode(raw_metrics)
records.append((module_hash, epochs, raw_adjacency, raw_operations, raw_metrics))
with open(dataset_file + ".pkl", "wb") as f:
pickle.dump(records, f)
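# Illustrative sanity check (not part of the original script): reload the pickle that
# was just written and inspect the first record's module hash and epoch count.
with open(dataset_file + ".pkl", "rb") as f:
    reloaded = pickle.load(f)
module_hash, epochs = reloaded[0][0], reloaded[0][1]
print(f"{len(reloaded)} records; first: hash={module_hash}, epochs={epochs}")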
|
archai/scripts/supergraph/nasbench101/tfrecord2pkl.py/0
|
{
"file_path": "archai/scripts/supergraph/nasbench101/tfrecord2pkl.py",
"repo_id": "archai",
"token_count": 328
}
| 351 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
from transformers import (
AutoTokenizer,
CodeGenConfig,
CodeGenForCausalLM,
TrainingArguments,
)
from archai.datasets.nlp.fast_hf_dataset_provider import (
FastDataCollatorForLanguageModeling,
FastHfDatasetProvider,
)
from archai.trainers.nlp.hf_trainer import HfTrainer
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Trains a CodeGen model using the Hugging Face trainer.")
parser.add_argument(
"-dn",
"--dataset_name",
type=str,
default="wikitext",
help="Name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"-dcn",
"--dataset_config_name",
type=str,
default="wikitext-103-raw-v1",
help="Configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument("-ls", "--logging_steps", type=int, default=10, help="Number of steps between logs.")
parser.add_argument("-es", "--eval_steps", type=int, default=100, help="Number of steps between evaluations.")
parser.add_argument("-ss", "--save_steps", type=int, default=100, help="Number of steps between checkpoints.")
parser.add_argument("-bsz", "--per_device_train_batch_size", type=int, default=64, help="Batch size per device.")
parser.add_argument("-n", "--max_steps", type=int, default=1, help="Maximum number of steps.")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
collator = FastDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
dataset_provider = FastHfDatasetProvider.from_hub(
args.dataset_name,
dataset_config_name=args.dataset_config_name,
tokenizer=tokenizer,
)
train_dataset = dataset_provider.get_train_dataset(seq_len=2048)
eval_dataset = dataset_provider.get_val_dataset(seq_len=2048)
config = CodeGenConfig(
vocab_size=50304,
n_positions=2048,
n_embd=1024,
n_layer=20,
n_head=16,
rotary_dim=32,
)
model = CodeGenForCausalLM(config=config)
print(f"Total parameters: {sum(p.numel() for p in model.parameters())}")
training_args = TrainingArguments(
"hf-codegen",
evaluation_strategy="steps",
logging_steps=args.logging_steps,
eval_steps=args.eval_steps,
per_device_train_batch_size=args.per_device_train_batch_size,
learning_rate=1.8e-3,
adam_beta1=0.9,
adam_beta2=0.95,
adam_epsilon=1e-8,
weight_decay=0.1,
max_steps=args.max_steps,
save_steps=args.save_steps,
)
trainer = HfTrainer(
model=model,
args=training_args,
data_collator=collator,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
)
trainer.train()
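# Illustrative invocation (the values shown mirror the script's own defaults, except max_steps):
#   python train_codegen.py -dn wikitext -dcn wikitext-103-raw-v1 -bsz 64 -n 1000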
|
archai/scripts/trainers/hf/train_codegen.py/0
|
{
"file_path": "archai/scripts/trainers/hf/train_codegen.py",
"repo_id": "archai",
"token_count": 1288
}
| 352 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import sys
import json
import statistics
from status import get_all_status_entities, update_status_entity
CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING'
STDEV_THRESHOLD = 10 # redo any runs that have a stdev > 10% of the mean.
MAX_COUNT = 100
def find_unsteady_runs(threshold, reset, limit=None):
conn_string = os.getenv(CONNECTION_NAME)
if not conn_string:
print(f"Please specify your {CONNECTION_NAME} environment variable.")
sys.exit(1)
wobbly = []
    # Check the standard deviation and, if it is more than `threshold` percent of
    # the mean, reset total_inference_avg so the run is re-done.
for e in get_all_status_entities():
name = e['name']
if 'total_inference_avg' in e and 'model_date' in e:
total_inference_avg = json.loads(e['total_inference_avg'])
if len(total_inference_avg) < 2:
continue
stdev = int(statistics.stdev(total_inference_avg))
mean = int(statistics.mean(total_inference_avg))
changed = False
if 'stdev' not in e:
e['stdev'] = int((stdev * 100) / mean)
changed = True
r = int(stdev * 100 / mean)
if r >= threshold:
print(f"Found {name}, with mean {mean}, stdev {stdev} which is {r}% of the mean")
wobbly += [e]
if changed:
update_status_entity(e)
if reset:
s = sorted(wobbly, key=lambda e: e['model_date'])
s.reverse()
if limit:
print(f"Found {len(s)} wobbly jobs, but limiting reset to the newest {limit} jobs")
s = s[0:limit]
for e in s:
name = e['name']
print(f"Resetting {name} total_inference_avg={e['total_inference_avg']}...")
del e['total_inference_avg']
e['status'] = 'reset'
update_status_entity(e)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Find jobs that have a stdev above a given percentage level ' +
        'and optionally reset them so they run again.')
parser.add_argument(
'--threshold', type=int,
help=f'What percentage stddev to use as threshold (default {STDEV_THRESHOLD}).',
default=STDEV_THRESHOLD)
parser.add_argument(
'--limit', type=int,
help=f'Maximum number of jobs to reset (default {MAX_COUNT}).',
default=MAX_COUNT)
parser.add_argument(
'--reset',
help='Reset the runs found to be unsteady so they run again.',
action="store_true")
args = parser.parse_args()
if args.threshold < 1:
print("### threshold must be greater than 1")
else:
find_unsteady_runs(args.threshold, args.reset, args.limit)
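# Illustrative invocation (requires the MODEL_STORAGE_CONNECTION_STRING environment variable):
#   python find_unsteady_runs.py --threshold 10 --limit 20 --reset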
|
archai/tasks/face_segmentation/aml/azure/find_unsteady_runs.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/azure/find_unsteady_runs.py",
"repo_id": "archai",
"token_count": 1290
}
| 353 |
#!/bin/bash
mkdir -p /home/archai/experiment
export INPUT_DATASET=/home/archai/datasets/FaceSynthetics
if [[ ! -d $INPUT_DATASET ]]; then
mkdir -p $INPUT_DATASET
pushd $INPUT_DATASET
azcopy copy https://nasfacemodels.blob.core.windows.net/downloads/099000.zip .
unzip 099000.zip
rm -rf 099000.zip
popd
fi
python -m olive.snpe.configure
pushd /home/archai/experiment
while true
do
python -u /home/archai/archai/tasks/face_segmentation/aml/azure/runner.py
if [ $? != 0 ]; then
echo "Script returned an error code!"
fi
echo "Sleeping for 30 seconds..."
sleep 30
done
|
archai/tasks/face_segmentation/aml/docker/quantizer/run.sh/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/docker/quantizer/run.sh",
"repo_id": "archai",
"token_count": 269
}
| 354 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
def calc_pareto_frontier(points):
    """Given an array of points where the first two coordinates define a 2D point,
    return those points sorted by the first coordinate and a list of indices into
    the sorted array that define the Pareto frontier for these points."""
points = np.array(points)
sorted = points[points[:, 0].argsort()]
pareto = []
pareto += [0]
p1 = sorted[0]
for i in range(1, len(sorted)):
p2 = sorted[i]
if p2[1] > p1[1]:
pareto += [i]
p1 = p2
return (sorted, pareto)
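# Illustrative usage (not part of the original module): points are (x, y) pairs,
# e.g. (latency, accuracy); a point stays on the frontier only if its y exceeds
# every frontier point with a smaller x.
if __name__ == '__main__':
    pts = [(1.0, 0.70), (2.0, 0.65), (3.0, 0.80), (4.0, 0.78)]
    sorted_pts, frontier = calc_pareto_frontier(pts)
    print(sorted_pts[frontier])  # the frontier points, sorted by the first coordinate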
|
archai/tasks/face_segmentation/aml/util/pareto.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/util/pareto.py",
"repo_id": "archai",
"token_count": 254
}
| 355 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import sys
import itertools
from pathlib import Path
from argparse import ArgumentParser
from typing import List, Optional
from archai.common.config import Config
from archai.common.store import ArchaiStore
from archai.datasets.cv.face_synthetics import FaceSyntheticsDatasetProvider
from archai.discrete_search.api import SearchObjectives
from archai.discrete_search.algos import (
MoBananasSearch, EvolutionParetoSearch, LocalSearch,
RandomSearch, RegularizedEvolutionSearch
)
from archai.discrete_search.evaluators import TorchNumParameters, RayParallelEvaluator
from archai.discrete_search.evaluators.remote_azure_benchmark import RemoteAzureBenchmarkEvaluator
from archai.discrete_search.api.searcher import Searcher
from search_space.hgnet import HgnetSegmentationSearchSpace
from training.partial_training_evaluator import PartialTrainingValIOU
from aml.training.aml_training_evaluator import AmlPartialTrainingEvaluator
from aml.util.setup import configure_store
from aml.training.onnx_latency import AvgOnnxLatencyEvaluator
AVAILABLE_ALGOS = {
'mo_bananas': MoBananasSearch,
'evolution_pareto': EvolutionParetoSearch,
'local_search': LocalSearch,
'random_search': RandomSearch,
'regularized_evolution': RegularizedEvolutionSearch
}
AVAILABLE_SEARCH_SPACES = {
'hgnet': HgnetSegmentationSearchSpace,
}
confs_path = Path(__file__).absolute().parent / 'confs'
def filter_extra_args(extra_args: List[str], prefix: str) -> List[str]:
    return list(itertools.chain.from_iterable(
        [arg, val]
        for arg, val in zip(extra_args[::2], extra_args[1::2])
        if arg.startswith(prefix)
    ))
def main():
parser = ArgumentParser()
parser.add_argument('--dataset_dir', type=Path, help='Face Synthetics dataset directory.')
parser.add_argument('--output_dir', type=Path, help='Output directory.', default='output')
parser.add_argument('--search_config', type=Path, help='Search config file.', default=confs_path / 'cpu_search.yaml')
    parser.add_argument('--serial_training', help='Run partial training jobs serially instead of in parallel.', action='store_true')
parser.add_argument('--gpus_per_job', type=float, help='Number of GPUs used per job (if `serial_training` flag is disabled)',
default=0.5)
parser.add_argument('--partial_tr_epochs', type=int, help='Number of epochs to run partial training', default=1)
parser.add_argument('--seed', type=int, help='Random seed', default=42)
    parser.add_argument('--timeout', type=int, help='Timeout for partial training (in seconds) (default 10800)', default=10800)
args, extra_args = parser.parse_known_args()
timeout_seconds = args.timeout
# Filters extra args that have the prefix `search_space`
search_extra_args = filter_extra_args(extra_args, 'search.')
config = Config(str(args.search_config), search_extra_args, resolve_env_vars=True)
search_config = config['search']
# Search space
ss_config = search_config['search_space']
search_space = AVAILABLE_SEARCH_SPACES[ss_config['name']](
seed=args.seed,
**ss_config.get('params', {}),
)
input_shape = (1, search_space.in_channels, *search_space.img_size[::-1])
partial_training_output = args.output_dir / 'partial_training_logs'
os.makedirs(partial_training_output, exist_ok=True)
# Search objectives
so = SearchObjectives()
target_config = search_config.get('target', {})
target_name = target_config.pop('name', 'cpu')
assert target_name in ['cpu', 'snp']
max_latency = 0.3 if target_name == 'cpu' else 0.185
algo_config = search_config['algorithm']
algo_params = algo_config.get('params', {})
max_parameters = float(algo_params.pop('max_parameters', 5e7))
# Adds a constraint on number of parameters so we don't sample models that are too large
so.add_constraint(
'Model Size (b)',
TorchNumParameters(),
constraint=(1e6, max_parameters)
)
aml_training = False
store = None
if 'aml' in config:
aml_config = config['aml']
experiment_name = aml_config.get('experiment_name', 'facesynthetics')
store: ArchaiStore = configure_store(aml_config)
aml_training = 'training_cluster' in aml_config
# Adds a constrained objective on model latency so we don't pick models that are too slow.
onnx_evaluator = AvgOnnxLatencyEvaluator(
input_shape=input_shape,
export_kwargs={'opset_version': 11},
store=store)
so.add_objective(
'CPU ONNX Latency (s)',
onnx_evaluator,
higher_is_better=False,
compute_intensive=False,
constraint=[0, max_latency]
)
if target_name == 'snp':
# Gets connection string from env variable
evaluator = RemoteAzureBenchmarkEvaluator(
input_shape=input_shape,
store=store,
experiment_name=experiment_name,
onnx_export_kwargs={'opset_version': 11},
**target_config
)
so.add_objective(
'SNP Quantized Latency (s)',
evaluator,
higher_is_better=False,
compute_intensive=True
)
if aml_training:
# do the partial training on an AML gpu cluster
partial_tr_obj = AmlPartialTrainingEvaluator(
config,
tr_epochs=int(args.partial_tr_epochs),
timeout_seconds=timeout_seconds,
local_output=partial_training_output
)
else:
if args.dataset_dir is None:
raise ValueError('--dataset_dir must be specified if target is not aml')
# Dataset provider
dataset_provider = FaceSyntheticsDatasetProvider(args.dataset_dir)
partial_tr_obj = PartialTrainingValIOU(
dataset_provider,
tr_epochs=args.partial_tr_epochs,
output_dir=partial_training_output
)
if not args.serial_training:
partial_tr_obj = RayParallelEvaluator(
partial_tr_obj, num_gpus=args.gpus_per_job,
max_calls=1
)
so.add_objective(
'Partial Training Val. IOU',
partial_tr_obj,
higher_is_better=True,
compute_intensive=True
)
# Search algorithm
algo : Searcher = AVAILABLE_ALGOS[algo_config['name']](
search_space, so,
output_dir=args.output_dir,
seed=args.seed,
**algo_params,
)
algo.subscribe_start_iteration(lambda x: onnx_evaluator.on_start_iteration(x))
algo.search()
if __name__ == '__main__':
main()
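# Illustrative invocation (hypothetical paths; the default search config is confs/cpu_search.yaml):
#   python search.py --dataset_dir ~/datasets/FaceSynthetics --output_dir output \
#       --partial_tr_epochs 1 --gpus_per_job 0.5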
|
archai/tasks/face_segmentation/search.py/0
|
{
"file_path": "archai/tasks/face_segmentation/search.py",
"repo_id": "archai",
"token_count": 2679
}
| 356 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import io
from typing import Dict, List, Optional, Tuple, Union
import os
os.environ["OMP_NUM_THREADS"] = "1"
import statistics
from time import perf_counter
import onnxruntime as rt
import torch
from archai.discrete_search.api.archai_model import ArchaiModel
class AvgOnnxLatency:
higher_is_better: bool = False
def __init__(
self,
input_shape: Union[Tuple, List[Tuple]],
num_trials: int = 15,
num_input: int = 15,
input_dtype: str = "torch.FloatTensor",
rand_range: Tuple[float, float] = (0.0, 1.0),
export_kwargs: Optional[Dict] = None,
inf_session_kwargs: Optional[Dict] = None,
    ):
        """Measure the average ONNX latency (in milliseconds) of a model
Args:
input_shape (Union[Tuple, List[Tuple]]): Model Input shape or list of model input shapes.
num_trials (int, optional): Number of trials. Defaults to 15.
num_input (int, optional): Number of input per trial. Defaults to 15.
input_dtype (str, optional): Data type of input samples.
rand_range (Tuple[float, float], optional): The min and max range of input samples.
export_kwargs (Optional[Dict], optional): Optional dictionary of key-value args passed to
`torch.onnx.export`. Defaults to None.
inf_session_kwargs (Optional[Dict], optional): Optional dictionary of key-value args
passed to `onnxruntime.InferenceSession()`. Defaults to None.
"""
input_shapes = [input_shape] if isinstance(input_shape, tuple) else input_shape
rand_min, rand_max = rand_range
self.sample_input = tuple(
[
((rand_max - rand_min) * torch.rand(*input_shape) + rand_min).type(input_dtype)
for input_shape in input_shapes
]
)
self.num_trials = num_trials
self.num_input_per_trial = num_input
self.export_kwargs = export_kwargs or dict()
self.inf_session_kwargs = inf_session_kwargs or dict()
    def evaluate(self, model: ArchaiModel) -> float:
        """Evaluate the model and return the average latency (in milliseconds).
        Args:
            model (ArchaiModel): Model to evaluate.
        Returns:
            float: Average latency (in milliseconds).
        """
model.arch.to("cpu")
exported_model_buffer = io.BytesIO()
torch.onnx.export(
model.arch,
self.sample_input,
exported_model_buffer,
input_names=[f"input_{i}" for i in range(len(self.sample_input))],
opset_version=13,
**self.export_kwargs,
)
exported_model_buffer.seek(0)
opts = rt.SessionOptions()
opts.inter_op_num_threads = 1
opts.intra_op_num_threads = 1
onnx_session = rt.InferenceSession(exported_model_buffer.read(), sess_options=opts, **self.inf_session_kwargs)
sample_input = {f"input_{i}": inp.numpy() for i, inp in enumerate(self.sample_input)}
inf_time_avg = self.get_time_elapsed(
onnx_session, sample_input, num_input=self.num_input_per_trial, num_measures=self.num_trials
)
return inf_time_avg
    def get_time_elapsed(self, onnx_session, sample_input, num_input: int = 15, num_measures: int = 15) -> float:
        """Measure the average time elapsed (in milliseconds) for a given model and input over a number of runs.
Args:
onnx_session (onnxruntime.InferenceSession): ONNX Inference Session
sample_input (Dict[str, np.ndarray]): Sample input to the model
num_input (int, optional): Number of input per trial. Defaults to 15.
num_measures (int, optional): Number of measures. Defaults to 15.
Returns:
float: Average time elapsed (in milliseconds)"""
        def measure_func():
            """Measure the time elapsed (in milliseconds) for a given model and input, once.
            Returns:
                float: Time elapsed (in milliseconds).
            """
t0 = perf_counter()
for _ in range(num_input):
onnx_session.run(None, input_feed=sample_input)[0]
t1 = perf_counter()
time_measured = 1e3 * (t1 - t0) / num_input
return time_measured
        return statistics.mean([measure_func() for _ in range(num_measures)])
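# Illustrative usage (not part of the original module): wraps a toy torch module in an
# ArchaiModel just to show the call pattern; the shape and trial counts are arbitrary.
if __name__ == "__main__":
    toy = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))
    evaluator = AvgOnnxLatency(input_shape=(1, 3, 32, 32), num_trials=3, num_input=3)
    latency_ms = evaluator.evaluate(ArchaiModel(arch=toy, archid="toy-linear"))
    print(f"average latency: {latency_ms:.3f} ms")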
|
archai/tasks/facial_landmark_detection/latency.py/0
|
{
"file_path": "archai/tasks/facial_landmark_detection/latency.py",
"repo_id": "archai",
"token_count": 1910
}
| 357 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Generates new tokens with a pre-trained model.")
parser.add_argument("pre_trained_model_path", type=str, help="Path to the pre-trained model path/file.")
parser.add_argument("prompt", type=str, help="Prompt to serve as the generation's context.")
parser.add_argument(
"-sf", "--pre_trained_model_subfolder", type=str, default=None, help="Subfolder to the pre-trained model path."
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
device = "cpu"
if torch.cuda.is_available():
device = "cuda"
tokenizer = AutoTokenizer.from_pretrained(args.pre_trained_model_path)
model = AutoModelForCausalLM.from_pretrained(
args.pre_trained_model_path, subfolder=args.pre_trained_model_subfolder
).to(device)
model.config.use_cache = True
inputs = tokenizer(args.prompt, return_tensors="pt").to(device)
outputs = model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
pad_token_id=model.config.eos_token_id,
do_sample=True,
temperature=0.8,
top_p=0.95,
max_new_tokens=128,
)
print(f"Generated: \n{tokenizer.decode(outputs[0], skip_special_tokens=True)}")
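# Illustrative invocation (hypothetical model path):
#   python generate_text.py ./models/gpt2-finetuned "Machine learning is"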
|
archai/tasks/text_generation/generate_text.py/0
|
{
"file_path": "archai/tasks/text_generation/generate_text.py",
"repo_id": "archai",
"token_count": 575
}
| 358 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
import shutil
from archai.datasets.cv.mnist_dataset_provider import MnistDatasetProvider
def test_mnist_dataset_provider():
# make sure tests can run in parallel and not clobber each other's dataroot.
unique_data_root = 'test_mnist_dataset_provider_dataroot'
dataset_provider = MnistDatasetProvider(root=unique_data_root)
# Assert that we can individually load training, validation and test datasets
train_dataset = dataset_provider.get_train_dataset()
assert len(train_dataset) == 60000
assert isinstance(train_dataset[0][0], torch.Tensor)
assert isinstance(train_dataset[0][1], int)
val_dataset = dataset_provider.get_val_dataset()
assert len(val_dataset) == 10000
assert isinstance(val_dataset[0][0], torch.Tensor)
assert isinstance(val_dataset[0][1], int)
test_dataset = dataset_provider.get_test_dataset()
assert len(test_dataset) == 10000
assert isinstance(test_dataset[0][0], torch.Tensor)
assert isinstance(test_dataset[0][1], int)
shutil.rmtree(unique_data_root)
|
archai/tests/datasets/cv/test_mnist_dataset_provider.py/0
|
{
"file_path": "archai/tests/datasets/cv/test_mnist_dataset_provider.py",
"repo_id": "archai",
"token_count": 422
}
| 359 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import pytest
from archai.discrete_search.algos.random_search import RandomSearch
@pytest.fixture(scope="session")
def output_dir(tmp_path_factory):
return tmp_path_factory.mktemp("out_evo")
def test_random_search(output_dir, search_space, search_objectives):
algo = RandomSearch(search_space, search_objectives, output_dir, num_iters=2, samples_per_iter=5)
search_results = algo.search()
assert len(os.listdir(output_dir)) > 0
df = search_results.get_search_state_df()
assert all(0 <= x <= 0.4 for x in df["Random1"].tolist())
all_models = [m for iter_r in search_results.results for m in iter_r["models"]]
# Checks if all registered models satisfy constraints
_, valid_models = search_objectives.validate_constraints(all_models)
assert len(valid_models) == len(all_models)
|
archai/tests/discrete_search/algos/test_random_search.py/0
|
{
"file_path": "archai/tests/discrete_search/algos/test_random_search.py",
"repo_id": "archai",
"token_count": 314
}
| 360 |