text (string, lengths 5 to 22M) | id (string, lengths 12 to 177) | metadata (dict) | __index_level_0__ (int64, 0 to 1.37k) |
---|---|---|---|
DATASET_PATH=path_to_dataset
MODEL_PATH=path_to_bart_large
python -m bpe_encoder \
--encoder-json $MODEL_PATH/encoder.json \
--vocab-bpe $MODEL_PATH/vocab.bpe \
--inputs $DATASET_PATH/train.src \
--outputs $DATASET_PATH/train.bpe.src \
--workers 20 \
--keep-empty
python -m bpe_encoder \
--encoder-json $MODEL_PATH/encoder.json \
--vocab-bpe $MODEL_PATH/vocab.bpe \
--inputs $DATASET_PATH/train.tgt \
--outputs $DATASET_PATH/train.bpe.tgt \
--workers 20 \
--keep-empty
python -m bpe_encoder \
--encoder-json $MODEL_PATH/encoder.json \
--vocab-bpe $MODEL_PATH/vocab.bpe \
--inputs $DATASET_PATH/dev.src \
--outputs $DATASET_PATH/dev.bpe.src \
--workers 20 \
--keep-empty
python -m bpe_encoder \
--encoder-json $MODEL_PATH/encoder.json \
--vocab-bpe $MODEL_PATH/vocab.bpe \
--inputs $DATASET_PATH/dev.tgt \
--outputs $DATASET_PATH/dev.bpe.tgt \
--workers 20 \
--keep-empty
python -m bpe_encoder \
--encoder-json $MODEL_PATH/encoder.json \
--vocab-bpe $MODEL_PATH/vocab.bpe \
--inputs $DATASET_PATH/test.src \
--outputs $DATASET_PATH/test.bpe.src \
--workers 20 \
--keep-empty
python -m bpe_encoder \
--encoder-json $MODEL_PATH/encoder.json \
--vocab-bpe $MODEL_PATH/vocab.bpe \
--inputs $DATASET_PATH/test.tgt \
--outputs $DATASET_PATH/test.bpe.tgt \
--workers 20 \
--keep-empty
fairseq-preprocess --source-lang "src" --target-lang "tgt" \
--trainpref $DATASET_PATH/train.bpe \
--validpref $DATASET_PATH/dev.bpe \
--testpref $DATASET_PATH/test.bpe \
--destdir $DATASET_PATH/bin_large \
--workers 20 \
--srcdict $MODEL_PATH/dict.txt \
--tgtdict $MODEL_PATH/dict.txt
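# Illustrative sketch (hypothetical, not from the original script): the six
# bpe_encoder invocations above differ only in the split and side, so an
# equivalent Python driver could generate them in a loop. The two path
# variables are the same placeholders used above.
import subprocess

DATASET_PATH = "path_to_dataset"
MODEL_PATH = "path_to_bart_large"

for split in ("train", "dev", "test"):
    for side in ("src", "tgt"):
        subprocess.run(
            [
                "python", "-m", "bpe_encoder",
                "--encoder-json", f"{MODEL_PATH}/encoder.json",
                "--vocab-bpe", f"{MODEL_PATH}/vocab.bpe",
                "--inputs", f"{DATASET_PATH}/{split}.{side}",
                "--outputs", f"{DATASET_PATH}/{split}.bpe.{side}",
                "--workers", "20",
                "--keep-empty",
            ],
            check=True,
        )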
|
ContextualSP/lemon/lemon/preprocess_finetune.bat/0
|
{
"file_path": "ContextualSP/lemon/lemon/preprocess_finetune.bat",
"repo_id": "ContextualSP",
"token_count": 1124
}
| 241 |
CORRECT_OPTION_TAG = "correct_option"
INCORRECT_OPTION_TAG = "incorrect_option"
CORRECT_OPTION_GOLD_TAG = "gold"
CORRECT_OPTION_TAG_LIST = [CORRECT_OPTION_TAG, CORRECT_OPTION_GOLD_TAG]
ALL_OPTION_TAG_LIST = [
CORRECT_OPTION_TAG,
CORRECT_OPTION_GOLD_TAG,
INCORRECT_OPTION_TAG,
]
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/allennlp_reasoning_explainqa/common/constants.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/code/allennlp_reasoning_explainqa/common/constants.py",
"repo_id": "ContextualSP",
"token_count": 129
}
| 242 |
from errors.errors import corrupted_action_file, corrupted_sentences_file
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/errors/__init__.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/errors/__init__.py",
"repo_id": "ContextualSP",
"token_count": 18
}
| 243 |
from typing import List, NamedTuple, Callable, TypeVar, Optional
from evaluation.metric import Metric
from text import terms
from process import ProcessSummary, Conversion, Move, Input, Output
# Question types used in functions here
QType = TypeVar("QType", Input, Output, Conversion, Move)
class QuestionScores(NamedTuple):
inputs: Metric
outputs: Metric
conversions: Metric
moves: Metric
@classmethod
def from_summaries(cls, answer: ProcessSummary, prediction: ProcessSummary):
return cls(
inputs=_score_inputs(answer.inputs, prediction.inputs),
outputs=_score_outputs(answer.outputs, prediction.outputs),
conversions=_score_conversions(answer.conversions, prediction.conversions),
moves=_score_moves(answer.moves, prediction.moves),
)
def _edgecases(answers: List[QType], predictions: List[QType]) -> Optional[Metric]:
if len(answers) == 0 and len(predictions) == 0:
return Metric(precision=1.0, recall=1.0)
if len(answers) == 0:
return Metric(precision=0.0, recall=1.0)
if len(predictions) == 0:
return Metric(precision=1.0, recall=0.0)
return None
def _score_inputs(answers: List[Input], predictions: List[Input]) -> Metric:
m = _edgecases(answers, predictions)
if m:
return m
return _score(answers, predictions, _score_input_pair)
def _score_input_pair(answer: Input, prediction: Input) -> float:
return _compare_participants(answer.participants, prediction.participants)
def _score_outputs(answers: List[Output], predictions: List[Output]) -> Metric:
m = _edgecases(answers, predictions)
if m:
return m
return _score(answers, predictions, _score_output_pair)
def _score_output_pair(answer: Output, prediction: Output) -> float:
return _compare_participants(answer.participants, prediction.participants)
def _score_conversions(answers: List[Conversion], predictions: List[Conversion]) -> Metric:
m = _edgecases(answers, predictions)
if m:
return m
return _score(answers, predictions, _score_conversion_pair)
def _score_conversion_pair(answer: Conversion, prediction: Conversion) -> float:
if answer.step_id != prediction.step_id:
return 0.0
return sum((_compare_locations(answer.locations, prediction.locations),
_compare_participants(answer.destroyed, prediction.destroyed),
_compare_participants(answer.created, prediction.created))) / 3
def _score_moves(answers: List[Move], predictions: List[Move]) -> Metric:
m = _edgecases(answers, predictions)
if m:
return m
return _score(answers, predictions, _score_move_pair)
def _score_move_pair(answer: Move, prediction: Move) -> float:
if answer.step_id != prediction.step_id:
return 0.0
return sum((_compare_participants(answer.participants, prediction.participants),
_compare_locations(answer.location_before, prediction.location_before),
_compare_locations(answer.location_after, prediction.location_after))) / 3
def _compare_participants(answer: str, prediction: str) -> float:
# Trivial match
if answer == prediction:
return 1.0
prediction_terms = terms.extract_termsets(prediction)
answer_terms = terms.extract_termsets(answer)
# calculate Jaccard similarity score
numerator = terms.terms_overlap(prediction_terms, answer_terms)
denominator = len(prediction_terms) + len(answer_terms) - numerator
return numerator / denominator
def _compare_locations(answer: str, prediction: str) -> float:
if answer == prediction:
return 1.0
prediction_terms = terms.extract_termsets_with_normalization(prediction)
answer_terms = terms.extract_termsets_with_normalization(answer)
# calculate Jaccard similarity score
numerator = terms.terms_overlap(prediction_terms, answer_terms)
denominator = len(prediction_terms) + len(answer_terms) - numerator
return numerator / denominator
# Score a pair of QType answers and predictions, such that:
#
# precision = precision_numerator / len(predictions)
# recall = recall_numerator / len(answers)
#
# The calculation of precision and recall numerators depends on the number of answers and predictions. In these
# examples, a1 and a2 are answers and p1, p2 and p3 are predictions. Combinations (like a2p3) indicate a score for the
# answer-prediction pair (like a2 and p3).
#
# Example 1: answers = [a1,a2] predictions = [p1]
# precision_numerator = max(a1p1, a2p1)
# recall_numerator = max(a1p1) + max(a2p1)
#
# Example 2: answers = [a1,a2] predictions = [p1,p2]
# precision_numerator = max(a1p1, a2p1) + max(a1p2, a2p2)
# recall_numerator = max(a1p1, a2p1) + max(a1p2, a2p2)
#
# Example 3: answers = [a1,a2] predictions = [p1,p2,p3]
# precision_numerator = max(a1p1, a2p1) + max(a1p2, a2p2) + max(a1p3, a2p3)
# recall_numerator = max(a1p1, a1p2, a1p3) + max(a2p1, a2p2, a2p3)
def _score(answers: List[QType], predictions: List[QType], scoring_function: Callable[[QType, QType], float]) -> Metric:
precision_numerator = 0.0
for p in predictions:
max_score = 0.0
for a in answers:
max_score = max(max_score, scoring_function(a, p))
precision_numerator += max_score
# only compute recall numerator when number of predictions doesn't match number of expected answers
recall_numerator = precision_numerator
if len(predictions) != len(answers):
recall_numerator = 0.0
for a in answers:
max_score = 0.0
for p in predictions:
max_score = max(max_score, scoring_function(a, p))
recall_numerator += max_score
if precision_numerator == 0.0:
precision = 0.0
else:
precision = precision_numerator / (1.0 * len(predictions))
if recall_numerator == 0.0:
recall = 0.0
else:
recall = recall_numerator / (1.0 * len(answers))
return Metric(precision=precision, recall=recall)
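# Hypothetical, self-contained illustration of the numerator scheme documented above
# (Example 3: answers = [a1, a2], predictions = [p1, p2, p3]); the pairwise scores
# here are made up solely to show the arithmetic.
pair_score = {
    ("a1", "p1"): 0.5, ("a1", "p2"): 0.0, ("a1", "p3"): 1.0,
    ("a2", "p1"): 0.0, ("a2", "p2"): 1.0, ("a2", "p3"): 0.0,
}
answers, predictions = ["a1", "a2"], ["p1", "p2", "p3"]
precision_numerator = sum(max(pair_score[(a, p)] for a in answers) for p in predictions)
recall_numerator = sum(max(pair_score[(a, p)] for p in predictions) for a in answers)
print(precision_numerator / len(predictions))  # 0.833... = (0.5 + 1.0 + 1.0) / 3
print(recall_numerator / len(answers))         # 1.0      = (1.0 + 1.0) / 2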
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/scoring/question.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/scoring/question.py",
"repo_id": "ContextualSP",
"token_count": 2335
}
| 244 |
The file [answers.jsonl](answers.jsonl) contains the dev answers against which development predictions can be evaluated.
The file [dummy-predictions.csv](dummy-predictions.csv) is an example prediction file that can be evaluated against the answers in [answers.jsonl](answers.jsonl).
It is a dummy prediction that marks every pair of sentences as entailing, and it scores about 50% correct.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/data/dev/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/data/dev/README.md",
"repo_id": "ContextualSP",
"token_count": 101
}
| 245 |
#!/bin/bash
GPU_NUM=16
python -m torch.distributed.launch --nproc_per_node=${GPU_NUM} nli_es.py
#python nli_es.py
|
ContextualSP/logigan/pre-training/run_nli_es.sh/0
|
{
"file_path": "ContextualSP/logigan/pre-training/run_nli_es.sh",
"repo_id": "ContextualSP",
"token_count": 50
}
| 246 |
# coding=utf-8
"""Preprocesses a specific split of the CFQ dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import string
from typing import Any, Dict, List, Text, Tuple
from absl import app
from absl import flags
from absl import logging
FLAGS = flags.FLAGS
flags.DEFINE_string('dataset_path', None, 'Path to the JSON file containing '
'the dataset.')
flags.DEFINE_string('split_path', None, 'Path to the JSON file containing '
'split information.')
flags.DEFINE_string('save_path', None, 'Path to the directory where to '
'save the files to.')
flags.mark_flag_as_required('save_path')
flags.register_validator('dataset_path', os.path.exists, 'Dataset not found.')
flags.register_validator('split_path', os.path.exists, 'Split not found.')
Dataset = Dict[Text, List[Tuple[Text, Text]]]
def load_json(path):
logging.info(f'Reading json from {path} into memory...')
with open(path, 'r', encoding='utf-8') as f:
data = json.load(f)
logging.info(f'Successfully loaded json data from {path} into memory.')
return data
def tokenize_punctuation(text):
text = map(lambda c: f' {c} ' if c in string.punctuation else c, text)
return ' '.join(''.join(text).split())
def preprocess_sparql(query):
"""Do various preprocessing on the SPARQL query."""
# Tokenize braces.
query = query.replace('count(*)', 'count ( * )')
tokens = []
for token in query.split():
# Replace 'ns:' prefixes.
if token.startswith('ns:'):
token = token[3:]
# Replace mid prefixes.
if token.startswith('m.'):
token = 'm_' + token[2:]
tokens.append(token)
return ' '.join(tokens).replace('\\n', ' ')
def get_encode_decode_pair(sample):
  # Apply some simple preprocessing on the tokenization, which improves the
  # performance of the models significantly.
encode_text = tokenize_punctuation(sample['questionPatternModEntities'])
decode_text = preprocess_sparql(sample['sparqlPatternModEntities'])
skeleton_text = preprocess_sparql(sample['sparqlPattern'])
return (encode_text, decode_text, skeleton_text)
def get_dataset(samples, split):
"""Creates a dataset by taking @split from @samples."""
logging.info('Retrieving splits...')
split_names = ['train', 'dev', 'test']
idx_names = [f'{s}Idxs' for s in split_names]
dataset = collections.defaultdict(list)
if not set(idx_names) <= split.keys():
logging.fatal(f'Invalid split: JSON should contain fields {idx_names}.')
return dataset
for split_name, idx_name in zip(split_names, idx_names):
logging.info(
f' Retrieving {split_name} ({len(split[idx_name])} instances)')
for idx in split[idx_name]:
dataset[split_name].append(get_encode_decode_pair(samples[idx]))
size_str = ', '.join(f'{s}={len(dataset[s])}' for s in split_names)
logging.info(f'Finished retrieving splits. Size: {size_str}')
return dataset
def write_dataset(dataset, save_path):
"""Saves the given dataset into the given location."""
if not dataset:
logging.info('No dataset to write.')
return
logging.info(f'Writing dataset to {save_path}')
for split_name, list_of_input_output_pairs in dataset.items():
folder_name = os.path.join(save_path, split_name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
encode_name = os.path.join(folder_name, f'{split_name}_encode.txt')
decode_name = os.path.join(folder_name, f'{split_name}_decode.txt')
skeleton_name = os.path.join(folder_name, f'{split_name}_skeleton.txt')
with open(
encode_name, 'w', encoding='utf8') as encode_f, open(
decode_name, 'w', encoding='utf8') as decode_f, open(
skeleton_name, 'w', encoding='utf8') as skeleton_f:
for pair in list_of_input_output_pairs:
encode_f.write(pair[0] + '\n')
decode_f.write(pair[1] + '\n')
skeleton_f.write(pair[2] + '\n')
logging.info(f'Dataset written to {save_path}')
def write_token_vocab(words, save_path):
  """Writes token vocabulary from @words to @save_path."""
# Sort tokens by frequency and then lexically to break ties.
words_with_counts = words.most_common()
words_with_counts.sort(key=lambda x: (x[1], x[0]), reverse=True)
vocab_path = os.path.join(save_path, 'vocab.cfq.tokens')
with open(vocab_path, 'w') as f:
# Tensor2tensor needs these additional tokens.
f.write('<pad>\n<EOS>\n<OOV>\n')
for word, _ in words_with_counts:
f.write(f'{word}\n')
logging.info(f'Token vocabulary written to {vocab_path} ({len(words)} '
'distinct tokens).')
def get_lines(path, filename):
with open(os.path.join(path, 'train', filename), 'r') as f:
lines = [l.strip() for l in f.readlines() if l.strip()]
return lines
def get_token_vocab(path):
words = collections.Counter()
lines = get_lines(path, 'train_encode.txt')
lines.extend(get_lines(path, 'train_decode.txt'))
for line in lines:
words.update(line.split(' '))
return words
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
write_dataset(
get_dataset(load_json(FLAGS.dataset_path), load_json(FLAGS.split_path)),
FLAGS.save_path)
write_token_vocab(get_token_vocab(FLAGS.save_path), FLAGS.save_path)
if __name__ == '__main__':
app.run(main)
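# Hypothetical usage sketch, assuming this file is importable as `preprocess_cfq`:
# tokenize_punctuation spaces out punctuation, while preprocess_sparql strips the
# 'ns:' prefixes and rewrites 'm.' mids, as implemented above.
from preprocess_cfq import preprocess_sparql, tokenize_punctuation

print(tokenize_punctuation("Did M0's director marry M1?"))
# Did M0 ' s director marry M1 ?
print(preprocess_sparql("SELECT count(*) WHERE { ns:m.0abc ns:film.director.film M0 }"))
# SELECT count ( * ) WHERE { m_0abc film.director.film M0 }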
|
ContextualSP/poset_decoding/preprocess_cfq.py/0
|
{
"file_path": "ContextualSP/poset_decoding/preprocess_cfq.py",
"repo_id": "ContextualSP",
"token_count": 2084
}
| 247 |
# Watchers and contributors to MatchZoo repo directories/packages/files
# Please see documentation of use of CODEOWNERS file at
# https://help.github.com/articles/about-codeowners/ and
# https://github.com/blog/2392-introducing-code-owners
#
# Anybody can add themselves or a team as additional watcher or contributor
# to get notified about changes in a specific package.
# See https://help.github.com/articles/about-teams how to setup teams.
# Define individuals or teams that are responsible for code in a repository.
# global owner.
* @faneshion
* @Chriskuei
# third-party & project configuration
.codecov.yml @Chriskuei
.coveragerc @Chriskuei
.flake8 @Chriskuei
.gitignore @Chriskuei
.travis.yml @Chriskuei
CONTRIBUTING.MD @Chriskuei
Makefile @Chriskuei
pytest.ini @Chriskuei
README.md @faneshion @Chriskuei
readthedocs.yml @wqh17101
requirements.txt @Chriskuei @faneshion
setup.py @Chriskuei @faneshion
# artworks
/artworks/ @faneshion
# tutorials
/tutorials/ @Chriskuei @faneshion @caiyinqiong
# docs
/docs/ @wqh17101
# tests
/tests/ @Chriskuei @faneshion
# matchzoo
/matchzoo/auto/ @Chriskuei
/matchzoo/data_pack/ @caiyinqiong @faneshion
/matchzoo/dataloader/ @caiyinqiong @Chriskuei
/matchzoo/datasets/ @caiyinqiong
/matchzoo/embedding/ @caiyinqiong
/matchzoo/engine/ @faneshion @Chriskuei
/matchzoo/losses/ @faneshion @Chriskuei
/matchzoo/metrics/ @faneshion @Chriskuei
/matchzoo/models/ @Chriskuei @faneshion @caiyinqiong
/matchzoo/modules/ @Chriskuei @caiyinqiong
/matchzoo/preprocessors/ @caiyinqiong @faneshion
/matchzoo/tasks/ @Chriskuei
/matchzoo/trainers/ @Chriskuei
/matchzoo/utils/ @Chriskuei @caiyinqiong
/matchzoo/* @faneshion @Chriskuei
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/CODEOWNERS/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/CODEOWNERS",
"repo_id": "ContextualSP",
"token_count": 986
}
| 248 |
from .preparer import prepare
from .preparer import Preparer
from .tuner import Tuner
from .tuner import tune
from . import tuner
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/auto/__init__.py",
"repo_id": "ContextualSP",
"token_count": 38
}
| 249 |
"""Basic data loader."""
import typing
import math
import numpy as np
import torch
from torch.utils import data
from matchzoo.dataloader.dataset import Dataset
from matchzoo.engine.base_callback import BaseCallback
class DataLoader(object):
"""
DataLoader that loads batches of data from a Dataset.
:param dataset: The Dataset object to load data from.
:param device: The desired device of returned tensor. Default: if None,
use the current device. If `torch.device` or int, use device specified
by user. If list, the first item will be used.
:param stage: One of "train", "dev", and "test". (default: "train")
:param callback: BaseCallback. See
`matchzoo.engine.base_callback.BaseCallback` for more details.
    :param pin_memory: If set to `True`, tensors will be copied into
pinned memory. (default: `False`)
:param timeout: The timeout value for collecting a batch from workers. (
default: 0)
:param num_workers: The number of subprocesses to use for data loading. 0
means that the data will be loaded in the main process. (default: 0)
:param worker_init_fn: If not ``None``, this will be called on each
worker subprocess with the worker id (an int in [0, num_workers - 1])
as input, after seeding and before data loading. (default: None)
Examples:
>>> import matchzoo as mz
>>> data_pack = mz.datasets.toy.load_data(stage='train')
>>> preprocessor = mz.preprocessors.BasicPreprocessor()
>>> data_processed = preprocessor.fit_transform(data_pack)
>>> dataset = mz.dataloader.Dataset(
... data_processed, mode='point', batch_size=32)
>>> padding_callback = mz.dataloader.callbacks.BasicPadding()
>>> dataloader = mz.dataloader.DataLoader(
... dataset, stage='train', callback=padding_callback)
>>> len(dataloader)
4
"""
def __init__(
self,
dataset: Dataset,
device: typing.Union[torch.device, int, list, None] = None,
stage='train',
callback: BaseCallback = None,
pin_memory: bool = False,
timeout: int = 0,
num_workers: int = 0,
worker_init_fn=None,
):
"""Init."""
if stage not in ('train', 'dev', 'test', 'debug'):
            raise ValueError(f"{stage} is not a valid stage type. "
f"Must be one of `train`, `dev`, `test`.")
if isinstance(device, list) and len(device):
device = device[0]
elif not (isinstance(device, torch.device) or isinstance(device, int)):
device = torch.device(
"cuda" if torch.cuda.is_available() else "cpu")
self._dataset = dataset
self._pin_momory = pin_memory
self._timeout = timeout
self._num_workers = num_workers
self._worker_init_fn = worker_init_fn
self._device = device
self._stage = stage
self._callback = callback
self._dataloader = data.DataLoader(
self._dataset,
batch_size=None,
shuffle=False,
collate_fn=lambda x: x,
batch_sampler=None,
num_workers=self._num_workers,
pin_memory=self._pin_momory,
timeout=self._timeout,
worker_init_fn=self._worker_init_fn,
)
def __len__(self) -> int:
"""Get the total number of batches."""
return len(self._dataset)
@property
def id_left(self) -> np.ndarray:
"""`id_left` getter."""
x, _ = self._dataset[:]
return x['id_left']
@property
def label(self) -> np.ndarray:
"""`label` getter."""
_, y = self._dataset[:]
return y.squeeze() if y is not None else None
def __iter__(self) -> typing.Tuple[dict, torch.tensor]:
"""Iteration."""
for batch_data in self._dataloader:
x, y = batch_data
self._handle_callbacks_on_batch_unpacked(x, y)
batch_x = {}
for key, value in x.items():
if key == 'id_left' or key == 'id_right':
continue
# print("key:", key)
# print("value:", value)
batch_x[key] = torch.tensor(
value, device=self._device)
if self._stage == 'test':
yield batch_x, None
else:
if y.dtype == 'int': # task='classification'
batch_y = torch.tensor(
y.squeeze(axis=-1), dtype=torch.long, device=self._device)
else: # task='ranking'
batch_y = torch.tensor(
y, dtype=torch.float, device=self._device)
yield batch_x, batch_y
def _handle_callbacks_on_batch_unpacked(self, x, y):
if self._callback is not None:
self._callback.on_batch_unpacked(x, y)
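# Hypothetical continuation of the docstring example above: consuming one batch from
# the loader. The exact keys in `batch_x` depend on the padding callback in use.
for batch_x, batch_y in dataloader:
    print(sorted(batch_x.keys()))  # e.g. ['length_left', 'length_right', 'text_left', 'text_right']
    print(batch_y.shape)           # in the 'test' stage, batch_y is None instead
    break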
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/dataloader.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/dataloader.py",
"repo_id": "ContextualSP",
"token_count": 2270
}
| 250 |
"""Quora Question Pairs data loader."""
import typing
from pathlib import Path
import pandas as pd
import matchzoo
from matchzoo.engine.base_task import BaseTask
_url = "https://firebasestorage.googleapis.com/v0/b/mtl-sentence" \
"-representations.appspot.com/o/data%2FQQP.zip?alt=media&" \
"token=700c6acf-160d-4d89-81d1-de4191d02cb5"
def load_data(
stage: str = 'train',
task: typing.Union[str, BaseTask] = 'classification',
return_classes: bool = False,
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load QuoraQP data.
:param stage: One of `train`, `dev`, and `test`.
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance.
:param return_classes: Whether return classes for classification task.
:return: A DataPack if `ranking`, a tuple of (DataPack, classes) if
`classification`.
"""
if stage not in ('train', 'dev', 'test'):
        raise ValueError(f"{stage} is not a valid stage. "
f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f"{stage}.tsv")
data_pack = _read_data(file_path, stage, task)
if task == 'ranking' or isinstance(task, matchzoo.tasks.Ranking):
return data_pack
elif task == 'classification' or isinstance(
task, matchzoo.tasks.Classification):
if return_classes:
return data_pack, [False, True]
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task.")
def _download_data():
ref_path = matchzoo.utils.get_file(
'quora_qp', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='quora_qp'
)
return Path(ref_path).parent.joinpath('QQP')
def _read_data(path, stage, task):
data = pd.read_csv(path, sep='\t', error_bad_lines=False, dtype=object)
data = data.dropna(axis=0, how='any').reset_index(drop=True)
if stage in ['train', 'dev']:
df = pd.DataFrame({
'id_left': data['qid1'],
'id_right': data['qid2'],
'text_left': data['question1'],
'text_right': data['question2'],
'label': data['is_duplicate'].astype(int)
})
else:
df = pd.DataFrame({
'text_left': data['question1'],
'text_right': data['question2']
})
return matchzoo.pack(df, task)
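# Hypothetical usage sketch for the loader above (downloads QQP on first call):
train_pack, classes = load_data(stage='train', task='classification', return_classes=True)
print(classes)  # [False, True]
dev_pack = load_data(stage='dev', task='ranking')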
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/quora_qp/load_data.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/quora_qp/load_data.py",
"repo_id": "ContextualSP",
"token_count": 1133
}
| 251 |
""":class:`BasePreprocessor` defines input and output for processors."""
import abc
import functools
import typing
from pathlib import Path
import dill
import matchzoo as mz
def validate_context(func):
"""Validate context in the preprocessor."""
@functools.wraps(func)
def transform_wrapper(self, *args, **kwargs):
if not self.context:
raise ValueError('Please call `fit` before calling `transform`.')
return func(self, *args, **kwargs)
return transform_wrapper
class BasePreprocessor(metaclass=abc.ABCMeta):
"""
    :class:`BasePreprocessor` to handle input data.
A preprocessor should be used in two steps. First, `fit`, then,
`transform`. `fit` collects information into `context`, which includes
everything the preprocessor needs to `transform` together with other
useful information for later use. `fit` will only change the
preprocessor's inner state but not the input data. In contrast,
`transform` returns a modified copy of the input data without changing
the preprocessor's inner state.
"""
DATA_FILENAME = 'preprocessor.dill'
def __init__(self):
"""Initialization."""
self._context = {}
@property
def context(self):
"""Return context."""
return self._context
@abc.abstractmethod
def fit(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'BasePreprocessor':
"""
Fit parameters on input data.
        This method is an abstract base method and needs to be
        implemented in the child class.
This method is expected to return itself as a callable
object.
:param data_pack: :class:`Datapack` object to be fitted.
:param verbose: Verbosity.
"""
@abc.abstractmethod
def transform(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'mz.DataPack':
"""
Transform input data to expected manner.
        This method is an abstract base method and needs to be
        implemented in the child class.
        :param data_pack: :class:`DataPack` object to be transformed,
            or list of text-left, text-right tuples.
        :param verbose: Verbosity.
"""
def fit_transform(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'mz.DataPack':
"""
Call fit-transform.
:param data_pack: :class:`DataPack` object to be processed.
:param verbose: Verbosity.
"""
return self.fit(data_pack, verbose=verbose) \
.transform(data_pack, verbose=verbose)
def save(self, dirpath: typing.Union[str, Path]):
"""
Save the :class:`DSSMPreprocessor` object.
        A saved :class:`DSSMPreprocessor` is represented as a directory with
        the `context` object (fitted parameters on training data); it is
        serialized with `dill`.
:param dirpath: directory path of the saved :class:`DSSMPreprocessor`.
"""
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(self.DATA_FILENAME)
if not dirpath.exists():
dirpath.mkdir(parents=True)
dill.dump(self, open(data_file_path, mode='wb'))
@classmethod
def _default_units(cls) -> list:
"""Prepare needed process units."""
return [
mz.preprocessors.units.tokenize.Tokenize(),
mz.preprocessors.units.lowercase.Lowercase(),
]
def load_preprocessor(dirpath: typing.Union[str, Path]) -> 'BasePreprocessor':
"""
Load the fitted `context`. The reverse function of :meth:`save`.
:param dirpath: directory path of the saved model.
:return: a :class:`DSSMPreprocessor` instance.
"""
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(BasePreprocessor.DATA_FILENAME)
return dill.load(open(data_file_path, 'rb'))
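# Hypothetical fit/transform/save/load round trip using a concrete subclass; the toy
# dataset and BasicPreprocessor are stand-ins, only the call pattern is the point.
import matchzoo as mz

train_pack = mz.datasets.toy.load_data(stage='train')
dev_pack = mz.datasets.toy.load_data(stage='dev')
preprocessor = mz.preprocessors.BasicPreprocessor()
train_processed = preprocessor.fit_transform(train_pack)  # fit, then transform
dev_processed = preprocessor.transform(dev_pack)          # reuse the fitted context
preprocessor.save('/tmp/toy_preprocessor')
restored = load_preprocessor('/tmp/toy_preprocessor')     # defined above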
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/base_preprocessor.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/base_preprocessor.py",
"repo_id": "ContextualSP",
"token_count": 1559
}
| 252 |
"""Precision for ranking."""
import numpy as np
from matchzoo.engine.base_metric import (
BaseMetric, sort_and_couple, RankingMetric
)
class Precision(RankingMetric):
"""Precision metric."""
ALIAS = 'precision'
def __init__(self, k: int = 1, threshold: float = 0.):
"""
:class:`PrecisionMetric` constructor.
:param k: Number of results to consider.
:param threshold: the label threshold of relevance degree.
"""
self._k = k
self._threshold = threshold
    def __repr__(self) -> str:
        """:return: Formatted string representation of the metric."""
return f"{self.ALIAS}@{self._k}({self._threshold})"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate precision@k.
Example:
>>> y_true = [0, 0, 0, 1]
>>> y_pred = [0.2, 0.4, 0.3, 0.1]
>>> Precision(k=1)(y_true, y_pred)
0.0
>>> Precision(k=2)(y_true, y_pred)
0.0
>>> Precision(k=4)(y_true, y_pred)
0.25
>>> Precision(k=5)(y_true, y_pred)
0.2
:param y_true: The ground true label of each document.
:param y_pred: The predicted scores of each document.
:return: Precision @ k
        :raises: ValueError: k must be greater than 0.
"""
if self._k <= 0:
            raise ValueError(f"k must be greater than 0. "
f"{self._k} received.")
coupled_pair = sort_and_couple(y_true, y_pred)
precision = 0.0
for idx, (label, score) in enumerate(coupled_pair):
if idx >= self._k:
break
if label > self._threshold:
precision += 1.
return precision / self._k
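# Hypothetical extension of the doctest above, showing the `threshold` parameter:
# only labels strictly greater than the threshold count as relevant.
y_true = [0, 1, 2]
y_pred = [0.2, 0.4, 0.3]
print(Precision(k=2)(y_true, y_pred))               # 1.0  (labels 1 and 2 are both > 0)
print(Precision(k=2, threshold=1)(y_true, y_pred))  # 0.5  (only label 2 exceeds 1)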
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/precision.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/precision.py",
"repo_id": "ContextualSP",
"token_count": 887
}
| 253 |
"""An implementation of ESIM Model."""
import typing
import torch
import torch.nn as nn
from torch.nn import functional as F
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.modules import RNNDropout
from matchzoo.modules import BidirectionalAttention
from matchzoo.modules import StackedBRNN
class ESIM(BaseModel):
"""
ESIM Model.
Examples:
>>> model = ESIM()
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=False
)
params.add(Param(name='mask_value', value=0,
desc="The value to be masked from inputs."))
params.add(Param(name='dropout', value=0.2,
desc="Dropout rate."))
params.add(Param(name='hidden_size', value=200,
desc="Hidden size."))
params.add(Param(name='lstm_layer', value=1,
desc="Number of LSTM layers"))
params.add(Param(name='drop_lstm', value=False,
desc="Whether dropout LSTM."))
params.add(Param(name='concat_lstm', value=True,
desc="Whether concat intermediate outputs."))
params.add(Param(name='rnn_type', value='lstm',
desc="Choose rnn type, lstm or gru."))
return params
def build(self):
"""Instantiating layers."""
rnn_mapping = {'lstm': nn.LSTM, 'gru': nn.GRU}
self.embedding = self._make_default_embedding_layer()
self.rnn_dropout = RNNDropout(p=self._params['dropout'])
lstm_size = self._params['hidden_size']
if self._params['concat_lstm']:
lstm_size /= self._params['lstm_layer']
self.input_encoding = StackedBRNN(
self._params['embedding_output_dim'],
int(lstm_size / 2),
self._params['lstm_layer'],
dropout_rate=self._params['dropout'],
dropout_output=self._params['drop_lstm'],
rnn_type=rnn_mapping[self._params['rnn_type'].lower()],
concat_layers=self._params['concat_lstm'])
self.attention = BidirectionalAttention()
self.projection = nn.Sequential(
nn.Linear(
4 * self._params['hidden_size'],
self._params['hidden_size']),
nn.ReLU())
self.composition = StackedBRNN(
self._params['hidden_size'],
int(lstm_size / 2),
self._params['lstm_layer'],
dropout_rate=self._params['dropout'],
dropout_output=self._params['drop_lstm'],
rnn_type=rnn_mapping[self._params['rnn_type'].lower()],
concat_layers=self._params['concat_lstm'])
self.classification = nn.Sequential(
nn.Dropout(
p=self._params['dropout']),
nn.Linear(
4 * self._params['hidden_size'],
self._params['hidden_size']),
nn.Tanh(),
nn.Dropout(
p=self._params['dropout']))
self.out = self._make_output_layer(self._params['hidden_size'])
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# D = embedding size
# L = `input_left` sequence length
# R = `input_right` sequence length
# H = hidden size
# [B, L], [B, R]
query, doc = inputs['text_left'].long(), inputs['text_right'].long()
# [B, L]
# [B, R]
query_mask = (query == self._params['mask_value'])
doc_mask = (doc == self._params['mask_value'])
# [B, L, D]
# [B, R, D]
query = self.embedding(query)
doc = self.embedding(doc)
# [B, L, D]
# [B, R, D]
query = self.rnn_dropout(query)
doc = self.rnn_dropout(doc)
# [B, L, H]
# [B, R, H]
query = self.input_encoding(query, query_mask)
doc = self.input_encoding(doc, doc_mask)
# [B, L, H], [B, L, H]
attended_query, attended_doc = self.attention(
query, query_mask, doc, doc_mask)
# [B, L, 4 * H]
# [B, L, 4 * H]
enhanced_query = torch.cat([query,
attended_query,
query - attended_query,
query * attended_query],
dim=-1)
enhanced_doc = torch.cat([doc,
attended_doc,
doc - attended_doc,
doc * attended_doc],
dim=-1)
# [B, L, H]
# [B, L, H]
projected_query = self.projection(enhanced_query)
projected_doc = self.projection(enhanced_doc)
# [B, L, H]
# [B, L, H]
query = self.composition(projected_query, query_mask)
doc = self.composition(projected_doc, doc_mask)
# [B, L]
# [B, R]
reverse_query_mask = 1. - query_mask.float()
reverse_doc_mask = 1. - doc_mask.float()
# [B, H]
# [B, H]
query_avg = torch.sum(query * reverse_query_mask.unsqueeze(2), dim=1)\
/ (torch.sum(reverse_query_mask, dim=1, keepdim=True) + 1e-8)
doc_avg = torch.sum(doc * reverse_doc_mask.unsqueeze(2), dim=1)\
/ (torch.sum(reverse_doc_mask, dim=1, keepdim=True) + 1e-8)
# [B, L, H]
# [B, L, H]
query = query.masked_fill(query_mask.unsqueeze(2), -1e7)
doc = doc.masked_fill(doc_mask.unsqueeze(2), -1e7)
# [B, H]
# [B, H]
query_max, _ = query.max(dim=1)
doc_max, _ = doc.max(dim=1)
# [B, 4 * H]
v = torch.cat([query_avg, query_max, doc_avg, doc_max], dim=-1)
# [B, H]
hidden = self.classification(v)
# [B, num_classes]
out = self.out(hidden)
return out
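# Hypothetical configuration sketch for the model above; parameter values are purely
# illustrative, and any params left unset are filled with defaults.
import matchzoo as mz

model = ESIM()
model.params['task'] = mz.tasks.Ranking()
model.params['embedding_input_dim'] = 10000
model.params['embedding_output_dim'] = 300
model.params['hidden_size'] = 200
model.params['dropout'] = 0.2
model.guess_and_fill_missing_params(verbose=0)
model.build()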
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/esim.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/esim.py",
"repo_id": "ContextualSP",
"token_count": 3310
}
| 254 |
"""Matching module."""
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
class Matching(nn.Module):
"""
Module that computes a matching matrix between samples in two tensors.
:param normalize: Whether to L2-normalize samples along the
dot product axis before taking the dot product.
If set to `True`, then the output of the dot product
is the cosine proximity between the two samples.
:param matching_type: the similarity function for matching
Examples:
>>> import torch
>>> matching = Matching(matching_type='dot', normalize=True)
>>> x = torch.randn(2, 3, 2)
>>> y = torch.randn(2, 4, 2)
>>> matching(x, y).shape
torch.Size([2, 3, 4])
"""
def __init__(self, normalize: bool = False, matching_type: str = 'dot'):
""":class:`Matching` constructor."""
super().__init__()
self._normalize = normalize
self._validate_matching_type(matching_type)
self._matching_type = matching_type
@classmethod
def _validate_matching_type(cls, matching_type: str = 'dot'):
valid_matching_type = ['dot', 'exact', 'mul', 'plus', 'minus', 'concat']
if matching_type not in valid_matching_type:
raise ValueError(f"{matching_type} is not a valid matching type, "
f"{valid_matching_type} expected.")
def forward(self, x, y):
"""Perform attention on the input."""
length_left = x.shape[1]
length_right = y.shape[1]
if self._matching_type == 'dot':
if self._normalize:
x = F.normalize(x, p=2, dim=-1)
y = F.normalize(y, p=2, dim=-1)
return torch.einsum('bld,brd->blr', x, y)
elif self._matching_type == 'exact':
x = x.unsqueeze(dim=2).repeat(1, 1, length_right)
y = y.unsqueeze(dim=1).repeat(1, length_left, 1)
matching_matrix = (x == y)
x = torch.sum(matching_matrix, dim=2, dtype=torch.float)
y = torch.sum(matching_matrix, dim=1, dtype=torch.float)
return x, y
else:
x = x.unsqueeze(dim=2).repeat(1, 1, length_right, 1)
y = y.unsqueeze(dim=1).repeat(1, length_left, 1, 1)
if self._matching_type == 'mul':
return x * y
elif self._matching_type == 'plus':
return x + y
elif self._matching_type == 'minus':
return x - y
elif self._matching_type == 'concat':
return torch.cat((x, y), dim=3)
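# Hypothetical extension of the docstring example above, covering the broadcasted
# branches: 'mul'/'plus'/'minus' keep the feature dimension, 'concat' doubles it.
import torch

x = torch.randn(2, 3, 2)
y = torch.randn(2, 4, 2)
print(Matching(matching_type='mul')(x, y).shape)     # torch.Size([2, 3, 4, 2])
print(Matching(matching_type='concat')(x, y).shape)  # torch.Size([2, 3, 4, 4])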
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/matching.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/matching.py",
"repo_id": "ContextualSP",
"token_count": 1233
}
| 255 |
import collections
import typing
import numpy as np
from .stateful_unit import StatefulUnit
class FrequencyFilter(StatefulUnit):
"""
Frequency filter unit.
:param low: Lower bound, inclusive.
:param high: Upper bound, exclusive.
:param mode: One of `tf` (term frequency), `df` (document frequency),
and `idf` (inverse document frequency).
Examples::
>>> import matchzoo as mz
To filter based on term frequency (tf):
>>> tf_filter = mz.preprocessors.units.FrequencyFilter(
... low=2, mode='tf')
>>> tf_filter.fit([['A', 'B', 'B'], ['C', 'C', 'C']])
>>> tf_filter.transform(['A', 'B', 'C'])
['B', 'C']
To filter based on document frequency (df):
>>> tf_filter = mz.preprocessors.units.FrequencyFilter(
... low=2, mode='df')
>>> tf_filter.fit([['A', 'B'], ['B', 'C']])
>>> tf_filter.transform(['A', 'B', 'C'])
['B']
To filter based on inverse document frequency (idf):
>>> idf_filter = mz.preprocessors.units.FrequencyFilter(
... low=1.2, mode='idf')
>>> idf_filter.fit([['A', 'B'], ['B', 'C', 'D']])
>>> idf_filter.transform(['A', 'B', 'C'])
['A', 'C']
"""
def __init__(self, low: float = 0, high: float = float('inf'),
mode: str = 'df'):
"""Frequency filter unit."""
super().__init__()
self._low = low
self._high = high
self._mode = mode
def fit(self, list_of_tokens: typing.List[typing.List[str]]):
"""Fit `list_of_tokens` by calculating `mode` states."""
valid_terms = set()
if self._mode == 'tf':
stats = self._tf(list_of_tokens)
elif self._mode == 'df':
stats = self._df(list_of_tokens)
elif self._mode == 'idf':
stats = self._idf(list_of_tokens)
else:
            raise ValueError(f"{self._mode} is not a valid filtering mode. "
f"Mode must be one of `tf`, `df`, and `idf`.")
for k, v in stats.items():
if self._low <= v < self._high:
valid_terms.add(k)
self._context[self._mode] = valid_terms
def transform(self, input_: list) -> list:
"""Transform a list of tokens by filtering out unwanted words."""
valid_terms = self._context[self._mode]
return list(filter(lambda token: token in valid_terms, input_))
@classmethod
def _tf(cls, list_of_tokens: list) -> dict:
stats = collections.Counter()
for tokens in list_of_tokens:
stats.update(tokens)
return stats
@classmethod
def _df(cls, list_of_tokens: list) -> dict:
stats = collections.Counter()
for tokens in list_of_tokens:
stats.update(set(tokens))
return stats
@classmethod
def _idf(cls, list_of_tokens: list) -> dict:
num_docs = len(list_of_tokens)
stats = cls._df(list_of_tokens)
for key, val in stats.most_common():
stats[key] = np.log((1 + num_docs) / (1 + val)) + 1
return stats
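# Hypothetical extension of the doctests above, showing the exclusive `high` bound:
# keep only terms whose document frequency satisfies 1 <= df < 2.
rare_filter = FrequencyFilter(low=1, high=2, mode='df')
rare_filter.fit([['A', 'B'], ['B', 'C']])
print(rare_filter.transform(['A', 'B', 'C']))  # ['A', 'C']  (B has df == 2)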
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/frequency_filter.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/frequency_filter.py",
"repo_id": "ContextualSP",
"token_count": 1449
}
| 256 |
"""Classification task."""
from matchzoo.engine.base_task import BaseTask
class Classification(BaseTask):
"""Classification task.
Examples:
>>> classification_task = Classification(num_classes=2)
>>> classification_task.metrics = ['acc']
>>> classification_task.num_classes
2
>>> classification_task.output_shape
(2,)
>>> classification_task.output_dtype
<class 'int'>
>>> print(classification_task)
Classification Task with 2 classes
"""
TYPE = 'classification'
def __init__(self, num_classes: int = 2, **kwargs):
"""Classification task."""
super().__init__(**kwargs)
if not isinstance(num_classes, int):
raise TypeError("Number of classes must be an integer.")
if num_classes < 2:
raise ValueError("Number of classes can't be smaller than 2")
self._num_classes = num_classes
@property
def num_classes(self) -> int:
""":return: number of classes to classify."""
return self._num_classes
@classmethod
def list_available_losses(cls) -> list:
""":return: a list of available losses."""
return ['cross_entropy']
@classmethod
def list_available_metrics(cls) -> list:
""":return: a list of available metrics."""
return ['acc']
@property
def output_shape(self) -> tuple:
""":return: output shape of a single sample of the task."""
return self._num_classes,
@property
def output_dtype(self):
""":return: target data type, expect `int` as output."""
return int
def __str__(self):
""":return: Task name as string."""
return f'Classification Task with {self._num_classes} classes'
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/tasks/classification.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/tasks/classification.py",
"repo_id": "ContextualSP",
"token_count": 698
}
| 257 |
import io
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
# Avoids IDE errors, but actual version is read from version.py
__version__ = None
exec(open('matchzoo/version.py').read())
short_description = 'Facilitating the design, comparison and sharing ' \
'of deep text matching models.'
# Get the long description from the README file
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
install_requires = [
'torch >= 1.2.0',
'pytorch-transformers >= 1.1.0',
'nltk >= 3.4.3',
'numpy >= 1.16.4',
'tqdm == 4.38.0',
'dill >= 0.2.9',
'pandas == 0.24.2',
'networkx >= 2.3',
'h5py >= 2.9.0',
'hyperopt == 0.1.2'
]
extras_requires = {
'tests': [
'coverage >= 4.5.3',
'codecov >= 2.0.15',
'pytest >= 4.6.3',
'pytest-cov >= 2.7.1',
'flake8 >= 3.7.7',
'flake8_docstrings >= 1.3.0'],
}
setup(
name="matchzoo-py",
version=__version__,
author="MatchZoo-py Authors",
author_email="[email protected]",
description=(short_description),
license="Apache 2.0",
keywords="text matching models",
url="https://github.com/NTMC-Community/MatchZoo-py",
packages=find_packages(),
include_package_data=True,
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
"Development Status :: 3 - Alpha",
'Environment :: Console',
'Operating System :: POSIX :: Linux',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 3.6'
],
install_requires=install_requires,
extras_require=extras_requires
)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/setup.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/setup.py",
"repo_id": "ContextualSP",
"token_count": 772
}
| 258 |
import numpy as np
from matchzoo.engine.base_metric import sort_and_couple
from matchzoo import metrics
def test_sort_and_couple():
l = [0, 1, 2]
s = [0.1, 0.4, 0.2]
c = sort_and_couple(l, s)
assert (c == np.array([(1, 0.4), (2, 0.2), (0, 0.1)])).all()
def test_mean_reciprocal_rank():
label = [0, 1, 2]
score = [0.1, 0.4, 0.2]
assert metrics.MeanReciprocalRank()(label, score) == 1
def test_precision_at_k():
label = [0, 1, 2]
score = [0.1, 0.4, 0.2]
assert metrics.Precision(k=1)(label, score) == 1.
assert metrics.Precision(k=2)(label, score) == 1.
assert round(metrics.Precision(k=3)(label, score), 2) == 0.67
def test_average_precision():
label = [0, 1, 2]
score = [0.1, 0.4, 0.2]
assert round(metrics.AveragePrecision()(label, score), 2) == 0.89
def test_mean_average_precision():
label = [0, 1, 2]
score = [0.1, 0.4, 0.2]
assert metrics.MeanAveragePrecision()(label, score) == 1.
def test_dcg_at_k():
label = [0, 1, 2]
score = [0.1, 0.4, 0.2]
dcg = metrics.DiscountedCumulativeGain
assert round(dcg(k=1)(label, score), 2) == 1.44
assert round(dcg(k=2)(label, score), 2) == 4.17
assert round(dcg(k=3)(label, score), 2) == 4.17
def test_ndcg_at_k():
label = [0, 1, 2]
score = [0.1, 0.4, 0.2]
ndcg = metrics.NormalizedDiscountedCumulativeGain
assert round(ndcg(k=1)(label, score), 2) == 0.33
assert round(ndcg(k=2)(label, score), 2) == 0.80
assert round(ndcg(k=3)(label, score), 2) == 0.80
def test_accuracy():
label = np.array([1])
score = np.array([[0, 1]])
assert metrics.Accuracy()(label, score) == 1
def test_cross_entropy():
label = [0, 1]
score = [[0.25, 0.25], [0.01, 0.90]]
assert round(metrics.CrossEntropy()(label, score), 2) == 0.75
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/test_metrics.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/test_metrics.py",
"repo_id": "ContextualSP",
"token_count": 838
}
| 259 |
<jupyter_start><jupyter_code>import sys
sys.path.append('/data/users/fyx/NTMC-Community/MatchZoo-py/')
import matchzoo as mz
%run init.ipynb
preprocessor = mz.models.DUET.get_default_preprocessor(
filter_mode='df',
filter_low_freq=2,
truncated_mode='post',
truncated_length_left=10,
truncated_length_right=40,
ngram_size=3
)
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
dev_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
triletter_callback = mz.dataloader.callbacks.Ngram(preprocessor, mode='sum')
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=2,
num_neg=1,
callbacks=[triletter_callback]
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed,
callbacks=[triletter_callback]
)
padding_callback = mz.models.DUET.get_default_padding_callback(
fixed_length_left=10,
fixed_length_right=40,
pad_word_value=0,
pad_word_mode='pre',
with_ngram=True
)
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=20,
stage='train',
resample=True,
sort=False,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=20,
stage='dev',
callback=padding_callback
)
model = mz.models.DUET()
model.params['task'] = ranking_task
model.params['left_length'] = 10
model.params['right_length'] = 40
model.params['lm_filters'] = 100
model.params['mlp_num_layers'] = 2
model.params['mlp_num_units'] = 100
model.params['mlp_num_fan_out'] = 100
model.params['mlp_activation_func'] = 'tanh'
model.params['vocab_size'] = preprocessor.context['ngram_vocab_size']
model.params['dm_conv_activation_func'] = 'relu'
model.params['dm_filters'] = 100
model.params['dm_kernel_size'] = 3
model.params['dm_right_pool_size'] = 4
model.params['dropout_rate'] = 0.2
model.build()
print(model)
print('Trainable params: ', sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adadelta(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/duet.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/duet.ipynb",
"repo_id": "ContextualSP",
"token_count": 938
}
| 260 |
#!/usr/bin/env bash
export model_file=checkpoints_sparc/sparc_concat_none_model
python -m allennlp.service.server_simple \
--archive-path ${model_file}/model.tar.gz \
--predictor sparc \
--include-package predictor.sparc_predictor \
--include-package dataset_reader.sparc_reader \
--include-package models.sparc_parser \
--title "Contextual Semantic Parsing Demo" \
--field-name question \
--field-name database_id
|
ContextualSP/semantic_parsing_in_context/bash_files/linux/demo.bash/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/bash_files/linux/demo.bash",
"repo_id": "ContextualSP",
"token_count": 166
}
| 261 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Dict
from typing import List, Optional
from constant import SpecialSymbol
from context.db_context import SparcDBContext
from context.utils import Table
Keywords = ['limit', 'des', 'asc', 'and', 'or', 'sum', 'min', 'max', 'avg', 'none', '=', '!=', '<', '>', '<=', '>=',
'between', 'like', 'not_like', 'in', 'not_in', 'intersect', 'union', 'except', 'none', 'count', 'ins']
class Action(object):
grammar_dict = {}
def __init__(self):
self.ins_id = None
self.production = None
def get_next_action(self, is_sketch=False):
actions = list()
for x in self.production.split(' ')[1:]:
if x not in Keywords:
rule_type = eval(x)
if is_sketch:
if rule_type is not A and rule_type is not T:
actions.append(rule_type)
else:
actions.append(rule_type)
return actions
def __repr__(self):
space_ind = self.production.find(' ')
return f'{self.production[:space_ind]} -> {self.production[space_ind + 1:]}'
def is_global(self):
"""
        An action is global if it applies to the whole dataset, while non-global
        actions only apply to specific instances.
:return:
"""
if self.__class__ in [C, T, Segment]:
return False
else:
return True
def __lt__(self, other):
return self.__repr__() < other.__repr__()
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
return self.__repr__() == other.__repr__()
@staticmethod
def from_str(action_repr: str):
"""
Build an action object from string
:param action_repr: the representation of action
:return: Action object
"""
# the from_str ONLY can be used in non-copy scenario
lhs, rhs = action_repr.split(' -> ')
# eval class object
cls_obj = eval(lhs)
if cls_obj in [C, T]:
return cls_obj(rhs)
else:
# find the rule id
rule_str = ' '.join([lhs, rhs])
grammar_dict: Dict = cls_obj.grammar_dict
rule_id = list(grammar_dict.keys())[list(grammar_dict.values()).index(rule_str)]
return cls_obj(rule_id)
@property
def is_nonterminal(self):
"""
Here we use a simple but not robust method to judge whether self is a nonterminal action (Select),
or a terminal action (C/T)
:return:
"""
if isinstance(self.ins_id, int):
return True
else:
return False
@property
def nonterminal(self):
return self.__class__.__name__
class ActionTreeNode(object):
def __init__(self, action: Action):
self.action = action
self.child: List[Optional[ActionTreeNode]] = []
# drop self
if isinstance(self.action.ins_id, int):
all_child = self.action.grammar_dict[self.action.ins_id].split(' ')[1:]
else:
all_child = []
for child_name in all_child:
if child_name not in Keywords:
# placeholder
self.child.append(None)
def full_in_child(self) -> bool:
"""
        Test whether this node's child slots are all filled: return True when no more
        children can be added, and False if an empty slot remains.
        :return:
        """
        # an empty (None) slot means another child can still be inserted here
        if None in self.child:
            return False
        # all child slots are filled
return True
def add_child(self, action_node):
ind = self.child.index(None)
self.child[ind] = action_node
def get_tree_action(self) -> List[Action]:
if self.action.is_nonterminal:
sub_tree = [self.action]
# FIXME: here we use a simple method to extract all subtrees from current root node:
# call all nodes' get_sub_tree. A better way is to backtrack and construct all subtrees
# using dynamic programming.
for child in self.child:
sub_tree.extend(child.get_tree_action())
return sub_tree
else:
return [self.action]
class GrammarType:
"""
Filter Grammar Type
"""
FilterBetween = 1
FilterEqual = 2
FilterGreater = 3
FilterLess = 4
FilterGeq = 5
FilterLeq = 6
FilterNeq = 7
FilterInNes = 8
FilterNotInNes = 9
FilterLike = 10
FilterNotLike = 11
FilterIs = 12
FilterExist = 13
    # TODO: in and like do not have a nested version
FilterNotNes = 14
FilterBetweenNes = 15
FilterEqualNes = 16
FilterGreaterNes = 17
FilterLessNes = 18
FilterGeqNes = 19
FilterLeqNes = 20
FilterNeqNes = 21
FilterIsNes = 22
FilterExistNes = 23
FilterAnd = 24
FilterOr = 25
# FilterNone = 26
"""
Statement Grammar Type
"""
StateInter = 1
StateUnion = 2
StateExcept = 3
StateNone = 4
"""
Root Grammar Type
"""
RootSFO = 1
RootSO = 2
RootSF = 3
RootS = 4
RootJSFO = 5
RootJSO = 6
RootJSF = 7
RootJS = 8
"""
Select Grammar Type depends on the length of A
"""
"""
Join Grammar Type depends on the length of A
"""
"""
A Grammar Type
"""
ANone = 1
AMax = 2
AMin = 3
ACount = 4
ASum = 5
AAvg = 6
"""
Order Grammar Type
"""
OrderNone = 1
OrderAsc = 2
OrderDes = 3
OrderAscLim = 4
OrderDesLim = 5
class Grammar(object):
# static property, production rule to id
productions = None
def __init__(self, db_context: SparcDBContext):
self._pro_counter = 0
self._type_counter = 0
# lazy loading, init the production
if self.productions is None:
# new self.productions
self.productions = []
# C and T only contain one rule so they do not need initialization
self.build_production_map(Statement)
self.build_production_map(Root)
self.build_production_map(Join)
self.build_production_map(Select)
self.build_production_map(A)
self.build_production_map(Filter)
self.build_production_map(Order)
self.db_context = db_context
self.local_grammar = self.build_instance_production()
@classmethod
def build_ast_tree(cls, action_seq: List[Action]):
# action is the depth-first traversal
node_queue: List[ActionTreeNode] = []
root_node = None
seq_len = len(action_seq)
for i in range(seq_len):
# build tree node
tree_node = ActionTreeNode(action_seq[i])
if i == 0:
root_node = tree_node
# try to append current node into the first element of node queue
else:
cur_node = node_queue[-1]
# cannot insert, pop the least node
while cur_node.full_in_child():
# break the first node
node_queue.pop(-1)
# update current node
cur_node = node_queue[-1]
cur_node.add_child(tree_node)
node_queue.append(tree_node)
return root_node
@classmethod
def extract_all_subtree(cls, action_seq: List[Action]) -> List:
"""
Given the root node of ast tree, return all the valid subtrees
:return:
"""
nonterminal_node_list: List[ActionTreeNode] = []
# store root node into queue
node_queue: List[ActionTreeNode] = []
seq_len = len(action_seq)
for i in range(seq_len):
# build tree node
tree_node = ActionTreeNode(action_seq[i])
# try to append current node into the first element of node queue
if i == 0:
pass
# try to append current node into the first element of node queue
else:
cur_node = node_queue[-1]
# cannot insert, pop the least node
while cur_node.full_in_child():
# break the first node
node_queue.pop(-1)
# update current node
cur_node = node_queue[-1]
cur_node.add_child(tree_node)
node_queue.append(tree_node)
# add note into node list
if tree_node.action.is_nonterminal:
nonterminal_node_list.append(tree_node)
# build tree end, get all subtrees
subtree_list = [node.get_tree_action() for node in nonterminal_node_list]
return subtree_list
def build_production_map(self, cls):
"""
Record the production rules of class cls into self
:param cls: son class of Action
"""
# (note) the values could provide a fixed order
# only when the dictionary is built on
prod_ids = cls.grammar_dict.keys()
for prod_id in prod_ids:
cls_obj = cls(prod_id)
self.productions.append(cls_obj)
def build_instance_production(self):
"""
Instance all possible column and table production rules using db schema
"""
db_schema: Dict[str, Table] = self.db_context.schema
# fetch table name(id)
table_names = sorted([db_schema[table_ind].name for table_ind in
list(db_schema.keys())], reverse=True)
local_grammars = [T(table_name) for table_name in table_names]
all_columns = set()
for table in db_schema.values():
# use name(id) as standard grammar
all_columns.update([C(column.name) for column in table.columns])
column_grammars = list(all_columns)
local_grammars.extend(column_grammars)
# convert into set and sorted
local_grammars = set(local_grammars)
# sorted local grammars
local_grammars = sorted(local_grammars)
return local_grammars
@property
def global_grammar(self):
return sorted(self.productions)
@staticmethod
def default_sql_clause() -> Dict:
default_sql = {
"orderBy": [],
"from": {
"table_units": [
[
"table_unit",
1
]
],
"conds": []
},
"union": None,
"except": None,
"groupBy": None,
"limit": None,
"intersect": None,
"where": [],
"having": [],
"select": [
False,
[
[
3,
[
0,
[
0,
5,
False
],
None
]
]
]
]
}
return default_sql
class Statement(Action):
grammar_dict = {
GrammarType.StateInter: 'Statement intersect Root Root',
GrammarType.StateUnion: 'Statement union Root Root',
GrammarType.StateExcept: 'Statement except Root Root',
GrammarType.StateNone: 'Statement Root'
}
def __init__(self, id_c):
super().__init__()
self.ins_id = id_c
self.production = self.grammar_dict[id_c]
class Root(Action):
grammar_dict = {
GrammarType.RootSFO: 'Root Select Filter Order',
GrammarType.RootSF: 'Root Select Filter',
GrammarType.RootSO: 'Root Select Order',
GrammarType.RootS: 'Root Select',
GrammarType.RootJSFO: 'Root Join Select Filter Order',
GrammarType.RootJSF: 'Root Join Select Filter',
GrammarType.RootJSO: 'Root Join Select Order',
GrammarType.RootJS: 'Root Join Select'
}
def __init__(self, id_c):
super().__init__()
self.ins_id = id_c
self.production = self.grammar_dict[id_c]
class Select(Action):
grammar_dict = {
0: 'Select A',
1: 'Select A A',
2: 'Select A A A',
3: 'Select A A A A',
4: 'Select A A A A A',
5: 'Select A A A A A A'
}
def __init__(self, id_c):
super().__init__()
self.ins_id = id_c
self.production = self.grammar_dict[id_c]
class Join(Action):
grammar_dict = {
0: 'Join A',
# 1: 'Join A A'
}
def __init__(self, id_c):
super().__init__()
self.ins_id = id_c
self.production = self.grammar_dict[id_c]
class A(Action):
grammar_dict = {
GrammarType.ANone: 'A none C T',
GrammarType.AMax: 'A max C T',
GrammarType.AMin: 'A min C T',
GrammarType.ACount: 'A count C T',
GrammarType.ASum: 'A sum C T',
GrammarType.AAvg: 'A avg C T'
}
def __init__(self, id_c):
super().__init__()
self.ins_id = id_c
self.production = self.grammar_dict[id_c]
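# Illustration: an `A` production pairs an aggregator with a column and its table, e.g.
# 'A count C T' instantiated with C('name') and T('singer') corresponds to COUNT over the
# `name` column of table `singer`.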
class Filter(Action):
# TODO: why not directly predict the number of Filters
grammar_dict = {
GrammarType.FilterAnd: 'Filter Filter and Filter',
GrammarType.FilterOr: 'Filter Filter or Filter',
GrammarType.FilterEqual: 'Filter = A',
GrammarType.FilterGreater: 'Filter > A',
GrammarType.FilterLess: 'Filter < A',
GrammarType.FilterGeq: 'Filter >= A',
GrammarType.FilterLeq: 'Filter <= A',
GrammarType.FilterNeq: 'Filter != A',
GrammarType.FilterBetween: 'Filter between A',
# TODO: like/not_like only applies to string-typed columns
GrammarType.FilterLike: 'Filter like A',
GrammarType.FilterNotLike: 'Filter not_like A',
GrammarType.FilterEqualNes: 'Filter = A Root',
GrammarType.FilterGreaterNes: 'Filter > A Root',
GrammarType.FilterLessNes: 'Filter < A Root',
GrammarType.FilterGeqNes: 'Filter >= A Root',
GrammarType.FilterLeqNes: 'Filter <= A Root',
GrammarType.FilterNeqNes: 'Filter != A Root',
GrammarType.FilterBetweenNes: 'Filter between A Root',
GrammarType.FilterInNes: 'Filter in A Root',
GrammarType.FilterNotInNes: 'Filter not_in A Root',
}
def __init__(self, id_c):
super().__init__()
self.ins_id = id_c
self.production = self.grammar_dict[id_c]
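# Illustration: productions ending with `Root` (e.g. 'Filter in A Root') introduce a nested
# subquery, while the plain comparison productions (e.g. 'Filter > A') compare against a
# literal value.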
class Order(Action):
grammar_dict = {
GrammarType.OrderAsc: 'Order asc A',
GrammarType.OrderDes: 'Order des A',
GrammarType.OrderAscLim: 'Order asc A limit',
GrammarType.OrderDesLim: 'Order des A limit'
}
def __init__(self, ins_id):
super().__init__()
self.ins_id = ins_id
self.production = self.grammar_dict[ins_id]
class C(Action):
def __init__(self, ins_id: str):
super().__init__()
# TODO: lower-case here because the col -> id mapping (entity_names) in SparcWorld uses lower-cased keys.
self.ins_id = ins_id.lower()
self.production = f'C {self.ins_id}'
class T(Action):
def __init__(self, ins_id: str):
super().__init__()
self.ins_id = ins_id.lower()
self.production = f'T {self.ins_id}'
class Segment(Action):
"""
Segment actions appear only in training post-processing; they are used to copy segment-level precedent SQL.
"""
def __init__(self, copy_ins_action: List[Action], copy_ins_idx: List[int]):
super().__init__()
self.copy_ins_action = copy_ins_action
# copy ins idx has been padded
self.copy_ins_idx = copy_ins_idx
self.production = f'Copy {self.ins_id}'
def __repr__(self):
repr_str = SpecialSymbol.copy_delimiter + \
SpecialSymbol.copy_delimiter.join([str(action) for action in self.copy_ins_action])
return repr_str
# the nonterminal of the segment is that of its first action
@property
def nonterminal(self):
# get the nonterminal of the first action
first_action = self.copy_ins_action[0]
return first_action.nonterminal
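# Illustration (hypothetical values): a Segment wrapping the precedent actions
# ['Filter = A', 'A none C T', 'C name', 'T singer'] is rendered by __repr__ as those action
# strings joined by SpecialSymbol.copy_delimiter (with a leading delimiter), and its
# nonterminal is `Filter`.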
|
ContextualSP/semantic_parsing_in_context/context/grammar.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/context/grammar.py",
"repo_id": "ContextualSP",
"token_count": 7814
}
| 262 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import re
import sys
import traceback
from collections import namedtuple
from typing import Dict, List, Any
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from allennlp.data import Vocabulary
from context.copy_production_rule_field import CopyProductionRule
from allennlp.models import Model
from allennlp.modules import Seq2SeqEncoder, Attention
from allennlp.modules.attention.dot_product_attention import DotProductAttention
from allennlp.modules import TextFieldEmbedder, Embedding
from allennlp.modules.input_variational_dropout import InputVariationalDropout
from torch.nn.modules import Dropout
from allennlp.modules.seq2seq_encoders import PytorchSeq2SeqWrapper
from allennlp.modules.seq2vec_encoders import PytorchSeq2VecWrapper
from allennlp.training.metrics import Average
from allennlp.modules.feedforward import FeedForward
from allennlp.nn import util
from allennlp.state_machines import BeamSearch
from overrides import overrides
from torch.nn.utils.rnn import pad_sequence
from allennlp.common.checks import check_dimensions_match
from context.converter import ActionConverter
from context.grammar import Statement, Action, Segment
from context.world import SparcWorld
from models.decode_trainer import MaximumMarginalLikelihood
from models.metrics import MetricUtil, TurnAverage
from models.states_machine.grammar_based_state import GrammarStatelet, GrammarBasedState, RnnStatelet, ConditionStatelet
from models.transition_functions.linking_transition_function import LinkingTransitionFunction
from models.util import get_span_representation, find_start_end
from copy import deepcopy
VALIDATE_SIZE = 422
@Model.register('sparc')
class SparcParser(Model):
def __init__(self,
vocab: Vocabulary,
text_encoder: Seq2SeqEncoder,
decoder_beam_search: BeamSearch,
input_attention: Attention,
text_embedder: TextFieldEmbedder,
max_decoding_steps: int,
action_embedding_dim: int,
entity_embedding_dim: int,
training_beam_size: int,
dropout_rate: float,
gate_dropout_rate: float = 0.2,
loss_mask: int = 6,
serialization_dir: str = 'checkpoints\\basic_model',
dataset_path: str = 'dataset',
decoder_num_layers: int = 1,
rule_namespace: str = 'rule_labels',
# parser linking and schema encoding setting
use_feature_score: bool = False,
use_schema_encoder: bool = False,
use_linking_embedding: bool = False,
# turn-level encoder setting
use_discourse_encoder: bool = False,
discourse_output_dim: int = 100,
use_attend_over_history: bool = False,
use_turn_position: bool = False,
# gate setting
use_context_gate: bool = False,
use_sigmoid_gate: bool = False,
use_unk_candidate: bool = False,
attn_on_self: bool = False,
gate_attn_size: int = 100,
# copy setting
use_sql_attention: bool = False,
link_to_precedent_sql: bool = False,
sql_hidden_size: int = 100,
use_copy_tree: bool = False,
use_copy_token: bool = False,
use_hard_token_as_seg: bool = False,
copy_encode_anon: bool = False,
copy_encode_with_context: bool = False,
# bert setting
bert_mode: str = "v0",
debug_parsing: bool = False):
super().__init__(vocab)
self.vocab = vocab
self.max_decoding_steps = max_decoding_steps
"""
loss_mask means the i-th round contributes to the loss only when i is less than loss_mask;
later rounds are masked out.
"""
self.loss_mask = loss_mask
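# e.g. with the default loss_mask = 6, only the first 6 turns of an interaction contribute
# to the training loss.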
# padding for invalid action
self.action_padding_index = -1
# dropout inside/outside lstm
if dropout_rate > 0:
self.var_dropout = InputVariationalDropout(p=dropout_rate)
else:
self.var_dropout = lambda x: x
self.dropout = Dropout(p=dropout_rate)
# embedding layer of action like `Statement -> Select` and etc.
self.rule_namespace = rule_namespace
num_actions = vocab.get_vocab_size(self.rule_namespace)
"""
Define encoder layer
"""
self.text_embedder = text_embedder
# for bert/non-bert, we use the same text encoder
self.text_encoder = text_encoder
self.encoder_output_dim = text_encoder.get_output_dim()
self.embedding_dim = self.text_embedder.get_output_dim()
self.scale = int(math.sqrt(self.embedding_dim))
self.decoder_num_layers = decoder_num_layers
"""
Define embedding layer
"""
# used for scoring the action selection
self.output_action_embedder = Embedding(num_embeddings=num_actions,
embedding_dim=action_embedding_dim)
# used for sequence generation input
self.action_embedder = Embedding(num_embeddings=num_actions,
embedding_dim=action_embedding_dim)
# entity type embedding layer for types such as text/number/date/boolean/primary/foreign, etc.; 0 is for padding
# TODO: entity type embedding will add in the text embedding, so it should keep the same dimension
self.num_entity_types = 9 + 1
self.link_entity_type_embedder = Embedding(num_embeddings=self.num_entity_types,
embedding_dim=entity_embedding_dim,
padding_index=0)
self.output_entity_type_embedder = Embedding(num_embeddings=self.num_entity_types,
embedding_dim=action_embedding_dim,
padding_index=0)
# Note: the dimension is highly related to the knowledge graph field.
# please go there to see the dimensions of this linking feature.
self.linking_layer = torch.nn.Linear(14, 1)
torch.nn.init.uniform_(self.linking_layer.weight)
torch.nn.init.zeros_(self.linking_layer.bias)
self.beam_search = decoder_beam_search
"""
Define discourse/turn level encoder
"""
self.use_discourse_encoder = use_discourse_encoder
self.use_attend_over_history = use_attend_over_history
if use_discourse_encoder:
discourse_input_dim = self.text_encoder.get_output_dim()
self.discourse_level_encoder = PytorchSeq2SeqWrapper(nn.LSTM(input_size=discourse_input_dim,
hidden_size=discourse_output_dim,
batch_first=True),
stateful=True)
if bert_mode == "v0":
check_dimensions_match(self.embedding_dim * 2 + discourse_output_dim,
text_encoder.get_input_dim(),
"[Text Embedding; Linking Embedding; Discourse State]",
"Text Encoder Input")
else:
check_dimensions_match(self.embedding_dim + discourse_output_dim,
text_encoder.get_input_dim(),
"[Text Embedding; Linking Embedding; Discourse State]",
"Text Encoder Input")
check_dimensions_match(discourse_input_dim,
text_encoder.get_output_dim(),
"Discourse Input",
"Text Encoder Output")
else:
self.discourse_level_encoder = lambda x: x
self.use_turn_position = use_turn_position
# Note: turn attention means concatenating an extra positional embedding with the original
# encoder output hidden state (whether or not it is encoded by the discourse lstm)
if use_turn_position:
# turn position needs extra positional embedding (which is equal to word embedding)
self.encoder_output_dim += self.embedding_dim
# turn position embedding, the same dimension with embedding dim, maximum is 5
# TODO: 7(sparc) or 50 (cosql)
self.turn_embedder = Embedding(num_embeddings=50,
embedding_dim=text_embedder.get_output_dim())
# whether to use the context modeling
self.use_context_gate = use_context_gate
self.use_sigmoid_gate = use_sigmoid_gate
self.use_unk_candidate = use_unk_candidate
# if true, calculate attention between i and 1,2, ...i;
# otherwise, only calculate i and 1,2,...i-1; i-th is always set as 1.0;
self.attn_on_self = attn_on_self
context_hidden_size = text_encoder.get_output_dim()
if self.use_turn_position:
context_hidden_size += self.turn_embedder.get_output_dim()
if self.use_context_gate:
if self.use_unk_candidate:
self.first_unk_context = nn.Parameter(torch.FloatTensor(context_hidden_size))
torch.nn.init.uniform_(self.first_unk_context, -0.1, 0.1)
self.context_w = nn.Linear(context_hidden_size, gate_attn_size, bias=False)
self.context_u = nn.Linear(context_hidden_size, gate_attn_size, bias=False)
self.context_v = nn.Linear(gate_attn_size * 2, 1)
torch.nn.init.uniform_(self.context_w.weight, -0.1, 0.1)
torch.nn.init.uniform_(self.context_u.weight, -0.1, 0.1)
torch.nn.init.uniform_(self.context_v.weight, -0.1, 0.1)
# embedding of the first special action
self.first_action_embedding = nn.Parameter(torch.FloatTensor(action_embedding_dim))
self.first_attended_output = nn.Parameter(torch.FloatTensor(self.encoder_output_dim))
# initialize parameters
torch.nn.init.uniform_(self.first_action_embedding, -0.1, 0.1)
torch.nn.init.uniform_(self.first_attended_output, -0.1, 0.1)
"""
Define sql query related network
"""
# if anon, we will encode schema using its type; otherwise, using its schema information
self.copy_encode_anon = copy_encode_anon
if use_copy_token:
# encoder_output_dim is the decoder_input_dim
copy_gate = FeedForward(self.encoder_output_dim,
num_layers=2,
hidden_dims=[int(self.encoder_output_dim / 2), 1],
# keep the last layer
activations=[torch.tanh, lambda x: x],
dropout=gate_dropout_rate)
else:
copy_gate = None
if self.copy_encode_anon:
"""
Use token type to represent its meaning
"""
self.sql_context_encoder = PytorchSeq2SeqWrapper(nn.LSTM(input_size=action_embedding_dim,
hidden_size=sql_hidden_size,
batch_first=True,
bidirectional=True))
self.sql_segment_encoder = PytorchSeq2VecWrapper(nn.LSTM(input_size=action_embedding_dim,
hidden_size=sql_hidden_size,
batch_first=True,
bidirectional=True))
self.sql_global_embedder = Embedding(num_embeddings=num_actions,
embedding_dim=action_embedding_dim)
else:
self.sql_context_encoder = PytorchSeq2SeqWrapper(nn.LSTM(input_size=self.embedding_dim,
hidden_size=sql_hidden_size,
batch_first=True,
bidirectional=True))
self.sql_segment_encoder = PytorchSeq2VecWrapper(nn.LSTM(input_size=self.embedding_dim,
hidden_size=sql_hidden_size,
batch_first=True,
bidirectional=True))
self.sql_global_embedder = Embedding(num_embeddings=num_actions,
embedding_dim=self.embedding_dim)
# FIXME: if tied, you should assign these two the same as the above embeddings
# self.output_action_embedder / self.output_entity_type_embedder
self.sql_schema_embedder = Embedding(num_embeddings=self.num_entity_types,
embedding_dim=action_embedding_dim,
padding_index=0)
self.sql_hidden_size = sql_hidden_size
self.sql_output_size = self.sql_segment_encoder.get_output_dim()
# add bias (equal to add bias to action embedding)
self.copy_sql_output = nn.Linear(self.sql_output_size, action_embedding_dim)
self.copy_sql_input = nn.Linear(self.sql_output_size, action_embedding_dim, bias=True)
# attentional reading from precedent sql query
self.use_sql_attention = use_sql_attention
self.link_to_precedent_sql = link_to_precedent_sql
# link to precedent sql means the final pointer network(linking score) will also point to precedent SQL
if self.link_to_precedent_sql:
assert self.use_sql_attention is True
sql_attention = DotProductAttention()
self.use_copy_segment = use_copy_tree
self.use_hard_token_as_seg = use_hard_token_as_seg
self.use_copy_token = use_copy_token
self.use_last_sql = self.use_sql_attention or self.use_copy_token or self.use_copy_segment
assert not (self.use_copy_token & self.use_copy_segment), "Cannot use both segment-level copy and token-level copy!"
# for segment-level copy, encode sql with context means acquiring the span representation;
# for token-level copy, encode sql with context means using an encoder to get its hidden state;
self.copy_encode_with_context = copy_encode_with_context
# transform embedding into the same dimension with sql_context_encoder output
self.sql_embedder_transform = nn.Linear(action_embedding_dim, self.sql_output_size, bias=False)
torch.nn.init.zeros_(self.copy_sql_input.bias)
"""
Define parsing related variants
"""
self.use_schema_encoder = use_schema_encoder
self.use_linking_embedding = use_linking_embedding
self.use_feature_score = use_feature_score
# responsible for column encoding and table encoding respectively
if use_schema_encoder:
self.schema_encoder = PytorchSeq2VecWrapper(nn.LSTM(input_size=self.embedding_dim,
hidden_size=int(self.embedding_dim / 2),
bidirectional=True,
batch_first=True))
else:
self.schema_encoder = None
"""
Define bert mode, now we support two kinds of mode:
"v0": IRNet
"v3": IRNet + BERT
"""
self.bert_mode = bert_mode
decoder_input_dim = self.encoder_output_dim
# extra attention concat input
if self.use_sql_attention:
decoder_input_dim += self.sql_output_size
if self.use_sql_attention:
self.transition_function = LinkingTransitionFunction(encoder_output_dim=self.encoder_output_dim,
decoder_input_dim=decoder_input_dim,
action_embedding_dim=action_embedding_dim,
input_attention=input_attention,
sql_attention=sql_attention,
sql_output_dim=self.sql_output_size,
predict_start_type_separately=False,
add_action_bias=False,
copy_gate=copy_gate,
dropout=dropout_rate,
num_layers=self.decoder_num_layers)
else:
self.transition_function = LinkingTransitionFunction(encoder_output_dim=self.encoder_output_dim,
decoder_input_dim=decoder_input_dim,
action_embedding_dim=action_embedding_dim,
input_attention=input_attention,
predict_start_type_separately=False,
add_action_bias=False,
copy_gate=copy_gate,
dropout=dropout_rate,
num_layers=self.decoder_num_layers)
"""
Define the linear layer that converts matching features into scores
"""
"""
Define metrics to measure
"""
self.sql_metric_util = MetricUtil(dataset_path=dataset_path)
self.action_metric_util = MetricUtil()
self.sql_metric = TurnAverage('sql')
self.action_metric = TurnAverage('action')
self.gate_metrics = {
'_copy': Average(),
'info': Average()
}
"""
Debugging setting
"""
self.debug_parsing = debug_parsing
if self.debug_parsing:
try:
from models.visualizer import Visualizer
# get serialization_dir
summary_dir = os.path.join("sql_log", os.path.split(serialization_dir)[-1])
self.visualizer = Visualizer(summary_dir=summary_dir,
validation_size=VALIDATE_SIZE,
vocab=self.vocab)
except ImportError:
print("Please install tensorboardX to enable debugging in parsing.")
self.performance_history = {}
"""
Define decoder trainer (maximum marginal likelihood)
"""
self.decoder_trainer = MaximumMarginalLikelihood(training_beam_size,
re_weight=False,
loss_mask=self.loss_mask)
self.dev_step = 0
@overrides
def forward(self,
inter_utterance: Dict[str, torch.LongTensor],
inter_segment: torch.LongTensor,
inter_nonterminal: List,
valid_actions_list: List[List[List[CopyProductionRule]]],
action_sequence: torch.LongTensor,
worlds: List[List[SparcWorld]],
inter_schema: Dict[str, torch.LongTensor],
entity_type: torch.FloatTensor,
entity_mask: torch.LongTensor,
# Action sequence with copy is built for copy segment.
# For the first turn, it is equal to action_sequence
action_sequence_with_copy: torch.LongTensor = None,
schema_position: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
device = entity_type.device
if 'tokens' in inter_utterance:
assert self.bert_mode == "v0"
# batch_size x inter_size x utter_size
utterance_tokens = inter_utterance['tokens']
else:
assert self.bert_mode != "v0"
utterance_tokens = inter_utterance['bert']
batch_size, inter_size, _ = utterance_tokens.size()
entity_type = entity_type * entity_mask
# batch_size x col_size (we should expand it into inter_size then)
entity_type = entity_type.long().view(batch_size * inter_size, -1)
# make ids
inter_segment = inter_segment.view(batch_size * inter_size, -1).long()
encoder_input, encoder_mask, linked_scores, encoding_schema = self._init_parser_input(
inter_utterance=inter_utterance,
inter_schema=inter_schema,
entity_type=entity_type,
inter_segment=inter_segment,
schema_position=schema_position)
world_flatten = []
for inter_world in worlds:
for inter_ind, world in enumerate(inter_world):
# only remove them when training & not pretrain
world_flatten.append(world)
valid_action_flatten = []
for inter_valid_action in valid_actions_list:
for valid_action in inter_valid_action:
valid_action_flatten.append(valid_action)
# batch_size x col_size (we should expand it into inter_size then)
entity_type = entity_type.long().view(batch_size * inter_size, -1)
if self.training:
initial_state, penalty_term = self._init_grammar_state(encoder_input,
encoder_mask,
batch_size,
inter_size,
world_flatten,
linked_scores,
valid_action_flatten,
entity_type,
encoding_schema)
if self.use_copy_segment:
# segment-level copy will change the supervision
action_copy_mask = torch.ne(action_sequence_with_copy, self.action_padding_index)
decode_output = self.decoder_trainer.decode(initial_state,
self.transition_function,
(action_sequence_with_copy,
action_copy_mask))
else:
action_mask = torch.ne(action_sequence, self.action_padding_index)
decode_output = self.decoder_trainer.decode(initial_state,
self.transition_function,
(action_sequence,
action_mask))
return {'loss': decode_output['loss'] + 0 * penalty_term}
else:
assert batch_size == 1, "Now we only support batch_size = 1 on evaluation"
self.dev_step += batch_size
loss = torch.tensor([0]).float().to(device)
initial_state, _ = self._init_grammar_state(encoder_input,
encoder_mask,
batch_size,
inter_size,
world_flatten,
linked_scores,
valid_action_flatten,
entity_type,
encoding_schema)
if action_sequence is not None and action_sequence.size(1) > 1:
try:
with torch.no_grad():
if self.use_copy_segment:
action_copy_mask = torch.ne(action_sequence_with_copy, self.action_padding_index)
loss = self.decoder_trainer.decode(initial_state,
self.transition_function,
(action_sequence_with_copy,
action_copy_mask))['loss']
else:
action_mask = torch.ne(action_sequence, self.action_padding_index)
loss = self.decoder_trainer.decode(initial_state,
self.transition_function,
(action_sequence,
action_mask))['loss']
except ZeroDivisionError:
# reached a dead-end during beam search
pass
outputs: Dict[str, Any] = {
'loss': loss
}
# In evaluation, segment-level copy will lead to two concerns:
# 1. the evaluation of turn $t$ can only be done after the turn $t-1$, so we need dynamically update
# precedent action sequence stored in the world.
# 2. the generating results should be reformulated as non-copy existing (e.g. expand the copy action).
num_steps = self.max_decoding_steps
# construct db_contexts
db_contexts = [world.db_context if world is not None else None
for world in world_flatten]
# Get the rules out of the instance
index_to_rule = [production_rule_field[0]
for production_rule_field in
valid_action_flatten[0]]
if self.use_last_sql:
assert batch_size == 1
outputs['best_predict_idx'] = []
outputs['best_predict_action_copy'] = []
outputs['debug_info'] = []
# CAUTION: we will change the world & valid_action_flatten, so we need deepcopy
world_flatten = deepcopy(world_flatten)
valid_action_flatten = deepcopy(valid_action_flatten)
# clear all copy-based actions in valid_action_flatten(if any)
for i in range(inter_size):
# no padding
copy_action_ids = []
for j in reversed(range(len(valid_action_flatten[i]))):
action = valid_action_flatten[i][j]
if action.rule == '':
del valid_action_flatten[i][j]
elif action.is_copy_rule:
copy_action_ids.append(j)
del valid_action_flatten[i][j]
valid_action_flatten[i] = [action for action in valid_action_flatten[i]
if not action.is_copy_rule and action.rule]
world_flatten[i].clear_precedent_state(copy_action_ids)
# to easily handle it, we assume batch_size = 1
for i in range(inter_size):
# WARNING: if we use both discourse encoder & segment copy, the discourse encoder
# assumes the input & mask are batchwise-interaction, however, here we pass by
# the only turn itself. Therefore, for discourse encoder scenario, we should
# pass by the encoder[:i + 1]
if self.use_discourse_encoder:
initial_state, _ = self._init_grammar_state(encoder_input[: i + 1],
encoder_mask[: i + 1],
batch_size,
# inter_size is i + 1
i + 1,
world_flatten[: i + 1],
linked_scores[: i + 1],
valid_action_flatten[: i + 1],
entity_type[: i + 1],
encoding_schema[: i + 1])
initial_state.debug_info = [[] for _ in range(i + 1)]
temp_predict_results = self.beam_search.search(num_steps,
initial_state,
self.transition_function,
keep_final_unfinished_states=True)
else:
# refresh initial_state. unsqueeze to fake the batch_size dimension
initial_state, _ = self._init_grammar_state(encoder_input[i].unsqueeze(dim=0),
encoder_mask[i].unsqueeze(dim=0),
batch_size,
# inter_size is set as 1
1,
[world_flatten[i]],
linked_scores[i].unsqueeze(dim=0),
[valid_action_flatten[i]],
entity_type[i].unsqueeze(dim=0),
encoding_schema[i].unsqueeze(dim=0))
initial_state.debug_info = [[]]
temp_predict_results = self.beam_search.search(num_steps,
initial_state,
self.transition_function,
keep_final_unfinished_states=True)
# if use discourse, take the last one; else, take the default prediction 0
take_away_key = i if self.use_discourse_encoder else 0
# keep the same structure
if take_away_key in temp_predict_results and i != 0:
best_action_sequence_with_copy = temp_predict_results[take_away_key][0].action_history[0]
best_action_sequence = []
for action_id in best_action_sequence_with_copy:
if valid_action_flatten[i][action_id].is_copy_rule:
# extend copy's related action ids
copy_action: Segment = world_flatten[i].valid_actions_flat[action_id]
valid_ins_idx = [idx for idx in copy_action.copy_ins_idx
if idx != self.action_padding_index]
best_action_sequence.extend(valid_ins_idx)
else:
best_action_sequence.append(action_id)
# Get the rules out of the instance
index_to_rule_copy = [production_rule_field[0]
for production_rule_field in
valid_action_flatten[i]]
outputs['best_predict_action_copy'].append(",".join([str(index_to_rule_copy[action_idx])
for action_idx in
best_action_sequence_with_copy]))
elif i == 0:
# no copy action
best_action_sequence = temp_predict_results[0][0].action_history[0]
outputs['best_predict_action_copy'].append(",".join([str(index_to_rule[action_idx])
for action_idx in
best_action_sequence]))
else:
best_action_sequence = []
outputs['best_predict_action_copy'].append("[EMPTY]")
outputs['best_predict_idx'].append(best_action_sequence)
outputs['debug_info'].append(temp_predict_results[0][0].debug_info[0])
if i != inter_size - 1:
# update next world's precedent action sequence.
# note the update is both for COPY & TOKEN.
# for token-level copy, it will ignore the segment actions.
world_flatten[i + 1].update_precedent_state(best_action_sequence,
extract_tree=not self.use_hard_token_as_seg)
world_flatten[i + 1].update_copy_valid_action()
# manually construct CopyRule for valid_action_flatten
for local_ind, prod_rule in enumerate(world_flatten[i + 1].precedent_segment_seq):
# get nonterminal name
nonterminal = prod_rule.nonterminal
rule_repr = str(prod_rule)
copy_rule = CopyProductionRule(rule=rule_repr,
# the copy rule is appended dynamically
is_global_rule=False,
is_copy_rule=True,
nonterminal=nonterminal)
valid_action_flatten[i + 1].append(copy_rule)
assert len(valid_action_flatten[i + 1]) == len(world_flatten[i + 1].valid_actions_flat)
# to fake the batch_size scope
outputs['best_predict_action_copy'] = [outputs['best_predict_action_copy']]
outputs['debug_info'] = [outputs['debug_info']]
else:
# This tells the state to start keeping track of debug info, which we'll pass along in
# our output dictionary.
initial_state.debug_info = [[] for _ in range(batch_size * inter_size)]
best_final_states = self.beam_search.search(num_steps,
initial_state,
self.transition_function,
keep_final_unfinished_states=True)
outputs['best_predict_idx'] = [
best_final_states[i][0].action_history[0] if i in best_final_states else [0]
for i in range(batch_size * inter_size)]
outputs['debug_info'] = [[best_final_states[i][0].debug_info[0] if i in best_final_states else []
for i in range(batch_size * inter_size)]]
# in test mode, predict the actual SQL
if worlds[0][0].sql_query == '':
outputs['best_predict_action'] = [[",".join([str(index_to_rule[action_idx])
for action_idx in action_seq])
for action_seq in outputs['best_predict_idx']]]
predict_sql = self.predict_sql(iter_size=batch_size * inter_size,
index_to_rule=index_to_rule,
predict_result=outputs['best_predict_idx'],
db_contexts=db_contexts)
outputs['best_predict_sql'] = predict_sql
# add utterance for better reading
if self.bert_mode == "v0":
# if under the v3 bert mode, the utterance idx cannot be recovered as
# the `BertIndexer` will re-index these tokens.
utterance_strs = [[self.vocab.get_token_from_index(int(token_id)) if token_id != 0
else ''
for token_id in token_seq]
for token_seq in utterance_tokens.view(batch_size * inter_size, -1)]
outputs['utterance'] = [utterance_strs]
for debug_sample in outputs['debug_info'][0]:
# every sample is a list
for info_dict in debug_sample:
info_dict['question_attention'] = ["{0:.2f}".format(float(num))
for num in info_dict['question_attention']]
info_dict['probabilities'] = ["{0:.2f}".format(float(num))
for num in info_dict['probabilities']]
else:
# reshape sequence and mask for convenience
action_sequence = action_sequence.reshape(batch_size * inter_size, -1)
action_mask = torch.ne(action_sequence, self.action_padding_index)
action_correct_mat, action_mask_mat = self.action_metric_util(outputs['best_predict_idx'],
action_sequence,
batch_size,
action_mask)
self.action_metric(action_correct_mat, action_mask_mat)
# construct action mapping
action_mapping: List[List[str]] = [[production_rule[0] for production_rule in valid_action]
for batch_valid_actions in valid_actions_list
for valid_action in batch_valid_actions]
sql_ground_truth: List[str] = [world.sql_query
for batch_world in worlds
for world in batch_world]
# calculate SQL matching
sql_correct_mat, sql_mask_mat = self.sql_metric_util(outputs['best_predict_idx'],
sql_ground_truth,
batch_size,
action_mask,
db_contexts,
action_mapping,
with_sql=True)
self.sql_metric(sql_correct_mat, sql_mask_mat)
if self.debug_parsing:
self.visualizer.update_global_step()
self.visualizer.log_sql(inter_utterance,
sql_correct_mat[0],
sql_ground_truth,
encoder_mask,
[[index_to_rule[ind] for ind in inter]
for inter in outputs['best_predict_idx']])
return outputs
@staticmethod
def predict_sql(iter_size, index_to_rule, predict_result, db_contexts) -> List[str]:
predict_sql = []
for i in range(iter_size):
action_seq = [index_to_rule[ind] for ind in predict_result[i]]
converter = ActionConverter(db_context=db_contexts[i])
try:
generated_sql = converter.translate_to_sql(action_seq)
except Exception:
# if conversion fails, fall back to a trivially valid SQL on the first table
generated_sql = f'SELECT * from {list(db_contexts[i].schema.keys())[0]}'
exec_info = sys.exc_info()
traceback.print_exception(*exec_info)
predict_sql.append(generated_sql)
# fake the batch_size dimension
return [predict_sql]
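# Usage sketch (for reference): `predict_result[i]` is the decoded action-id sequence of the
# i-th (batch * turn) item, `index_to_rule` maps those ids back to production strings, and
# ActionConverter translates the production sequence into an executable SQL string.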
def _init_parser_input(self, inter_utterance: Dict[str, torch.LongTensor],
inter_schema: Dict[str, torch.LongTensor],
entity_type: torch.LongTensor,
inter_segment: torch.LongTensor = None,
schema_position: torch.LongTensor = None):
device = entity_type.device
# {'text': {'token': tensor}, 'linking': tensor }
# batch_size x inter_size x col_size x col_token_size
if 'tokens' in inter_utterance:
assert self.bert_mode == "v0"
utterance_tokens = inter_utterance['tokens']
else:
assert self.bert_mode != "v0"
utterance_tokens = inter_utterance['bert']
batch_size, inter_size, _ = utterance_tokens.size()
if self.bert_mode == "v0":
schema_text = inter_schema['text']
batch_size, inter_size, col_size, col_token_size = schema_text['tokens'].size()
# batch_size * inter_size x col_size x col_token_size (e.g. 'hospital station' has col_token_size 2)
token_dict = {
'tokens': schema_text['tokens'].view(batch_size * inter_size, col_size, col_token_size)
}
inter_utterance['tokens'] = inter_utterance['tokens'].view(batch_size * inter_size, -1)
# batch_size * inter_size x col_size x col_token_size x embedding_size
embedded_schema = self.text_embedder.forward(token_dict, num_wrapping_dims=1)
# get input mask
encoder_mask = util.get_text_field_mask(inter_utterance).float()
embedded_utterance = self.text_embedder.forward(inter_utterance)
# Compute entity and question word similarity. We tried using cosine distance here, but
# because this similarity is the main mechanism that the model can use to push apart logit
# scores for certain actions (like "n -> 1" and "n -> -1"), this needs to have a larger
# output range than [-1, 1].
if self.use_schema_encoder:
# resize schema and others
encoder_schema_mask = (token_dict['tokens'] != 0).long()
embedded_schema = embedded_schema.view(batch_size * inter_size * col_size, col_token_size, -1)
encoder_schema_mask = encoder_schema_mask.view(batch_size * inter_size * col_size, col_token_size)
# get the results, note the result is actually the final result of every column
encoding_schema = self.schema_encoder.forward(embedded_schema, encoder_schema_mask)
encoding_schema = encoding_schema.view(batch_size * inter_size, col_size, -1)
# encode table & column
linking_scores = torch.bmm(encoding_schema,
torch.transpose(embedded_utterance, 1, 2))
else:
encoding_schema = embedded_schema.view(batch_size * inter_size,
col_size * col_token_size,
self.embedding_dim)
question_entity_similarity = torch.bmm(encoding_schema,
torch.transpose(embedded_utterance, 1, 2)) / self.scale
# bag-of-words representation; add a small eps to avoid NaN loss
encoder_sum = (token_dict['tokens'] != 0).view(batch_size * inter_size * col_size,
col_token_size).sum(dim=1).float() + 1e-2
encoding_schema = encoding_schema.view(batch_size * inter_size * col_size,
col_token_size, self.embedding_dim).sum(dim=1)
encoding_schema = encoding_schema / encoder_sum.unsqueeze(dim=1).expand_as(encoding_schema)
encoding_schema = encoding_schema.view(batch_size * inter_size, col_size, self.embedding_dim)
# batch_size * inter_size x col_size x col_token_size x utt_token_size
question_entity_similarity = question_entity_similarity.view(batch_size * inter_size,
col_size,
col_token_size,
-1)
# batch_size * inter_size x col_size x utt_token_size
question_entity_similarity_max_score, _ = torch.max(question_entity_similarity, 2)
linking_scores = question_entity_similarity_max_score
# calculate linking scores and probabilities
if self.use_feature_score:
linking_features = inter_schema['linking']
feature_size = linking_features.size(-1)
linking_features = linking_features.view(batch_size * inter_size, col_size, -1, feature_size)
# batch_size * inter_size x col_size x utt_token_size
feature_scores = self.linking_layer(linking_features).squeeze(3)
# batch_size * inter_size x col_size x utt_token_size
linking_scores = linking_scores + feature_scores
entity_size = self.num_entity_types
# concat word embedding and type embedding
# batch_size * inter_size x utt_token_size x (link_embedded_size + utt_embedded_size)
# encoder_input = linking_embedding + embedded_with_segment
# encoder_input = embedded_with_segment
if self.use_turn_position and not self.use_discourse_encoder:
embedded_segment = self.turn_embedder.forward(inter_segment)
embedded_utterance = embedded_utterance + embedded_segment
if self.use_linking_embedding:
# batch_size * inter_size x col_size x entity_size (10 now)
entity_type_mat = torch.zeros((batch_size * inter_size, col_size, entity_size), dtype=torch.float32,
device=device)
# create one hot vector
expand_entity_type = entity_type.unsqueeze(dim=2)
entity_type_mat.scatter_(dim=2, index=expand_entity_type, value=1)
# add 1e-8 as epsilon
entity_type_mat = entity_type_mat + 1e-8
# batch_size * inter_size x utt_token_size x entity_size
linking_probabilities = self._get_linking_probabilities(linking_scores.transpose(1, 2),
entity_type_mat)
linking_probabilities = encoder_mask.unsqueeze(dim=-1).repeat(1, 1, entity_size) * linking_probabilities
# batch_size * inter_size x entity_size x entity_embedding_size
entity_ids = torch.arange(0, entity_size, 1, dtype=torch.long, device=device).unsqueeze(dim=0). \
repeat(batch_size * inter_size, 1)
entity_type_embeddings = self.link_entity_type_embedder.forward(entity_ids)
# non linear layer for embedding
# TODO: why tanh ?
entity_type_embeddings = torch.tanh(entity_type_embeddings)
# calculate the weighted entity embeddings
# batch_size * inter_size x utt_token_size x entity_embedding_size
linking_embedding = torch.bmm(linking_probabilities, entity_type_embeddings)
parser_input = torch.cat([embedded_utterance, linking_embedding], dim=-1)
else:
parser_input = embedded_utterance
elif self.bert_mode == "v3":
assert inter_segment is not None
assert schema_position is not None
batch_size, inter_size, col_size, _ = schema_position.size()
schema_position = schema_position.long()
# batch_size * inter_size x col_size x 2
schema_position = schema_position.view(batch_size * inter_size, col_size, -1)
max_col_token_size = (schema_position[:, :, 1] - schema_position[:, :, 0]).max()
# the utterance part excludes schema tokens; its end index is derived from the segment ids
utter_end_indices = inter_segment.ne(0).sum(dim=1)
for key, value in inter_utterance.items():
inter_utterance[key] = inter_utterance[key].view(batch_size * inter_size, -1)
if 'type-ids' in key:
for i in range(batch_size * inter_size):
inter_utterance[key][i, :utter_end_indices[i]] = 0
inter_utterance[key][i, utter_end_indices[i]:] = 1
embedded_mix = self.text_embedder.forward(inter_utterance)
mask_mix = inter_utterance['mask']
embedded_mix = embedded_mix * mask_mix.unsqueeze(dim=2).float()
# split embedded mix into two parts: utterance & schema
embedded_utterance = []
encoder_mask = []
embedded_schema = []
encoder_schema_mask = []
for ind, end_ind in enumerate(utter_end_indices):
embedded_utterance.append(embedded_mix[ind, :end_ind, :])
encoder_mask.append(mask_mix[ind, :end_ind])
cur_embedded_schema = []
cur_schema_mask = []
for col_ind in range(col_size):
entity_start_ind = schema_position[ind, col_ind, 0]
entity_end_ind = schema_position[ind, col_ind, 1]
pad_len = max_col_token_size - (entity_end_ind - entity_start_ind)
# padding for concat
cur_embedded_schema.append(F.pad(embedded_mix[ind, entity_start_ind: entity_end_ind, :],
pad=[0, 0, 0, pad_len],
mode='constant'))
cur_schema_mask.append(F.pad(mask_mix[ind, entity_start_ind: entity_end_ind],
pad=[0, pad_len]))
cur_embedded_schema = torch.stack(cur_embedded_schema, dim=0)
embedded_schema.append(cur_embedded_schema)
cur_schema_mask = torch.stack(cur_schema_mask, dim=0)
encoder_schema_mask.append(cur_schema_mask)
embedded_utterance = pad_sequence(embedded_utterance, batch_first=True)
embedded_schema = pad_sequence(embedded_schema, batch_first=True)
# use the segment lengths to identify which tokens belong to the utterance vs. the schema
encoder_mask = pad_sequence(encoder_mask, batch_first=True)
encoder_schema_mask = pad_sequence(encoder_schema_mask, batch_first=True)
if self.use_schema_encoder:
# resize schema and others
embedded_schema = embedded_schema.view(batch_size * inter_size * col_size, max_col_token_size, -1)
encoder_schema_mask = encoder_schema_mask.view(batch_size * inter_size * col_size, max_col_token_size)
# get the results, note the result is actually the final result of every column
encoding_schema = self.schema_encoder.forward(embedded_schema, encoder_schema_mask)
encoding_schema = encoding_schema.view(batch_size * inter_size, col_size, -1)
# encode table & column
linking_scores = torch.bmm(encoding_schema,
torch.transpose(embedded_utterance, 1, 2)) / self.scale
else:
# encode table & column
encoding_schema = embedded_schema.view(batch_size * inter_size,
col_size * max_col_token_size,
self.embedding_dim)
question_entity_similarity = torch.bmm(encoding_schema,
torch.transpose(embedded_utterance, 1, 2)) / self.scale
# eps for nan loss
encoder_sum = encoder_schema_mask.view(batch_size * inter_size * col_size,
max_col_token_size).sum(dim=1).float() + 1e-2
encoding_schema = encoding_schema.view(batch_size * inter_size * col_size,
max_col_token_size, self.embedding_dim).sum(dim=1)
encoding_schema = encoding_schema / encoder_sum.unsqueeze(dim=1).expand_as(encoding_schema)
encoding_schema = encoding_schema.view(batch_size * inter_size, col_size, self.embedding_dim)
# batch_size * inter_size x col_size x col_token_size x utt_token_size
question_entity_similarity = question_entity_similarity.view(batch_size * inter_size,
col_size,
max_col_token_size,
-1)
# batch_size * inter_size x col_size x utt_token_size
question_entity_similarity_max_score, _ = torch.max(question_entity_similarity, 2)
linking_scores = question_entity_similarity_max_score
if self.use_feature_score:
linking_features = inter_schema['linking']
feature_size = linking_features.size(-1)
linking_features = linking_features.view(batch_size * inter_size, col_size, -1, feature_size)
# batch_size * inter_size x col_size x utt_token_size
feature_scores = self.linking_layer.forward(linking_features).squeeze(3)
linking_scores = linking_scores + feature_scores
parser_input = embedded_utterance
# calculate linking scores with utterance
if self.use_turn_position and not self.use_discourse_encoder:
embedded_segment = self.turn_embedder.forward(inter_segment)
parser_input = parser_input + embedded_segment
if self.use_linking_embedding:
entity_size = self.num_entity_types
# batch_size * inter_size x col_size x entity_size (10 now)
entity_type_mat = torch.zeros((batch_size * inter_size, col_size, entity_size), dtype=torch.float32,
device=device)
# create one hot vector
expand_entity_type = entity_type.unsqueeze(dim=2)
entity_type_mat.scatter_(dim=2, index=expand_entity_type, value=1)
# add 1e-8 as epsilon
entity_type_mat = entity_type_mat + 1e-8
# batch_size * inter_size x utt_token_size x entity_size
linking_probabilities = self._get_linking_probabilities(linking_scores.transpose(1, 2),
entity_type_mat)
linking_probabilities = encoder_mask.unsqueeze(dim=-1).repeat(1, 1,
entity_size).float() * linking_probabilities
# batch_size * inter_size x entity_size x entity_embedding_size
entity_ids = torch.arange(0, entity_size, 1, dtype=torch.long, device=device).unsqueeze(dim=0). \
repeat(batch_size * inter_size, 1)
entity_type_embeddings = self.link_entity_type_embedder.forward(entity_ids)
entity_type_embeddings = torch.tanh(entity_type_embeddings)
# calculate the weighted entity embeddings
# batch_size * inter_size x utt_token_size x entity_embedding_size
linking_embedding = torch.bmm(linking_probabilities, entity_type_embeddings)
parser_input = parser_input + linking_embedding
else:
raise Exception("DO NOT SUPPORT BERT MODE :{}".format(self.bert_mode))
return parser_input, encoder_mask, linking_scores, encoding_schema
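# Returned shapes (roughly): parser_input is (batch_size * inter_size, utt_token_size, dim),
# encoder_mask is (batch_size * inter_size, utt_token_size), linking_scores is
# (batch_size * inter_size, col_size, utt_token_size) and encoding_schema is
# (batch_size * inter_size, col_size, schema_dim).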
def _init_grammar_state(self,
encoder_input: torch.FloatTensor,
encoder_mask: torch.LongTensor,
batch_size: int,
inter_size: int,
world_flatten: List[SparcWorld],
linking_scores: torch.FloatTensor,
valid_action_flatten: List[List[CopyProductionRule]],
entity_type: torch.LongTensor,
# encoding_schema, batch_size * inter_size x schema_size
encoding_schema: torch.FloatTensor):
encoder_mask = encoder_mask.float()
iter_size = batch_size * inter_size
# specific devices
device = encoder_mask.device
penalty = torch.zeros(1, device=device, requires_grad=True)
padding_utterance_mask = encoder_mask.clone()
padding_utterance_mask.data[:, 0].fill_(1)
# encode and output encoder memory
encoder_input = self.var_dropout(encoder_input)
# an unified process to handle bert/non-bert embedding as input
_, sequence_len, embedding_dim = encoder_input.size()
# record state of each utterance
encoder_vector_states = []
if self.use_discourse_encoder:
# iterative encoding on inputs
encoder_input = encoder_input.view(batch_size, inter_size, sequence_len, embedding_dim)
padding_utterance_mask = padding_utterance_mask.view(batch_size, inter_size, -1)
discourse_inp_dim = self.discourse_level_encoder.get_input_dim()
discourse_state_dim = self.discourse_level_encoder.get_output_dim()
discourse_input = torch.zeros((batch_size, 1, discourse_inp_dim), device=encoder_input.device)
discourse_state = torch.zeros((batch_size, 1, discourse_state_dim), device=encoder_input.device)
turn_level_mask = (encoder_mask.sum(dim=1) != 0).view(batch_size, inter_size)
# get encoder outputs
encoder_outputs_states = []
for i in range(inter_size):
if i != 0:
# discourse_state is the last hidden state to be cached
discourse_state = self.discourse_level_encoder.forward(discourse_input,
turn_level_mask[:, i].unsqueeze(dim=1))
text_input = encoder_input[:, i, :]
text_input = torch.cat([text_input,
discourse_state.repeat(1, sequence_len, 1)
], dim=2)
encoder_outputs = self.text_encoder.forward(text_input,
padding_utterance_mask[:, i])
discourse_input = util.get_final_encoder_states(encoder_outputs=encoder_outputs,
mask=padding_utterance_mask[:, i],
bidirectional=self.text_encoder.is_bidirectional()).unsqueeze(
dim=1)
encoder_vector_states.append(discourse_input)
# dimension 1 is the interaction dimension
encoder_outputs_states.append(encoder_outputs.unsqueeze(dim=1))
# recover outputs and padding mask
utt_encoder_outputs = torch.cat(encoder_outputs_states, dim=1).view(batch_size * inter_size,
sequence_len, -1)
padding_utterance_mask = padding_utterance_mask.view(batch_size * inter_size, sequence_len)
else:
utt_encoder_outputs = self.text_encoder.forward(encoder_input, padding_utterance_mask)
utt_encoder_outputs = self.var_dropout(utt_encoder_outputs)
# This will be our initial hidden state and memory cell for the decoder LSTM.
# if the discourse encoder is used, each turn is encoded independently, so we should concat
# encoder outputs to compute `turn attention`
if self.use_turn_position:
# concat all outputs, meanwhile we should change the interaction mask
# 1 x turn_size
turn_ids = torch.arange(start=inter_size + 1, end=1, step=-1, device=device) \
.unsqueeze(dim=0).repeat(batch_size, 1).view(batch_size * inter_size)
loss_mask_ids = torch.zeros(turn_ids.size(), device=device).fill_(self.loss_mask).long()
turn_ids = torch.where(turn_ids.float() > self.loss_mask, loss_mask_ids, turn_ids)
turn_embedding = self.turn_embedder(turn_ids)
# batch_size x inter_size x embedding_dim x sequence_len
turn_encoder_embedding = turn_embedding.unsqueeze(dim=1).repeat(1, sequence_len, 1)
utt_encoder_outputs = torch.cat([utt_encoder_outputs, turn_encoder_embedding], dim=2)
# use a transform layer to normalize the shape
# dropout on encoder_vector_states
encoder_vector_states = torch.cat(encoder_vector_states, dim=1)
# use dropout to avoid overfitting
encoder_vector_states = self.dropout(encoder_vector_states)
encoder_vector_states = torch.split(encoder_vector_states, split_size_or_sections=1, dim=1)
encoder_vector_states = [torch.cat((encoder_vector_states[i], turn_embedding[i].
unsqueeze(dim=0).unsqueeze(dim=0).repeat(batch_size, 1, 1)),
dim=-1) for i in range(inter_size)]
# TODO: with the original mask, an empty sentence could make get_final_encoder_states fetch nothing.
final_encoder_output = util.get_final_encoder_states(encoder_outputs=utt_encoder_outputs,
mask=padding_utterance_mask,
bidirectional=self.text_encoder.is_bidirectional())
if self.use_attend_over_history:
utt_encoder_outputs = utt_encoder_outputs.view(batch_size, inter_size, sequence_len, -1)
encoder_mask = encoder_mask.view(batch_size, inter_size, sequence_len)
_, col_size, _ = linking_scores.size()
linking_scores = linking_scores.view(batch_size, inter_size, col_size, sequence_len)
# notice here you should concat all encoder output and all linking scores
his_encoder_outputs = []
his_encoder_mask = []
his_linking_score = []
if self.use_context_gate:
# info_gates[batch_ind, i, j] is how much information of context_j to keep when encoding turn i
info_gates = torch.eye(n=inter_size, m=inter_size, device=device, dtype=torch.float). \
unsqueeze(dim=0).repeat(batch_size, 1, 1)
if self.use_discourse_encoder:
reattn_states = encoder_vector_states
else:
utt_states = util.get_final_encoder_states(
encoder_outputs=utt_encoder_outputs.view(batch_size * inter_size, sequence_len, -1),
mask=padding_utterance_mask,
bidirectional=self.text_encoder.is_bidirectional())
reattn_states = utt_states.view(batch_size, inter_size, 1, -1).transpose(0, 1)
reattn_states = list(reattn_states)
for i in range(1, inter_size):
# unk candidate is designed for softmax attention
if self.use_sigmoid_gate or not self.use_unk_candidate:
if self.attn_on_self:
cur_value_vector = torch.cat(reattn_states[:i + 1], dim=1)
else:
cur_value_vector = torch.cat(reattn_states[:i], dim=1)
else:
if self.attn_on_self:
cur_value_vector = torch.cat(reattn_states[:i + 1], dim=1)
else:
cur_value_vector = torch.cat(reattn_states[:i], dim=1)
# add a [UNK] candidate
candidate_vector = self.dropout(self.first_unk_context).unsqueeze(dim=0). \
unsqueeze(dim=0).repeat(batch_size, 1, 1)
cur_value_vector = torch.cat([candidate_vector,
cur_value_vector], dim=1)
# batch_size x sequence_len x hidden_size
cur_query_vector = reattn_states[i].repeat(1, cur_value_vector.size()[1], 1)
hidden_vector = torch.cat([self.context_u(cur_value_vector),
self.context_w(cur_query_vector)], dim=-1)
if self.use_sigmoid_gate:
info_gate = torch.sigmoid(self.context_v(hidden_vector)).squeeze(dim=-1)
penalty = penalty + info_gate.mean().norm()
else:
# softmax gate
info_gate = torch.softmax(self.context_v(torch.tanh(hidden_vector)), dim=1).squeeze(dim=-1)
if self.use_unk_candidate:
# drop the unk candidate
info_gate = info_gate[:, 1:]
# record info_gate into logs
self.gate_metrics['info'](float(info_gate.mean()))
if self.attn_on_self:
info_gates[:, i, :i + 1] = info_gate
else:
info_gates[:, i, :i] = info_gate
penalty = penalty / inter_size
for i in range(batch_size):
cur_all_encoder_output = []
cur_all_encoder_mask = []
cur_all_linking_score = []
for turn_ind in range(inter_size):
cur_all_encoder_output.append(utt_encoder_outputs[i, turn_ind])
if self.use_context_gate:
gates = info_gates[i, turn_ind, : turn_ind + 1]
his_encoder_output = [cur_all_encoder_output[i] * gates[i]
for i in range(turn_ind + 1)]
his_encoder_output = torch.cat(his_encoder_output, dim=0)
else:
his_encoder_output = torch.cat(cur_all_encoder_output, dim=0)
his_encoder_outputs.append(his_encoder_output)
cur_all_encoder_mask.append(encoder_mask[i, turn_ind])
his_encoder_mask.append(torch.cat(cur_all_encoder_mask, dim=0))
cur_all_linking_score.append(linking_scores[i, turn_ind])
his_linking_output = torch.cat(cur_all_linking_score, dim=1).transpose(0, 1)
# before padding, transpose col_size x utt_size -> utt_size x col_size
his_linking_score.append(his_linking_output)
utt_encoder_outputs = pad_sequence(his_encoder_outputs, batch_first=True)
encoder_mask = pad_sequence(his_encoder_mask, batch_first=True)
linking_scores = pad_sequence(his_linking_score, batch_first=True).transpose(1, 2)
memory_cell = utt_encoder_outputs.new_zeros(iter_size, self.encoder_output_dim)
# prepared for sql attention
if self.use_sql_attention:
# add extra parameters
sql_memory_cell = utt_encoder_outputs.new_zeros(iter_size, self.sql_output_size)
else:
sql_memory_cell = [None] * iter_size
initial_score = torch.zeros(iter_size, device=device, dtype=torch.float32)
# To make grouping states together in the decoder easier, we convert the batch dimension in
# all of our tensors into an outer list. For instance, the encoder outputs have shape
# `(batch_size, utterance_length, encoder_output_dim)`. We need to convert this into a list
# of `batch_size` tensors, each of shape `(utterance_length, encoder_output_dim)`. Then we
# won't have to do any index selects, or anything, we'll just do some `torch.cat()`s.
initial_score_list = [initial_score[i] for i in range(iter_size)]
encoder_output_list = [utt_encoder_outputs[i] for i in range(iter_size)]
utterance_mask_list = [encoder_mask[i] for i in range(iter_size)]
# TODO: reorganize the world and valid action list
# FIXME: Hack for computing efficiency. Here we mask the worlds that cannot reach the loss_mask
db_context_flatten = [world.db_context if world is not None else None
for world in world_flatten]
fetch_sql_inform = [{} for i in range(iter_size)]
initial_grammar_state = [self._create_grammar_state(world_flatten[i],
valid_action_flatten[i],
linking_scores[i],
entity_type[i],
encoding_schema[i],
fetch_sql_inform[i])
for i in range(iter_size)]
if self.use_sql_attention:
sql_output_list = [ins['sql_output'] for ins in fetch_sql_inform]
sql_output_mask_list = [ins['sql_output_mask'] for ins in fetch_sql_inform]
sql_output_list = list(pad_sequence(sql_output_list, batch_first=True))
sql_output_mask_list = list(pad_sequence(sql_output_mask_list, batch_first=True))
else:
sql_output_list = [None] * iter_size
sql_output_mask_list = [None] * iter_size
initial_rnn_state = []
for i in range(iter_size):
initial_rnn_state.append(RnnStatelet(final_encoder_output[i],
memory_cell[i],
self.first_action_embedding,
self.first_attended_output,
encoder_output_list,
utterance_mask_list,
sql_memory_cell[i],
sql_output_list,
sql_output_mask_list))
# initialize constrain state
initial_condition_state = [ConditionStatelet(valid_action_flatten[i],
db_context_flatten[i],
# if the model is in training, pruning should be disabled
enable_prune=not self.training)
for i in range(iter_size)]
initial_state = GrammarBasedState(batch_indices=list(range(iter_size)),
action_history=[[] for _ in range(iter_size)],
score=initial_score_list,
rnn_state=initial_rnn_state,
grammar_state=initial_grammar_state,
condition_state=initial_condition_state,
possible_actions=valid_action_flatten)
return initial_state, penalty
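# For reference: the returned GrammarBasedState batches one statelet per (batch * turn) item
# (rnn, grammar and condition statelets), while `penalty` is a scalar regularizer that is
# non-zero only when the sigmoid context gate is enabled.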
def _create_grammar_state(self,
world: SparcWorld,
possible_actions: List[CopyProductionRule],
linking_scores: torch.Tensor,
entity_types: torch.LongTensor,
# col_size x col_embedding_size
encoded_schema: torch.FloatTensor,
# construct take away array
take_away: Dict = None) -> 'GrammarStatelet':
"""
Construct the initial grammar statelet for decoding constraints
:param world: ``SparcWorld``
:param possible_actions: ``List[CopyProductionRule]``, tracking all possible actions under the current state;
this rule differs from the one in allennlp as it supports the `is_copy` attribute
:param linking_scores: ``torch.Tensor``, the linking score between every query token and each entity
:param entity_types: ``torch.Tensor``, the entity type of each schema item in the database
:return: the initial ``GrammarStatelet``
"""
# map action into ind
action_map = {}
for action_index, action in enumerate(possible_actions):
action_string = action[0]
action_map[action_string] = action_index
translated_valid_actions = {}
device = linking_scores.device
# fake an empty Statement because there must be valid actions
if world is None:
translated_valid_actions[Statement.__name__] = {}
# append to take_away to keep the outputs consistent
if self.use_sql_attention and take_away is not None:
# take a padding vector
precedent_encoding = torch.zeros((1, self.sql_output_size), device=device) + 1e-6
precedent_mask = torch.ones((1, 1), device=device)
take_away['sql_output'] = precedent_encoding
take_away['sql_output_mask'] = precedent_mask.squeeze(dim=0)
return GrammarStatelet([Statement.__name__],
translated_valid_actions,
# callback function
SparcParser.is_nonterminal)
# map copy key to copy ids
copy_action_dict = {}
for copy_subtree in world.precedent_segment_seq:
copy_action_dict[str(copy_subtree)] = copy_subtree
# from non-terminal to action list, Dict[str, List[str]]
valid_actions = world.valid_actions
# 1 x precedent_action_len
precedent_action = world.precedent_action_seq
# we need the action entity mapping, indicating which action corresponds to which column/table
action_to_entity = world.get_action_entity_mapping()
ProductionTuple = namedtuple('ProductionTuple', ('rule', 'is_global', 'is_copy', 'tensor', 'nonterminal'))
# we need construct an embedding book for sql query encoder
if self.use_last_sql:
copy_embedding_book = {}
action_unit_indices = list(action_map.values())
production_unit_arrays = [(ProductionTuple(*possible_actions[index]), index)
for index in action_unit_indices]
production_unit_arrays = [production_rule for production_rule in production_unit_arrays
if not production_rule[0].is_copy]
global_unit_actions = []
linked_unit_actions = []
for production_unit_array, action_index in production_unit_arrays:
if production_unit_array.is_global:
global_unit_actions.append((production_unit_array.tensor, action_index))
# avoid padding rules
elif production_unit_array.rule in action_to_entity:
linked_unit_actions.append((production_unit_array.rule, action_index))
# construct embedding book
if global_unit_actions:
action_tensors, action_ids = zip(*global_unit_actions)
action_tensor = torch.cat(action_tensors, dim=0).long()
# batch_size * inter_size x embedding_size
action_unit_embedding = self.sql_global_embedder.forward(action_tensor)
for ind, idx in enumerate(action_ids):
copy_embedding_book[idx] = action_unit_embedding[ind]
if linked_unit_actions:
action_rules, action_ids = zip(*linked_unit_actions)
related_entity_ids = [action_to_entity[rule] for rule in action_rules]
# FIXME: -1 means it is actually not an entity
assert -1 not in related_entity_ids
if self.copy_encode_anon:
entity_type_tensor = entity_types[related_entity_ids]
entity_type_embeddings = (self.sql_schema_embedder(entity_type_tensor)
.to(entity_types.device)
.float())
for ind, idx in enumerate(action_ids):
copy_embedding_book[idx] = entity_type_embeddings[ind]
else:
# use specific representations of entity itself
for ind, idx in enumerate(action_ids):
entity_idx = related_entity_ids[ind]
copy_embedding_book[idx] = encoded_schema[entity_idx]
else:
copy_embedding_book = {}
# prepare action encodings for token-level copy operation or segment-level copy
if self.use_last_sql:
if len(precedent_action):
precedent_embedding = torch.stack([copy_embedding_book[action_idx] for action_idx in precedent_action])
precedent_mask = torch.ones((1, len(precedent_action)), device=device)
precedent_encoding = self.sql_context_encoder.forward(precedent_embedding.unsqueeze(dim=0),
precedent_mask).squeeze(dim=0)
precedent_forward = precedent_encoding[:, :self.sql_hidden_size]
precedent_backward = precedent_encoding[:, self.sql_hidden_size:]
else:
# eps to avoid nan loss
precedent_encoding = torch.zeros((1, self.sql_output_size), device=device) + 1e-6
precedent_mask = torch.ones((1, 1), device=device)
# append to take away
if self.use_sql_attention and take_away is not None:
take_away['sql_output'] = precedent_encoding
take_away['sql_output_mask'] = precedent_mask.squeeze(dim=0)
for key, action_strings in valid_actions.items():
# allocate dictionary
translated_valid_actions[key] = {}
# `key` here is a non-terminal from the grammar, and `action_strings` are all the valid
# productions of that non-terminal. We'll first split those productions by global vs.
# linked action.
action_indices = [action_map[action_string] for action_string in action_strings]
# named tuple for better reading
production_rule_arrays = [(ProductionTuple(*possible_actions[index]), index) for index in action_indices]
            # split rules into four categories: global, linked, copy-segment and copy-token
global_actions = []
linked_actions = []
copy_segment_actions = []
copy_token_actions = []
for production_rule_array, action_index in production_rule_arrays:
# copy action
if self.use_copy_segment and production_rule_array.is_copy:
                    # if encoding the segment with context, locate its span in the precedent encoding; otherwise fall back to its rule string
if self.copy_encode_with_context:
# find the start/end pos in encode
related_copy_ids = copy_action_dict[production_rule_array.rule].copy_ins_idx
# remove padding index to find the position
related_copy_ids = [ind for ind in related_copy_ids if ind != self.action_padding_index]
# [start, end)
copy_action_start_end = find_start_end(precedent_action, related_copy_ids)
copy_segment_actions.append((copy_action_start_end, action_index))
else:
copy_segment_actions.append((production_rule_array.rule, action_index))
elif not self.use_copy_segment and production_rule_array.is_copy:
continue
else:
if self.use_copy_token and action_index in precedent_action:
# the index in precedent sequence
copy_token_actions.append((precedent_action.index(action_index), action_index))
                    # copy-token can work together with global & linked actions
if production_rule_array.is_global:
global_actions.append((production_rule_array.tensor, action_index))
else:
linked_actions.append((production_rule_array.rule, action_index))
if copy_segment_actions:
assert len(precedent_action) > 0
assert self.use_copy_segment
if self.copy_encode_with_context:
# use the span repr
action_start_end, action_ids = zip(*copy_segment_actions)
copy_sql_encoding = []
for start_end_tup in action_start_end:
action_start, action_end = start_end_tup
copy_sql_encoding.append(get_span_representation(precedent_forward, precedent_backward,
action_start, action_end))
copy_sql_encoding = torch.stack(copy_sql_encoding, dim=0)
else:
action_rules, action_ids = zip(*copy_segment_actions)
# FIXME: we could consider encoding the segment within context
related_copy_ids = [copy_action_dict[rule].copy_ins_idx for rule in action_rules]
# construct tensor & encoder mask
related_copy_ids = torch.tensor(related_copy_ids, dtype=torch.long, device=device)
sql_encoder_mask = (related_copy_ids != self.action_padding_index).long()
# make related copy ids non-negative
related_copy_ids = torch.where(related_copy_ids > 0, related_copy_ids,
related_copy_ids.new_zeros(related_copy_ids.size()))
                    # construct embeddings: global actions use the sql global embedder, others the sql schema embedder
copy_sql_embedding = torch.stack(
[torch.stack([copy_embedding_book[int(idx)] for idx in idx_list], dim=0)
for idx_list in related_copy_ids], dim=0)
copy_sql_encoding = self.sql_segment_encoder.forward(copy_sql_embedding, sql_encoder_mask)
# segment input
copy_sql_input = self.copy_sql_input.forward(copy_sql_encoding)
copy_sql_output = self.copy_sql_output.forward(copy_sql_encoding)
translated_valid_actions[key]['copy_seg'] = (copy_sql_input,
copy_sql_output,
list(action_ids))
if copy_token_actions:
assert len(precedent_action) > 0
assert self.use_copy_token
action_inds, action_ids = zip(*copy_token_actions)
copy_sql_encoding = []
for action_ind in action_inds:
if self.copy_encode_with_context:
copy_sql_encoding.append(precedent_encoding[action_ind])
else:
action_embedding = precedent_embedding[action_ind]
action_embedding = self.sql_embedder_transform.forward(action_embedding)
copy_sql_encoding.append(action_embedding)
copy_sql_encoding = torch.stack(copy_sql_encoding, dim=0)
# token input
copy_sql_input = self.copy_sql_input.forward(copy_sql_encoding)
copy_sql_output = self.copy_sql_output.forward(copy_sql_encoding)
translated_valid_actions[key]['copy_token'] = (copy_sql_input,
copy_sql_output,
list(action_ids))
if global_actions:
action_tensors, action_ids = zip(*global_actions)
action_tensor = torch.cat(action_tensors, dim=0).long()
# batch_size * inter_size x embedding_size
action_input_embedding = self.action_embedder.forward(action_tensor)
action_output_embedding = self.output_action_embedder.forward(action_tensor)
translated_valid_actions[key]['global'] = (action_input_embedding,
action_output_embedding,
list(action_ids))
if linked_actions:
# TODO: how to handle the embedding of *
action_rules, action_ids = zip(*linked_actions)
related_entity_ids = [action_to_entity[rule] for rule in action_rules]
                # assert related entity ids do not contain -1
assert -1 not in related_entity_ids
entity_linking_scores = linking_scores[related_entity_ids]
entity_type_tensor = entity_types[related_entity_ids]
entity_type_embeddings = (self.output_entity_type_embedder(entity_type_tensor)
.to(entity_types.device)
.float())
translated_valid_actions[key]['linked'] = (entity_linking_scores,
entity_type_embeddings,
list(action_ids))
return GrammarStatelet([Statement.__name__],
translated_valid_actions,
# callback function
SparcParser.is_nonterminal)
@staticmethod
def is_nonterminal(token: str):
        # a token is a non-terminal iff it names an Action subclass
        nonterminals = [child.__name__ for child in Action.__subclasses__()]
        return token in nonterminals
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
action_metrics = self.action_metric.get_metric(reset)
sql_metrics = self.sql_metric.get_metric(reset)
if self.use_copy_segment or self.use_copy_token or self.use_context_gate:
gate_metrics = {key: self.gate_metrics[key].get_metric(reset) for key in self.gate_metrics.keys()}
metrics = {**action_metrics, **sql_metrics, **gate_metrics}
else:
metrics = {**action_metrics, **sql_metrics}
return metrics
@staticmethod
def _get_linking_probabilities(linking_scores: torch.Tensor,
entity_type_mat: torch.LongTensor) -> torch.FloatTensor:
"""
Produces the probability of an entity given a question word and type. The logic below
separates the entities by type since the softmax normalization term sums over entities
of a single type.
Parameters
----------
linking_scores : ``torch.FloatTensor``
Has shape (batch_size * inter_size, utt_token_size, col_size).
entity_type_mat : ``torch.LongTensor``
Has shape (batch_size * inter_size, col_size, entity_size)
Returns
-------
batch_probabilities : ``torch.FloatTensor``
Has shape ``(batch_size * inter_size, utt_token_size, entity_size)``.
Contains all the probabilities of entity types given an utterance word
"""
# normalize entity type mat into probability
entity_type_base = entity_type_mat.sum(dim=2, keepdim=True).expand_as(entity_type_mat)
# divide and get the probability, batch_size * inter_size x col_size x entity_size
entity_type_prob = entity_type_mat / entity_type_base
# bmm and get the result, batch_size * inter_size x utt_token_size x entity_size
type_linking_score = torch.bmm(linking_scores, entity_type_prob)
# normalize on entity dimension
type_linking_prob = torch.softmax(type_linking_score, dim=2)
return type_linking_prob
|
ContextualSP/semantic_parsing_in_context/models/sparc_parser.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/models/sparc_parser.py",
"repo_id": "ContextualSP",
"token_count": 49779
}
| 263 |
# Copyright (c) Facebook, Inc. and Microsoft Corporation.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
import os
from collections import defaultdict
from typing import Dict, List
import torch
from fairseq import search, utils
from fairseq.models.bart import BARTHubInterface, BARTModel
from omegaconf import open_dict
logger = logging.getLogger(__name__)
class GENREHubInterface(BARTHubInterface):
def sample(
self,
sentences: List[str],
beam: int = 5,
verbose: bool = False,
text_to_id=None,
marginalize=False,
marginalize_lenpen=0.5,
max_len_a=1024,
max_len_b=1024,
**kwargs,
) -> List[str]:
if isinstance(sentences, str):
return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
tokenized_sentences = [self.encode(sentence) for sentence in sentences]
batched_hypos = self.generate(
tokenized_sentences,
beam,
verbose,
max_len_a=max_len_a,
max_len_b=max_len_b,
**kwargs,
)
outputs = [
[
{"text": self.decode(hypo["tokens"]), "score": hypo["score"]}
for hypo in hypos
]
for hypos in batched_hypos
]
if text_to_id:
outputs = [
[{**hypo, "id": text_to_id(hypo["text"])} for hypo in hypos]
for hypos in outputs
]
if marginalize:
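            # Group hypotheses that resolve to the same entity id and rank each id by a
            # length-penalized logsumexp of its hypotheses' scores (score * len / len ** lenpen),
            # so an entity supported by several surface forms is scored marginally.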
for (i, hypos), hypos_tok in zip(enumerate(outputs), batched_hypos):
outputs_dict = defaultdict(list)
for hypo, hypo_tok in zip(hypos, hypos_tok):
outputs_dict[hypo["id"]].append(
{**hypo, "len": len(hypo_tok["tokens"])}
)
outputs[i] = sorted(
[
{
"id": _id,
"texts": [hypo["text"] for hypo in hypos],
"scores": torch.stack(
[hypo["score"] for hypo in hypos]
),
"score": torch.stack(
[
hypo["score"]
* hypo["len"]
/ (hypo["len"] ** marginalize_lenpen)
for hypo in hypos
]
).logsumexp(-1),
}
for _id, hypos in outputs_dict.items()
],
key=lambda x: x["score"],
reverse=True,
)
return outputs
def generate(self, *args, **kwargs) -> List[List[Dict[str, torch.Tensor]]]:
return super(BARTHubInterface, self).generate(*args, **kwargs)
def encode(self, sentence) -> torch.LongTensor:
tokens = super(BARTHubInterface, self).encode(sentence)
tokens[
tokens >= len(self.task.target_dictionary)
] = self.task.target_dictionary.unk_index
if tokens[0] != self.task.target_dictionary.bos_index:
return torch.cat(
(torch.tensor([self.task.target_dictionary.bos_index]), tokens)
)
else:
return tokens
class GENRE(BARTModel):
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="gpt2",
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return GENREHubInterface(x["args"], x["task"], x["models"][0])
class mGENRE(BARTModel):
@classmethod
def from_pretrained(
cls,
model_name_or_path,
sentencepiece_model="sentence.bpe.model",
checkpoint_file="model.pt",
data_name_or_path=".",
bpe="sentencepiece",
layernorm_embedding=True,
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
sentencepiece_model=os.path.join(model_name_or_path, sentencepiece_model),
**kwargs,
)
return GENREHubInterface(x["args"], x["task"], x["models"][0])
|
ContextualSP/unified_parser_text_to_sql/genre/fairseq_model.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/genre/fairseq_model.py",
"repo_id": "ContextualSP",
"token_count": 2925
}
| 264 |
import os
import sqlite3
from semparse.worlds.evaluate import Evaluator, build_valid_col_units, rebuild_sql_val, rebuild_sql_col, \
build_foreign_key_map_from_json
from semparse.sql.process_sql import Schema, get_schema, get_sql
_schemas = {}
kmaps = None
def evaluate(gold, predict, db_name, db_dir, table, check_valid: bool=True, db_schema=None) -> bool:
global kmaps
    # strip table aliases (t1-t4) from both queries before parsing
    for pattern in ("t1 . ", "t2 . ", "t3 . ", "t4 . ", " as t1", " as t2", " as t3", " as t4"):
        gold = gold.replace(pattern, "")
        predict = predict.replace(pattern, "")
# sgrammar = SpiderGrammar(
# output_from=True,
# use_table_pointer=True,
# include_literals=True,
# include_columns=True,
# )
# try:
evaluator = Evaluator()
if kmaps is None:
kmaps = build_foreign_key_map_from_json(table)
if 'chase' in db_dir:
schema = _schemas[db_name] = Schema(db_schema)
elif db_name in _schemas:
schema = _schemas[db_name]
else:
db = os.path.join(db_dir, db_name, db_name + ".sqlite")
schema = _schemas[db_name] = Schema(get_schema(db))
g_sql = get_sql(schema, gold)
# try:
p_sql = get_sql(schema, predict)
# except Exception as e:
# print('evaluate_spider.py L39')
# return False
# rebuild sql for value evaluation
kmap = kmaps[db_name]
g_valid_col_units = build_valid_col_units(g_sql['from']['table_units'], schema)
g_sql = rebuild_sql_val(g_sql)
g_sql = rebuild_sql_col(g_valid_col_units, g_sql, kmap)
p_valid_col_units = build_valid_col_units(p_sql['from']['table_units'], schema)
p_sql = rebuild_sql_val(p_sql)
p_sql = rebuild_sql_col(p_valid_col_units, p_sql, kmap)
exact_score = evaluator.eval_exact_match(p_sql, g_sql)
if not check_valid:
return exact_score
else:
return exact_score and check_valid_sql(predict, db_name, db_dir)
# except Exception as e:
# return 0
_conns = {}
def check_valid_sql(sql, db_name, db_dir, return_error=False):
return True
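    # NOTE: the early return above short-circuits validity checking; the sqlite-based
    # execution check below is kept for reference but is currently unreachable.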
db = os.path.join(db_dir, db_name, db_name + ".sqlite")
if db_name == 'wta_1':
# TODO: seems like there is a problem with this dataset - slow response - add limit 1
return True if not return_error else (True, None)
if db_name not in _conns:
_conns[db_name] = sqlite3.connect(db)
# fixes an encoding bug
_conns[db_name].text_factory = bytes
conn = _conns[db_name]
cursor = conn.cursor()
try:
cursor.execute(sql)
cursor.fetchall()
return True if not return_error else (True, None)
except Exception as e:
return False if not return_error else (False, e.args[0])
|
ContextualSP/unified_parser_text_to_sql/semparse/worlds/evaluate_spider.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/semparse/worlds/evaluate_spider.py",
"repo_id": "ContextualSP",
"token_count": 1260
}
| 265 |
# AutoFormer: Searching Transformers for Visual Recognition
**This is an official implementation of AutoFormer.**
AutoFormer is a new one-shot architecture search framework dedicated to vision transformer search. It entangles the weights of different vision transformer blocks in the same layers during supernet training.
Benefiting from the strategy, the trained supernet allows thousands of subnets to be very well-trained. Specifically, the performance of these subnets with weights inherited from the supernet is comparable to those retrained from scratch.
<div align="center">
<img width="49%" alt="AutoFormer overview" src="https://github.com/microsoft/AutoML/releases/download/static_files/autoformer_overview.gif"/>
<img width="49%" alt="AutoFormer detail" src="https://github.com/microsoft/AutoML/releases/download/static_files/autoformer_details.gif"/>
</div>
## Highlights
- Once-for-all
AutoFormer is a simple yet effective method to train a once-for-all vision transformer supernet.
- Competitive performance
AutoFormers consistently outperform DeiTs.
## Environment Setup
To set up the environment you can easily run the following command:
```buildoutcfg
conda create -n Autoformer python=3.6
conda activate Autoformer
pip install -r requirements.txt
```
## Data Preparation
You need to first download [ImageNet-2012](http://www.image-net.org/) to the folder `./data/imagenet` and move the validation set to the subfolder `./data/imagenet/val`. To move the validation set, you could use the following script: <https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh>
The directory structure is the standard layout, as follows.
```
/path/to/imagenet/
train/
class1/
img1.jpeg
class2/
img2.jpeg
val/
class1/
img3.jpeg
    class2/
img4.jpeg
```
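As a quick sanity check (a minimal illustrative sketch, not part of the official scripts; the path below is an assumption matching the layout above), you can confirm the folders are readable by `torchvision.datasets.ImageFolder`, which the DeiT-style data pipeline builds on:
```python
# Illustrative only: verify the class-per-subfolder ImageNet layout.
from torchvision import datasets, transforms

val_dir = './data/imagenet/val'  # assumed location, matching the tree above
dataset = datasets.ImageFolder(val_dir, transform=transforms.ToTensor())
print(f'{len(dataset)} images across {len(dataset.classes)} classes')
```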
## Model Zoo
For evaluation, we provide the checkpoints of our models in [Google Drive](https://drive.google.com/drive/folders/1HqzY3afqQUMI6pJ5_BgR2RquJU_b_3eg?usp=sharing) and [GitHub](https://github.com/silent-chen/AutoFormer-model-zoo).
After downloading the models, you can do the evaluation following the description in *Quick Start - Test*.
Model download links:
Model | Params. | Top-1 Acc. % | Top-5 Acc. % | Download link
--- |:---:|:---:|:---:|:---:
AutoFormer-T | 5.8M | 75.3 | 92.7 | [Google Drive](https://drive.google.com/file/d/1uRCW3doQHgn2H-LjyalYEZ4CvmnQtr6Q/view?usp=sharing), [GitHub](https://github.com/silent-chen/AutoFormer-model-zoo/releases/download/v1.0/supernet-tiny.pth)
AutoFormer-S | 22.9M | 81.7 | 95.7 | [Google Drive](https://drive.google.com/file/d/1JTBmLR_nW7-ZbTKafWFvSl8J2orJXiNa/view?usp=sharing), [GitHub](https://github.com/silent-chen/AutoFormer-model-zoo/releases/download/v1.0/supernet-small.pth)
AutoFormer-B | 53.7M | 82.4 | 95.7 | [Google Drive](https://drive.google.com/file/d/1KPjUshk0SbqkaTzlirjPHM9pu19N5w0e/view?usp=sharing), [GitHub](https://github.com/silent-chen/AutoFormer-model-zoo/releases/download/v1.0/supernet-base.pth)
## Quick Start
We provide *Supernet Train, Search, Test* code of AutoFormer as follows.
### Supernet Train
To train the supernet-T/S/B, we provide the corresponding supernet configuration files in `/experiments/supernet/`. For example, to train the supernet-B, you can run the following command. The default output path is `./`; you can specify a different path with the `--output` argument.
```buildoutcfg
python -m torch.distributed.launch --nproc_per_node=8 --use_env supernet_train.py --data-path /PATH/TO/IMAGENET --gp \
--change_qk --relative_position --mode super --dist-eval --cfg ./experiments/supernet/supernet-B.yaml --epochs 500 --warmup-epochs 20 \
--output /OUTPUT_PATH --batch-size 128
```
### Search
We run our evolution search on part of the ImageNet training dataset and use the validation set of ImageNet as the test set for a fair comparison. To generate the subImageNet in `/PATH/TO/IMAGENET`, you could simply run:
```buildoutcfg
python ./lib/subImageNet.py --data-path /PATH/TO/IMAGENET
```
After obtaining the subImageNet and training the supernet, we can perform the evolution search using the command below. Please remember to configure the search constraints with `--min-param-limits` and `--param-limits`:
```buildoutcfg
python -m torch.distributed.launch --nproc_per_node=8 --use_env evolution.py --data-path /PATH/TO/IMAGENET --gp \
--change_qk --relative_position --dist-eval --cfg ./experiments/supernet/supernet-B.yaml --resume /PATH/TO/CHECKPOINT \
--min-param-limits YOUR/CONFIG --param-limits YOUR/CONFIG --data-set EVO_IMNET
```
### Test
To test our trained models, put the downloaded checkpoint in `/PATH/TO/CHECKPOINT`. After that, you can use the following command to test the model (please change the config file and checkpoint according to the model you want to evaluate; here we use AutoFormer-B as an example).
```buildoutcfg
python -m torch.distributed.launch --nproc_per_node=8 --use_env supernet_train.py --data-path /PATH/TO/IMAGENET --gp \
--change_qk --relative_position --mode retrain --dist-eval --cfg ./experiments/subnet/AutoFormer-B.yaml --resume /PATH/TO/CHECKPOINT --eval
```
## Performance
**Left:** Top-1 accuracy on ImageNet. Our method achieves very competitive performance, being superior to the recent DeiT and ViT. **Right:** 1000 randomly sampled good architectures in the supernet-S. The supernet trained under our strategy allows subnets to be well optimized.
<div align="center">
<img src=".figure/performance.png" width="49%"/>
<img src=".figure/ofa.png" width="49%"/>
</div>
## Bibtex
If this repo is helpful for you, please consider citing it. Thank you! :)
```bibtex
@InProceedings{AutoFormer,
title = {AutoFormer: Searching Transformers for Visual Recognition},
author = {Chen, Minghao and Peng, Houwen and Fu, Jianlong and Ling, Haibin},
booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
month = {October},
year = {2021},
pages = {12270-12280}
}
```
## Acknowledgements
The codes are inspired by [HAT](https://github.com/mit-han-lab/hardware-aware-transformers), [timm](https://github.com/rwightman/pytorch-image-models), [DeiT](https://github.com/facebookresearch/deit), [SPOS](https://github.com/megvii-model/SinglePathOneShot).
|
Cream/AutoFormer/README.md/0
|
{
"file_path": "Cream/AutoFormer/README.md",
"repo_id": "Cream",
"token_count": 2067
}
| 266 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from model.utils import to_2tuple
import numpy as np
class PatchembedSuper(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, scale=False):
super(PatchembedSuper, self).__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
self.super_embed_dim = embed_dim
self.scale = scale
# sampled_
self.sample_embed_dim = None
self.sampled_weight = None
self.sampled_bias = None
self.sampled_scale = None
def set_sample_config(self, sample_embed_dim):
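        # Weight entanglement: every sampled width reuses the first `sample_embed_dim`
        # output channels (and bias entries) of the supernet projection, so subnets of
        # different embedding dims share slices of the same underlying weights.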
self.sample_embed_dim = sample_embed_dim
self.sampled_weight = self.proj.weight[:sample_embed_dim, ...]
self.sampled_bias = self.proj.bias[:self.sample_embed_dim, ...]
if self.scale:
self.sampled_scale = self.super_embed_dim / sample_embed_dim
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = F.conv2d(x, self.sampled_weight, self.sampled_bias, stride=self.patch_size, padding=self.proj.padding, dilation=self.proj.dilation).flatten(2).transpose(1,2)
if self.scale:
return x * self.sampled_scale
return x
def calc_sampled_param_num(self):
return self.sampled_weight.numel() + self.sampled_bias.numel()
def get_complexity(self, sequence_length):
total_flops = 0
if self.sampled_bias is not None:
total_flops += self.sampled_bias.size(0)
total_flops += sequence_length * np.prod(self.sampled_weight.size())
return total_flops
|
Cream/AutoFormer/model/module/embedding_super.py/0
|
{
"file_path": "Cream/AutoFormer/model/module/embedding_super.py",
"repo_id": "Cream",
"token_count": 948
}
| 267 |
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import yaml
from pathlib import Path
from timm.data import Mixup
from timm.scheduler import create_scheduler
from timm.optim import create_optimizer
from timm.utils import NativeScaler
from lib.datasets import build_dataset
from engine import evaluate
from lib import utils
from lib.config import cfg, update_config_from_file
from model.SSS import SSSTransformer
def get_args_parser():
    parser = argparse.ArgumentParser('S3 evaluation script', add_help=False)
parser.add_argument('--batch-size', default=64, type=int)
parser.add_argument('--epochs', default=300, type=int)
# config file
parser.add_argument('--cfg',help='experiment configure file name',required=True,type=str)
# custom parameters
parser.add_argument('--platform', default='pai', type=str, choices=['itp', 'pai', 'aml'],
help='Name of model to train')
parser.add_argument('--teacher_model', default='', type=str,
help='Name of teacher model to train')
parser.add_argument('--relative_position', action='store_true')
parser.add_argument('--gp', action='store_true')
parser.add_argument('--change_qkv', action='store_true')
parser.add_argument('--max_relative_position', type=int, default=14, help='max distance in relative position embedding')
# Model parameters
parser.add_argument('--model', default='', type=str, metavar='MODEL',
help='Name of model to train')
# AutoFormer config
parser.add_argument('--mode', type=str, default='super', choices=['super', 'retrain'], help='mode of AutoFormer')
parser.add_argument('--input-size', default=224, type=int)
parser.add_argument('--patch_size', default=4, type=int)
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--drop-path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--drop-block', type=float, default=None, metavar='PCT',
help='Drop block rate (default: None)')
parser.add_argument('--model-ema', action='store_true')
parser.add_argument('--no-model-ema', action='store_false', dest='model_ema')
# parser.set_defaults(model_ema=True)
parser.add_argument('--model-ema-decay', type=float, default=0.99996, help='')
parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='')
parser.add_argument('--rpe_type', type=str, default='bias', choices=['bias', 'direct'])
parser.add_argument('--post_norm', action='store_true')
parser.add_argument('--no_abs_pos', action='store_true')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt-eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
# Learning rate schedule parameters
parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER',
                        help='LR scheduler (default: "cosine")')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct',
help='learning rate noise on/off epoch percentages')
parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT',
help='learning rate noise limit percent (default: 0.67)')
parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV',
help='learning rate noise std-dev (default: 1.0)')
parser.add_argument('--warmup-lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min-lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--lr-power', type=float, default=1.0,
help='power of the polynomial lr scheduler')
parser.add_argument('--decay-epochs', type=float, default=30, metavar='N',
help='epoch interval to decay LR')
parser.add_argument('--warmup-epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N',
help='epochs to cooldown LR at min_lr, after cyclic schedule ends')
parser.add_argument('--patience-epochs', type=int, default=10, metavar='N',
                        help='patience epochs for Plateau LR scheduler (default: 10)')
parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE',
help='LR decay rate (default: 0.1)')
# Augmentation parameters
parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)')
parser.add_argument('--train-interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--repeated-aug', action='store_true')
parser.add_argument('--no-repeated-aug', action='store_false', dest='repeated_aug')
parser.set_defaults(repeated_aug=True)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0.8,
help='mixup alpha, mixup enabled if > 0. (default: 0.8)')
parser.add_argument('--cutmix', type=float, default=1.0,
help='cutmix alpha, cutmix enabled if > 0. (default: 1.0)')
parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup-prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup-switch-prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup-mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# Dataset parameters
parser.add_argument('--data-path', default='./data/imagenet/', type=str,
help='dataset path')
parser.add_argument('--data-set', default='IMNET', choices=['CIFAR', 'IMNET', 'INAT', 'INAT19'],
type=str, help='Image Net dataset path')
parser.add_argument('--inat-category', default='name',
choices=['kingdom', 'phylum', 'class', 'order', 'supercategory', 'family', 'genus', 'name'],
type=str, help='semantic granularity')
parser.add_argument('--output_dir', default='./',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true', help='Perform evaluation only')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--dist-eval', action='store_true', default=False, help='Enabling distributed evaluation')
parser.add_argument('--pin-mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no-pin-mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--amp', action='store_true')
parser.add_argument('--no-amp', action='store_false', dest='amp')
parser.set_defaults(amp=True)
return parser
def main(args):
utils.init_distributed_mode(args)
update_config_from_file(args.cfg)
print(args)
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
dataset_val, _ = build_dataset(is_train=False, args=args)
if args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print(
'Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
data_loader_val = torch.utils.data.DataLoader(
dataset_val, batch_size=int(2 * args.batch_size),
sampler=sampler_val, num_workers=args.num_workers,
pin_memory=args.pin_mem, drop_last=False
)
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
print(f"Creating S3-Transformer")
model = SSSTransformer(img_size=args.input_size,
patch_size=args.patch_size,
num_classes=args.nb_classes,
embed_dim=cfg.EMBED_DIM,
depths=cfg.DEPTHS,
num_heads=cfg.NUM_HEADS,
window_size=cfg.WINDOW_SIZE,
mlp_ratio=cfg.MLP_RATIO,
qkv_bias=True,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
patch_norm=True)
model.to(device)
model_ema = None
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
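    # Linear LR scaling rule: scale the base lr by the global batch size
    # (per-GPU batch size * world size) relative to a reference batch size of 512.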
linear_scaled_lr = args.lr * args.batch_size * utils.get_world_size() / 512.0
args.lr = linear_scaled_lr
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
lr_scheduler, _ = create_scheduler(args, optimizer)
output_dir = Path(args.output_dir)
if not output_dir.exists():
output_dir.mkdir(parents=True)
# save config for later experiments
with open(output_dir / "config.yaml", 'w') as f:
f.write(args_text)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
return
if __name__ == '__main__':
parser = argparse.ArgumentParser('S3 evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
Cream/AutoFormerV2/evaluation.py/0
|
{
"file_path": "Cream/AutoFormerV2/evaluation.py",
"repo_id": "Cream",
"token_count": 6389
}
| 268 |
""" Search cell """
import _init_paths
import os
import copy
import json
import torch
import time
import math
import torch.nn as nn
import numpy as np
from tensorboardX import SummaryWriter
from lib.models.cdarts_controller import CDARTSController
from lib.utils.visualize import plot
from lib.utils import utils
from lib.core.search_function import search, retrain_warmup
from lib.config import SearchConfig
config = SearchConfig()
if 'cifar' in config.dataset:
from lib.datasets.cifar import get_search_datasets
elif 'imagenet' in config.dataset:
from lib.datasets.imagenet import get_search_datasets
# tensorboard
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)
logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
if config.local_rank == 0:
config.print_params(logger.info)
os.makedirs(config.retrain_path, exist_ok=True)
if config.use_apex:
import apex
from apex.parallel import DistributedDataParallel as DDP
else:
DDP = torch.nn.parallel.DistributedDataParallel
def main():
logger.info("Logger is set - training start")
# set seed
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
if config.distributed:
config.gpu = config.local_rank % torch.cuda.device_count()
torch.cuda.set_device(config.gpu)
# distributed init
torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url,
world_size=config.world_size, rank=config.local_rank)
config.world_size = torch.distributed.get_world_size()
config.total_batch_size = config.world_size * config.batch_size
else:
config.total_batch_size = config.batch_size
loaders, samplers = get_search_datasets(config)
train_loader, valid_loader = loaders
train_sampler, valid_sampler = samplers
net_crit = nn.CrossEntropyLoss().cuda()
controller = CDARTSController(config, net_crit, n_nodes=4, stem_multiplier=config.stem_multiplier)
if config.param_pool_path is not None:
param_pool = torch.load(config.param_pool_path, map_location='cpu')
controller.load_state_dict(param_pool, strict=False)
resume_state = None
if config.resume:
resume_state = torch.load(config.resume_path, map_location='cpu')
sta_layer_idx = 0
if config.resume:
controller.load_state_dict(resume_state['controller'])
sta_layer_idx = resume_state['sta_layer_idx']
controller = controller.cuda()
if config.sync_bn:
if config.use_apex:
controller = apex.parallel.convert_syncbn_model(controller)
else:
controller = torch.nn.SyncBatchNorm.convert_sync_batchnorm(controller)
if config.use_apex:
controller = DDP(controller, delay_allreduce=True)
else:
controller = DDP(controller, device_ids=[config.gpu])
# warm up model_search
layer_idx=0
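    # The weight optimizer keeps separate parameter groups (feature extractor, the super
    # layer currently being searched, the remaining super layers, the heads, and the
    # already-built NAS layers) so each group's learning rate can be set independently.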
if config.ensemble_param:
w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.super_layers[layer_idx].parameters(), 'lr':config.w_lr},
{"params": controller.module.super_layers[layer_idx+1:].parameters()},
{"params": controller.module.fc_super.parameters()},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.ensemble_param},
{"params": controller.module.nas_layers[:layer_idx].parameters()}],
lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
else:
w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.super_layers[layer_idx].parameters(), 'lr':config.w_lr},
{"params": controller.module.super_layers[layer_idx+1:].parameters()},
{"params": controller.module.fc_super.parameters()},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.nas_layers[:layer_idx].parameters()}],
lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
for layer_idx in range(sta_layer_idx, config.layer_num):
if config.one_stage:
if layer_idx > 0:
break
# clean arch params in model_search
if config.clean_arch:
controller.module.init_arch_params(layer_idx)
# search training loop
best_top1 = 0.
best_genotypes = []
best_connects = []
sta_search_iter, sta_search_epoch = 0, 0
is_best = True
if (layer_idx == sta_layer_idx) and (resume_state is not None):
sta_search_iter = resume_state['sta_search_iter']
sta_search_epoch = resume_state['sta_search_epoch']
best_top1 = resume_state['best_top1']
best_genotypes = resume_state['best_genotypes']
best_connects = resume_state['best_connects']
else:
# init model main
if config.gumbel_sample:
genotype, connect = controller.module.generate_genotype_gumbel(0)
else:
genotype, connect = controller.module.generate_genotype(0)
for i in range(config.layer_num):
best_genotypes.append(genotype)
best_connects.append(connect)
for i in range(config.layer_num):
controller.module.genotypes[i] = best_genotypes[i]
controller.module.connects[i] = best_connects[i]
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
w_optim, config.search_iter * config.search_iter_epochs, eta_min=config.w_lr_min)
lr_scheduler_retrain = nn.ModuleList()
alpha_optim = nn.ModuleList()
optimizer = nn.ModuleList()
sub_epoch = 0
for search_iter in range(sta_search_iter, config.search_iter):
if search_iter < config.pretrain_epochs:
if config.local_rank == 0:
logger.info("####### Super model warmup #######")
train_sampler.set_epoch(search_iter)
retrain_warmup(train_loader, controller, w_optim, layer_idx, search_iter, writer, logger, True, config.pretrain_epochs, config)
#lr_scheduler.step()
else:
# build new controller
for i, genotype in enumerate(best_genotypes):
controller.module.build_nas_layers(i, genotype, config.same_structure)
controller_b = copy.deepcopy(controller.module)
del controller
controller = controller_b.cuda()
controller.fix_pre_layers(layer_idx)
#if search_iter > config.regular_ratio * config.search_iter:
# config.regular = False
# sync params from super layer pool
for i in range(layer_idx, config.layer_num):
controller.copy_params_from_super_layer(i)
if config.sync_bn:
if config.use_apex:
controller = apex.parallel.convert_syncbn_model(controller)
else:
controller = torch.nn.SyncBatchNorm.convert_sync_batchnorm(controller)
if config.use_apex:
controller = DDP(controller, delay_allreduce=True)
else:
controller = DDP(controller, device_ids=[config.gpu])
# weights optimizer
if config.ensemble_param:
w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.super_layers[layer_idx].parameters(), 'lr':config.w_lr},
{"params": controller.module.super_layers[layer_idx+1:].parameters()},
{"params": controller.module.fc_super.parameters()},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.ensemble_param},
{"params": controller.module.nas_layers[:layer_idx].parameters()}],
lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
else:
w_optim = torch.optim.SGD([ {"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.super_layers[layer_idx].parameters(), 'lr':config.w_lr},
{"params": controller.module.super_layers[layer_idx+1:].parameters()},
{"params": controller.module.fc_super.parameters()},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.nas_layers[:layer_idx].parameters()}],
lr=config.w_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
# arch_params optimizer
if config.repeat_cell:
alpha_optim = torch.optim.Adam(controller.module.super_layers_arch[0].parameters(), config.alpha_lr, betas=(0.5, 0.999),
weight_decay=config.alpha_weight_decay)
else:
alpha_optim = torch.optim.Adam(controller.module.super_layers_arch[layer_idx:].parameters(), config.alpha_lr, betas=(0.5, 0.999),
weight_decay=config.alpha_weight_decay)
if config.ensemble_param:
optimizer = torch.optim.SGD([{"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.nas_layers.parameters(), 'lr':config.nasnet_lr*0.1 if config.param_pool_path else config.nasnet_lr},
{"params": controller.module.ensemble_param},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.fc_nas.parameters()}],
lr=config.nasnet_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
else:
optimizer = torch.optim.SGD([{"params": controller.module.feature_extractor.parameters()},
{"params": controller.module.nas_layers.parameters(), 'lr':config.nasnet_lr*0.1 if config.param_pool_path else config.nasnet_lr},
{"params": controller.module.distill_aux_head1.parameters()},
{"params": controller.module.distill_aux_head2.parameters()},
{"params": controller.module.fc_nas.parameters()}],
lr=config.nasnet_lr, momentum=config.w_momentum, weight_decay=config.w_weight_decay)
lr_scheduler_retrain = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, config.search_iter_epochs, eta_min=config.w_lr_min)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
w_optim, config.search_iter * config.search_iter_epochs, eta_min=config.w_lr_min)
if (layer_idx == sta_layer_idx) and (resume_state is not None) and (resume_state['sta_search_epoch'] > config.pretrain_epochs):
w_optim.load_state_dict(resume_state['w_optim'])
alpha_optim.load_state_dict(resume_state['alpha_optim'])
lr_scheduler.load_state_dict(resume_state['lr_scheduler'])
lr_scheduler_retrain.load_state_dict(resume_state['lr_scheduler_retrain'])
else:
# lr_scheduler
pass
#for i in range(search_iter * config.search_iter_epochs):
# lr_scheduler.step()
# warmup model main
if config.local_rank == 0:
logger.info("####### Sub model warmup #######")
for warmup_epoch in range(config.nasnet_warmup):
valid_sampler.set_epoch(warmup_epoch)
retrain_warmup(valid_loader, controller, optimizer, layer_idx, warmup_epoch, writer, logger, False, config.nasnet_warmup, config)
best_top1 = 0.
sub_epoch = 0
for sub_epoch in range(sta_search_epoch, config.search_iter_epochs):
lr_search = lr_scheduler.get_lr()[0]
lr_main = lr_scheduler_retrain.get_lr()[0]
search_epoch = search_iter * config.search_iter_epochs + sub_epoch
# reset iterators
train_sampler.set_epoch(search_epoch)
valid_sampler.set_epoch(search_epoch)
# training
search(train_loader, valid_loader, controller, optimizer, w_optim, alpha_optim, layer_idx, search_epoch, writer, logger, config)
# validation
step_num = len(valid_loader)
cur_step = (search_epoch+1) * step_num
top1 = 1.
genotypes = []
connects = []
if config.gumbel_sample:
genotype, connect = controller.module.generate_genotype_gumbel(0)
else:
genotype, connect = controller.module.generate_genotype(0)
for i in range(config.layer_num):
genotypes.append(genotype)
connects.append(connect)
if config.local_rank == 0:
# for i in range(config.layer_num - layer_idx):
# logger.info ("Stage: {} Layer: {}".format(layer_idx, i+layer_idx+1))
logger.info ("Genotypes: ")
# controller.module.print_arch_params(logger, i+layer_idx)
controller.module.print_arch_params(logger, 0)
for i in range(config.layer_num - layer_idx):
if config.local_rank == 0:
# genotype
genotype = genotypes[i]
logger.info("Stage: {} Layer: {} genotype = {}".format(layer_idx, i+layer_idx+1, genotype))
# genotype as a image
plot_path = os.path.join(config.plot_path, "Stage_{}_Layer_{}_EP_{:02d}".format(layer_idx, layer_idx+i+1, search_epoch+1))
caption = "Stage_{}_Layer_{}_Epoch_{}".format(layer_idx, layer_idx+i+1, search_epoch+1)
plot(genotype.normal, plot_path + "-normal", caption)
plot(genotype.reduce, plot_path + "-reduce", caption)
# sync params to super layer pool
for i in range(layer_idx, config.layer_num):
controller.module.copy_params_from_nas_layer(i)
# save
best_top1 = top1
best_genotypes = genotypes
best_connects = connects
for i in range(config.layer_num):
controller.module.genotypes[i] = best_genotypes[i]
controller.module.connects[i] = best_connects[i]
#lr_scheduler.step()
#lr_scheduler_retrain.step()
if config.local_rank == 0:
utils.save_checkpoint(controller.module, config.path, is_best)
torch.save({
'controller': controller.module.state_dict(),
'sta_layer_idx': layer_idx,
'w_optim': w_optim.state_dict(),
'alpha_optim': alpha_optim.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'sta_search_iter': search_iter,
'sta_search_epoch': sub_epoch + 1,
'best_top1': best_top1,
'best_genotypes': best_genotypes,
'best_connects': best_connects,
'lr_scheduler_retrain': lr_scheduler_retrain.state_dict(),
'optimizer': optimizer.state_dict()
}, os.path.join(config.path, 'search_resume.pth.tar'))
torch.cuda.empty_cache()
sta_search_epoch = 0
# clean
del w_optim
del alpha_optim
del optimizer
torch.cuda.empty_cache()
config.pretrain_epochs = max(config.pretrain_epochs - config.pretrain_decay, 0)
# genotype as a image
for i in range(config.layer_num):
genotype, connect = controller.module.generate_genotype(i)
controller.module.genotypes[i] = genotype
controller.module.connects[i] = connect
if config.local_rank == 0:
for layer_idx, genotype in controller.module.genotypes.items():
logger.info("layer_idx : {}".format(layer_idx+1))
logger.info("genotype = {}".format(genotype))
plot_path = os.path.join(config.plot_path, "Final_Layer_{}_genotype".format(layer_idx+1))
caption = "Layer_{}".format(layer_idx+1)
plot(genotype.normal, plot_path + "-normal", caption)
plot(genotype.reduce, plot_path + "-reduce", caption)
# save dict as json
if config.local_rank == 0:
for layer_idx, genotype in controller.module.genotypes.items():
controller.module.genotypes[layer_idx] = str(genotype)
js = json.dumps(controller.module.genotypes)
file = open('genotypes.json', 'w')
file.write(js)
file.close()
if __name__ == "__main__":
sta_time = time.time()
main()
search_time = time.time() - sta_time
search_hour = math.floor(search_time / 3600)
search_min = math.floor(search_time / 60 - search_hour * 60)
if config.local_rank==0:
logger.info("Search time: hour: {} minute: {}".format(search_hour, search_min))
|
Cream/CDARTS/CDARTS/search.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS/search.py",
"repo_id": "Cream",
"token_count": 10413
}
| 269 |
import json
from .base import BaseFileHandler
class JsonHandler(BaseFileHandler):
def load_from_fileobj(self, file):
return json.load(file)
def dump_to_fileobj(self, obj, file, **kwargs):
json.dump(obj, file, **kwargs)
def dump_to_str(self, obj, **kwargs):
return json.dumps(obj, **kwargs)
|
Cream/CDARTS/CDARTS_detection/mmcv/fileio/handlers/json_handler.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/fileio/handlers/json_handler.py",
"repo_id": "Cream",
"token_count": 135
}
| 270 |
import functools
import torch
def assert_tensor_type(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not isinstance(args[0].data, torch.Tensor):
raise AttributeError('{} has no attribute {} for type {}'.format(
args[0].__class__.__name__, func.__name__, args[0].datatype))
return func(*args, **kwargs)
return wrapper
class DataContainer(object):
"""A container for any type of objects.
Typically tensors will be stacked in the collate function and sliced along
some dimension in the scatter function. This behavior has some limitations.
1. All tensors have to be the same size.
2. Types are limited (numpy array or Tensor).
We design `DataContainer` and `MMDataParallel` to overcome these
limitations. The behavior can be either of the following.
- copy to GPU, pad all tensors to the same size and stack them
- copy to GPU without stacking
- leave the objects as is and pass it to the model
- pad_dims specifies the number of last few dimensions to do padding
"""
def __init__(self,
data,
stack=False,
padding_value=0,
cpu_only=False,
pad_dims=2):
self._data = data
self._cpu_only = cpu_only
self._stack = stack
self._padding_value = padding_value
assert pad_dims in [None, 1, 2, 3]
self._pad_dims = pad_dims
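    # Illustrative usage (names mirror how the data pipeline wraps samples; not from this file):
    #   img = DataContainer(torch.rand(3, 800, 1216), stack=True)                # stacked & padded, moved to GPU
    #   img_meta = DataContainer(dict(ori_shape=(800, 1216, 3)), cpu_only=True)  # passed through unchanged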
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, repr(self.data))
@property
def data(self):
return self._data
@property
def datatype(self):
if isinstance(self.data, torch.Tensor):
return self.data.type()
else:
return type(self.data)
@property
def cpu_only(self):
return self._cpu_only
@property
def stack(self):
return self._stack
@property
def padding_value(self):
return self._padding_value
@property
def pad_dims(self):
return self._pad_dims
@assert_tensor_type
def size(self, *args, **kwargs):
return self.data.size(*args, **kwargs)
@assert_tensor_type
def dim(self):
return self.data.dim()
|
Cream/CDARTS/CDARTS_detection/mmcv/parallel/data_container.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/parallel/data_container.py",
"repo_id": "Cream",
"token_count": 946
}
| 271 |
import datetime
import torch.nn.functional as F
import os
import os.path as osp
from collections import OrderedDict
import numpy as np
import torch
import torch.distributed as dist
import mmcv
from .base import LoggerHook
class TextLoggerHook(LoggerHook):
def __init__(self, interval=10, ignore_last=True, reset_flag=False):
super(TextLoggerHook, self).__init__(interval, ignore_last, reset_flag)
self.time_sec_tot = 0
def before_run(self, runner):
super(TextLoggerHook, self).before_run(runner)
self.start_iter = runner.iter
self.json_log_path = osp.join(runner.work_dir,
'{}.log.json'.format(runner.timestamp))
def _get_max_memory(self, runner):
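        # Peak allocated GPU memory in MB; under distributed training the value is
        # reduced with ReduceOp.MAX onto rank 0, so the logged number reflects the
        # worst-case process.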
mem = torch.cuda.max_memory_allocated()
mem_mb = torch.tensor([mem / (1024 * 1024)],
dtype=torch.int,
device=torch.device('cuda'))
if runner.world_size > 1:
dist.reduce(mem_mb, 0, op=dist.ReduceOp.MAX)
return mem_mb.item()
def _log_info(self, log_dict, runner):
if runner.mode == 'train':
log_str = 'Epoch [{}][{}/{}]\tlr: {:.5f}, '.format(
log_dict['epoch'], log_dict['iter'], len(runner.data_loader),
log_dict['lr'])
if 'time' in log_dict.keys():
self.time_sec_tot += (log_dict['time'] * self.interval)
time_sec_avg = self.time_sec_tot / (
runner.iter - self.start_iter + 1)
eta_sec = time_sec_avg * (runner.max_iters - runner.iter - 1)
eta_str = str(datetime.timedelta(seconds=int(eta_sec)))
log_str += 'eta: {}, '.format(eta_str)
log_str += ('time: {:.3f}, data_time: {:.3f}, '.format(
log_dict['time'], log_dict['data_time']))
log_str += 'memory: {}, '.format(log_dict['memory'])
else:
log_str = 'Epoch({}) [{}][{}]\t'.format(log_dict['mode'],
log_dict['epoch'] - 1,
log_dict['iter'])
log_items = []
for name, val in log_dict.items():
# TODO: resolve this hack
# these items have been in log_str
if name in [
'mode', 'Epoch', 'iter', 'lr', 'time', 'data_time',
'memory', 'epoch'
]:
continue
if isinstance(val, float):
val = '{:.4f}'.format(val)
log_items.append('{}: {}'.format(name, val))
log_str += ', '.join(log_items)
runner.logger.info(log_str)
def _dump_log(self, log_dict, runner):
# dump log in json format
json_log = OrderedDict()
for k, v in log_dict.items():
json_log[k] = self._round_float(v)
# only append log at last line
if runner.rank == 0:
with open(self.json_log_path, 'a+') as f:
mmcv.dump(json_log, f, file_format='json')
f.write('\n')
def _round_float(self, items):
if isinstance(items, list):
return [self._round_float(item) for item in items]
elif isinstance(items, float):
return round(items, 5)
else:
return items
def log(self, runner):
log_dict = OrderedDict()
# training mode if the output contains the key "time"
mode = 'train' if 'time' in runner.log_buffer.output else 'val'
log_dict['mode'] = mode
log_dict['epoch'] = runner.epoch + 1
log_dict['iter'] = runner.inner_iter + 1
# only record lr of the first param group
log_dict['lr'] = runner.current_lr()[0]
if runner.optimizer_arch is not None and (runner.rank == 0):
# os.system('df -h /dev/shm/')
detector = runner.model.module.module
# searching code can not be open sourced now.
if 'backbone' in runner.arch_name:
raise NotImplementedError
if 'neck' in runner.arch_name:
raise NotImplementedError
if 'head' in runner.arch_name:
raise NotImplementedError
if mode == 'train':
log_dict['time'] = runner.log_buffer.output['time']
log_dict['data_time'] = runner.log_buffer.output['data_time']
# statistic memory
if torch.cuda.is_available():
log_dict['memory'] = self._get_max_memory(runner)
for name, val in runner.log_buffer.output.items():
if name in ['time', 'data_time']:
continue
log_dict[name] = val
self._log_info(log_dict, runner)
self._dump_log(log_dict, runner)
if runner.rank == 0:
print()
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/logger/text.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/logger/text.py",
"repo_id": "Cream",
"token_count": 2496
}
| 272 |
__version__ = '0.2.12'
|
Cream/CDARTS/CDARTS_detection/mmcv/version.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/version.py",
"repo_id": "Cream",
"token_count": 12
}
| 273 |
from .base_assigner import BaseAssigner
from .max_iou_assigner import MaxIoUAssigner
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .assign_result import AssignResult
__all__ = [
'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assigners/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assigners/__init__.py",
"repo_id": "Cream",
"token_count": 111
}
| 274 |
import mmcv
import numpy as np
import torch
def bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]):
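    """Encode `gt` boxes w.r.t. `proposals` as (dx, dy, dw, dh): center offsets
    normalized by proposal width/height plus log size ratios, whitened by
    `means`/`stds`. Inverse of :func:`delta2bbox`."""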
assert proposals.size() == gt.size()
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0] + 1.0
ph = proposals[..., 3] - proposals[..., 1] + 1.0
gx = (gt[..., 0] + gt[..., 2]) * 0.5
gy = (gt[..., 1] + gt[..., 3]) * 0.5
gw = gt[..., 2] - gt[..., 0] + 1.0
gh = gt[..., 3] - gt[..., 1] + 1.0
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
def delta2bbox(rois,
deltas,
means=[0, 0, 0, 0],
stds=[1, 1, 1, 1],
max_shape=None,
wh_ratio_clip=16 / 1000):
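    """Decode (dx, dy, dw, dh) deltas w.r.t. `rois` back to (x1, y1, x2, y2) boxes:
    de-whiten with `means`/`stds`, clamp dw/dh by `wh_ratio_clip`, recover centers
    and sizes, and optionally clip to `max_shape`."""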
means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)
stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)
denorm_deltas = deltas * stds + means
dx = denorm_deltas[:, 0::4]
dy = denorm_deltas[:, 1::4]
dw = denorm_deltas[:, 2::4]
dh = denorm_deltas[:, 3::4]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)
ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)
gw = pw * dw.exp()
gh = ph * dh.exp()
    gx = px + pw * dx
    gy = py + ph * dy
x1 = gx - gw * 0.5 + 0.5
y1 = gy - gh * 0.5 + 0.5
x2 = gx + gw * 0.5 - 0.5
y2 = gy + gh * 0.5 - 0.5
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
return bboxes
def bbox_flip(bboxes, img_shape):
"""Flip bboxes horizontally.
Args:
bboxes(Tensor or ndarray): Shape (..., 4*k)
img_shape(tuple): Image shape.
Returns:
Same type as `bboxes`: Flipped bboxes.
"""
if isinstance(bboxes, torch.Tensor):
assert bboxes.shape[-1] % 4 == 0
flipped = bboxes.clone()
flipped[:, 0::4] = img_shape[1] - bboxes[:, 2::4] - 1
flipped[:, 2::4] = img_shape[1] - bboxes[:, 0::4] - 1
return flipped
elif isinstance(bboxes, np.ndarray):
return mmcv.bbox_flip(bboxes, img_shape)
def bbox_mapping(bboxes, img_shape, scale_factor, flip):
"""Map bboxes from the original image scale to testing scale"""
new_bboxes = bboxes * scale_factor
if flip:
new_bboxes = bbox_flip(new_bboxes, img_shape)
return new_bboxes
def bbox_mapping_back(bboxes, img_shape, scale_factor, flip):
"""Map bboxes from testing scale to original image scale"""
new_bboxes = bbox_flip(bboxes, img_shape) if flip else bboxes
new_bboxes = new_bboxes / scale_factor
return new_bboxes
def bbox2roi(bbox_list):
"""Convert a list of bboxes to roi format.
Args:
bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
of images.
Returns:
Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
"""
rois_list = []
for img_id, bboxes in enumerate(bbox_list):
if bboxes.size(0) > 0:
img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
else:
rois = bboxes.new_zeros((0, 5))
rois_list.append(rois)
rois = torch.cat(rois_list, 0)
return rois
def roi2bbox(rois):
bbox_list = []
img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
for img_id in img_ids:
inds = (rois[:, 0] == img_id.item())
bbox = rois[inds, 1:]
bbox_list.append(bbox)
return bbox_list
def bbox2result(bboxes, labels, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor): shape (n, 5)
labels (Tensor): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): bbox results of each class
"""
if bboxes.shape[0] == 0:
return [
np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1)
]
else:
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()
return [bboxes[labels == i, :] for i in range(num_classes - 1)]
def distance2bbox(points, distance, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (n, 2), [x, y].
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom).
max_shape (tuple): Shape of the image.
Returns:
Tensor: Decoded bboxes.
"""
x1 = points[:, 0] - distance[:, 0]
y1 = points[:, 1] - distance[:, 1]
x2 = points[:, 0] + distance[:, 2]
y2 = points[:, 1] + distance[:, 3]
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
return torch.stack([x1, y1, x2, y2], -1)
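# --- Hedged usage sketch (editor addition, not part of the original file) ---
# bbox2delta/delta2bbox are (approximately) inverse operations: encoding proposals
# against ground-truth boxes and decoding the resulting deltas from the same
# proposals recovers the ground-truth boxes up to floating-point error. The box
# coordinates below are made up purely for illustration.
def _demo_delta_roundtrip():
    proposals = torch.tensor([[10., 10., 50., 60.],
                              [30., 40., 80., 90.]])
    gt = torch.tensor([[12., 8., 48., 62.],
                       [25., 45., 85., 95.]])
    deltas = bbox2delta(proposals, gt)
    decoded = delta2bbox(proposals, deltas)
    assert torch.allclose(decoded, gt, atol=1e-3)
    return deltas, decoded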
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/transforms.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/transforms.py",
"repo_id": "Cream",
"token_count": 2913
}
| 275 |
import torch
from mmdet.ops.nms import nms_wrapper
def multiclass_nms(multi_bboxes,
multi_scores,
score_thr,
nms_cfg,
max_num=-1,
score_factors=None):
"""NMS for multi-class bboxes.
Args:
multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
multi_scores (Tensor): shape (n, #class)
score_thr (float): bbox threshold, bboxes with scores lower than it
will not be considered.
        nms_cfg (dict): NMS operation config, e.g. dict(type='nms', iou_thr=0.5)
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept.
score_factors (Tensor): The factors multiplied to scores before
applying NMS
Returns:
        tuple: (bboxes, labels), tensors of shape (k, 5) and (k,). Labels
            are 0-based.
"""
num_classes = multi_scores.shape[1]
bboxes, labels = [], []
nms_cfg_ = nms_cfg.copy()
nms_type = nms_cfg_.pop('type', 'nms')
nms_op = getattr(nms_wrapper, nms_type)
for i in range(1, num_classes):
cls_inds = multi_scores[:, i] > score_thr
if not cls_inds.any():
continue
# get bboxes and scores of this class
if multi_bboxes.shape[1] == 4:
_bboxes = multi_bboxes[cls_inds, :]
else:
_bboxes = multi_bboxes[cls_inds, i * 4:(i + 1) * 4]
_scores = multi_scores[cls_inds, i]
if score_factors is not None:
_scores *= score_factors[cls_inds]
cls_dets = torch.cat([_bboxes, _scores[:, None]], dim=1)
cls_dets, _ = nms_op(cls_dets, **nms_cfg_)
cls_labels = multi_bboxes.new_full(
(cls_dets.shape[0], ), i - 1, dtype=torch.long)
bboxes.append(cls_dets)
labels.append(cls_labels)
if bboxes:
bboxes = torch.cat(bboxes)
labels = torch.cat(labels)
if bboxes.shape[0] > max_num:
_, inds = bboxes[:, -1].sort(descending=True)
inds = inds[:max_num]
bboxes = bboxes[inds]
labels = labels[inds]
else:
bboxes = multi_bboxes.new_zeros((0, 5))
labels = multi_bboxes.new_zeros((0, ), dtype=torch.long)
return bboxes, labels
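# --- Hedged usage sketch (editor addition, not part of the original file) ---
# multiclass_nms expects per-class scores whose column 0 is the background class.
# Running it requires the compiled nms extension shipped with this repo to be
# built; the boxes, scores and nms_cfg below are made-up illustrative values.
def _demo_multiclass_nms():
    multi_bboxes = torch.tensor([[10., 10., 50., 50.],
                                 [12., 12., 52., 52.],
                                 [100., 100., 150., 150.]])
    multi_scores = torch.tensor([[0.1, 0.9, 0.0],   # column 0 = background
                                 [0.2, 0.8, 0.0],
                                 [0.1, 0.0, 0.9]])
    dets, labels = multiclass_nms(
        multi_bboxes, multi_scores, score_thr=0.3,
        nms_cfg=dict(type='nms', iou_thr=0.5), max_num=100)
    # the two overlapping class-0 boxes are reduced to one detection by NMS
    return dets, labels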
|
Cream/CDARTS/CDARTS_detection/mmdet/core/post_processing/bbox_nms.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/post_processing/bbox_nms.py",
"repo_id": "Cream",
"token_count": 1182
}
| 276 |
from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..registry import PIPELINES
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, Sequence) and not mmcv.is_str(data):
return torch.tensor(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError('type {} cannot be converted to tensor.'.format(
type(data)))
@PIPELINES.register_module
class ToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
results[key] = to_tensor(results[key])
return results
def __repr__(self):
return self.__class__.__name__ + '(keys={})'.format(self.keys)
@PIPELINES.register_module
class ImageToTensor(object):
def __init__(self, keys):
self.keys = keys
def __call__(self, results):
for key in self.keys:
results[key] = to_tensor(results[key].transpose(2, 0, 1))
return results
def __repr__(self):
return self.__class__.__name__ + '(keys={})'.format(self.keys)
@PIPELINES.register_module
class Transpose(object):
def __init__(self, keys, order):
self.keys = keys
self.order = order
def __call__(self, results):
for key in self.keys:
results[key] = results[key].transpose(self.order)
return results
def __repr__(self):
return self.__class__.__name__ + '(keys={}, order={})'.format(
self.keys, self.order)
@PIPELINES.register_module
class ToDataContainer(object):
def __init__(self,
fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),
dict(key='gt_labels'))):
self.fields = fields
def __call__(self, results):
for field in self.fields:
field = field.copy()
key = field.pop('key')
results[key] = DC(results[key], **field)
return results
def __repr__(self):
return self.__class__.__name__ + '(fields={})'.format(self.fields)
@PIPELINES.register_module
class DefaultFormatBundle(object):
"""Default formatting bundle.
It simplifies the pipeline of formatting common fields, including "img",
"proposals", "gt_bboxes", "gt_labels", "gt_masks" and "gt_semantic_seg".
These fields are formatted as follows.
- img: (1)transpose, (2)to tensor, (3)to DataContainer (stack=True)
- proposals: (1)to tensor, (2)to DataContainer
- gt_bboxes: (1)to tensor, (2)to DataContainer
- gt_bboxes_ignore: (1)to tensor, (2)to DataContainer
- gt_labels: (1)to tensor, (2)to DataContainer
- gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)
- gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor,
(3)to DataContainer (stack=True)
"""
def __call__(self, results):
if 'img' in results:
img = np.ascontiguousarray(results['img'].transpose(2, 0, 1))
results['img'] = DC(to_tensor(img), stack=True)
for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:
if key not in results:
continue
results[key] = DC(to_tensor(results[key]))
if 'gt_masks' in results:
results['gt_masks'] = DC(results['gt_masks'], cpu_only=True)
if 'gt_semantic_seg' in results:
results['gt_semantic_seg'] = DC(
to_tensor(results['gt_semantic_seg'][None, ...]), stack=True)
return results
def __repr__(self):
return self.__class__.__name__
@PIPELINES.register_module
class Collect(object):
"""
Collect data from the loader relevant to the specific task.
This is usually the last stage of the data loader pipeline. Typically keys
is set to some subset of "img", "proposals", "gt_bboxes",
"gt_bboxes_ignore", "gt_labels", and/or "gt_masks".
The "img_meta" item is always populated. The contents of the "img_meta"
dictionary depends on "meta_keys". By default this includes:
- "img_shape": shape of the image input to the network as a tuple
(h, w, c). Note that images may be zero padded on the bottom/right
if the batch tensor is larger than this shape.
- "scale_factor": a float indicating the preprocessing scale
- "flip": a boolean indicating if image flip transform was used
- "filename": path to the image file
- "ori_shape": original shape of the image as a tuple (h, w, c)
- "pad_shape": image shape after padding
- "img_norm_cfg": a dict of normalization information:
- mean - per channel mean subtraction
- std - per channel std divisor
- to_rgb - bool indicating if bgr was converted to rgb
"""
def __init__(self,
keys,
meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
'scale_factor', 'flip', 'img_norm_cfg')):
self.keys = keys
self.meta_keys = meta_keys
def __call__(self, results):
data = {}
img_meta = {}
for key in self.meta_keys:
img_meta[key] = results[key]
data['img_meta'] = DC(img_meta, cpu_only=True)
for key in self.keys:
data[key] = results[key]
return data
def __repr__(self):
return self.__class__.__name__ + '(keys={}, meta_keys={})'.format(
self.keys, self.meta_keys)
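# --- Hedged usage sketch (editor addition, not part of the original file) ---
# These transforms are normally driven by the pipeline config; the direct calls
# below only illustrate the conversions on a random dummy image and label list.
def _demo_formatting():
    results = {'img': np.random.randint(0, 255, (8, 8, 3), dtype=np.uint8),
               'gt_labels': [1, 2, 3]}
    results = ImageToTensor(keys=['img'])(results)    # HxWxC ndarray -> CxHxW tensor
    results = ToTensor(keys=['gt_labels'])(results)   # list -> torch.Tensor
    assert results['img'].shape == (3, 8, 8)
    return results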
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/pipelines/formating.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/pipelines/formating.py",
"repo_id": "Cream",
"token_count": 2590
}
| 277 |
from __future__ import division
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import normal_init
from mmdet.core import (AnchorGenerator, anchor_target, anchor_inside_flags,
ga_loc_target, ga_shape_target, delta2bbox,
multi_apply, multiclass_nms, force_fp32)
from mmdet.ops import DeformConv, MaskedConv2d
from ..builder import build_loss
from .anchor_head import AnchorHead
from ..registry import HEADS
from ..utils import bias_init_with_prob
class FeatureAdaption(nn.Module):
"""Feature Adaption Module.
Feature Adaption Module is implemented based on DCN v1.
It uses anchor shape prediction rather than feature map to
predict offsets of deformable conv layer.
Args:
in_channels (int): Number of channels in the input feature map.
out_channels (int): Number of channels in the output feature map.
kernel_size (int): Deformable conv kernel size.
deformable_groups (int): Deformable conv group size.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
deformable_groups=4):
super(FeatureAdaption, self).__init__()
offset_channels = kernel_size * kernel_size * 2
self.conv_offset = nn.Conv2d(
2, deformable_groups * offset_channels, 1, bias=False)
self.conv_adaption = DeformConv(
in_channels,
out_channels,
kernel_size=kernel_size,
padding=(kernel_size - 1) // 2,
deformable_groups=deformable_groups)
self.relu = nn.ReLU(inplace=True)
def init_weights(self):
normal_init(self.conv_offset, std=0.1)
normal_init(self.conv_adaption, std=0.01)
def forward(self, x, shape):
offset = self.conv_offset(shape.detach())
x = self.relu(self.conv_adaption(x, offset))
return x
@HEADS.register_module
class GuidedAnchorHead(AnchorHead):
"""Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.).
This GuidedAnchorHead will predict high-quality feature guided
anchors and locations where anchors will be kept in inference.
There are mainly 3 categories of bounding-boxes.
- Sampled (9) pairs for target assignment. (approxes)
    - The square boxes that the predicted anchors are based on (squares).
- Guided anchors.
Please refer to https://arxiv.org/abs/1901.03278 for more details.
Args:
num_classes (int): Number of classes.
in_channels (int): Number of channels in the input feature map.
feat_channels (int): Number of channels of the feature map.
octave_base_scale (int): Base octave scale of each level of
feature map.
scales_per_octave (int): Number of octave scales in each level of
feature map
octave_ratios (Iterable): octave aspect ratios.
anchor_strides (Iterable): Anchor strides.
anchor_base_sizes (Iterable): Anchor base sizes.
anchoring_means (Iterable): Mean values of anchoring targets.
anchoring_stds (Iterable): Std values of anchoring targets.
target_means (Iterable): Mean values of regression targets.
target_stds (Iterable): Std values of regression targets.
deformable_groups: (int): Group number of DCN in
FeatureAdaption module.
loc_filter_thr (float): Threshold to filter out unconcerned regions.
loss_loc (dict): Config of location loss.
loss_shape (dict): Config of anchor shape loss.
loss_cls (dict): Config of classification loss.
loss_bbox (dict): Config of bbox regression loss.
"""
def __init__(
self,
num_classes,
in_channels,
feat_channels=256,
octave_base_scale=8,
scales_per_octave=3,
octave_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
anchoring_means=(.0, .0, .0, .0),
anchoring_stds=(1.0, 1.0, 1.0, 1.0),
target_means=(.0, .0, .0, .0),
target_stds=(1.0, 1.0, 1.0, 1.0),
deformable_groups=4,
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)):
super(AnchorHead, self).__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.octave_scales = octave_base_scale * np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
self.approxs_per_octave = len(self.octave_scales) * len(octave_ratios)
self.octave_ratios = octave_ratios
self.anchor_strides = anchor_strides
self.anchor_base_sizes = list(
anchor_strides) if anchor_base_sizes is None else anchor_base_sizes
self.anchoring_means = anchoring_means
self.anchoring_stds = anchoring_stds
self.target_means = target_means
self.target_stds = target_stds
self.deformable_groups = deformable_groups
self.loc_filter_thr = loc_filter_thr
self.approx_generators = []
self.square_generators = []
for anchor_base in self.anchor_base_sizes:
# Generators for approxs
self.approx_generators.append(
AnchorGenerator(anchor_base, self.octave_scales,
self.octave_ratios))
# Generators for squares
self.square_generators.append(
AnchorGenerator(anchor_base, [self.octave_base_scale], [1.0]))
# one anchor per location
self.num_anchors = 1
self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
self.cls_focal_loss = loss_cls['type'] in ['FocalLoss']
self.loc_focal_loss = loss_loc['type'] in ['FocalLoss']
if self.use_sigmoid_cls:
self.cls_out_channels = self.num_classes - 1
else:
self.cls_out_channels = self.num_classes
# build losses
self.loss_loc = build_loss(loss_loc)
self.loss_shape = build_loss(loss_shape)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
self.fp16_enabled = False
self._init_layers()
def _init_layers(self):
self.relu = nn.ReLU(inplace=True)
self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)
self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,
1)
self.feature_adaption = FeatureAdaption(
self.feat_channels,
self.feat_channels,
kernel_size=3,
deformable_groups=self.deformable_groups)
self.conv_cls = MaskedConv2d(self.feat_channels,
self.num_anchors * self.cls_out_channels,
1)
self.conv_reg = MaskedConv2d(self.feat_channels, self.num_anchors * 4,
1)
def init_weights(self):
normal_init(self.conv_cls, std=0.01)
normal_init(self.conv_reg, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.conv_loc, std=0.01, bias=bias_cls)
normal_init(self.conv_shape, std=0.01)
self.feature_adaption.init_weights()
def forward_single(self, x):
loc_pred = self.conv_loc(x)
shape_pred = self.conv_shape(x)
x = self.feature_adaption(x, shape_pred)
# masked conv is only used during inference for speed-up
if not self.training:
mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr
else:
mask = None
cls_score = self.conv_cls(x, mask)
bbox_pred = self.conv_reg(x, mask)
return cls_score, bbox_pred, shape_pred, loc_pred
def forward(self, feats):
return multi_apply(self.forward_single, feats)
def get_sampled_approxs(self, featmap_sizes, img_metas, cfg):
"""Get sampled approxs and inside flags according to feature map sizes.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
img_metas (list[dict]): Image meta info.
Returns:
tuple: approxes of each image, inside flags of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# approxes for one time
multi_level_approxs = []
for i in range(num_levels):
approxs = self.approx_generators[i].grid_anchors(
featmap_sizes[i], self.anchor_strides[i])
multi_level_approxs.append(approxs)
approxs_list = [multi_level_approxs for _ in range(num_imgs)]
# for each image, we compute inside flags of multi level approxes
inside_flag_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_flags = []
multi_level_approxs = approxs_list[img_id]
for i in range(num_levels):
approxs = multi_level_approxs[i]
anchor_stride = self.anchor_strides[i]
feat_h, feat_w = featmap_sizes[i]
h, w, _ = img_meta['pad_shape']
valid_feat_h = min(int(np.ceil(h / anchor_stride)), feat_h)
valid_feat_w = min(int(np.ceil(w / anchor_stride)), feat_w)
flags = self.approx_generators[i].valid_flags(
(feat_h, feat_w), (valid_feat_h, valid_feat_w))
inside_flags_list = []
for i in range(self.approxs_per_octave):
split_valid_flags = flags[i::self.approxs_per_octave]
split_approxs = approxs[i::self.approxs_per_octave, :]
inside_flags = anchor_inside_flags(
split_approxs, split_valid_flags,
img_meta['img_shape'][:2], cfg.allowed_border)
inside_flags_list.append(inside_flags)
# inside_flag for a position is true if any anchor in this
# position is true
inside_flags = (
torch.stack(inside_flags_list, 0).sum(dim=0) > 0)
multi_level_flags.append(inside_flags)
inside_flag_list.append(multi_level_flags)
return approxs_list, inside_flag_list
def get_anchors(self,
featmap_sizes,
shape_preds,
loc_preds,
img_metas,
use_loc_filter=False):
"""Get squares according to feature map sizes and guided
anchors.
Args:
featmap_sizes (list[tuple]): Multi-level feature map sizes.
shape_preds (list[tensor]): Multi-level shape predictions.
loc_preds (list[tensor]): Multi-level location predictions.
img_metas (list[dict]): Image meta info.
use_loc_filter (bool): Use loc filter or not.
Returns:
tuple: square approxs of each image, guided anchors of each image,
loc masks of each image
"""
num_imgs = len(img_metas)
num_levels = len(featmap_sizes)
# since feature map sizes of all images are the same, we only compute
# squares for one time
multi_level_squares = []
for i in range(num_levels):
squares = self.square_generators[i].grid_anchors(
featmap_sizes[i], self.anchor_strides[i])
multi_level_squares.append(squares)
squares_list = [multi_level_squares for _ in range(num_imgs)]
# for each image, we compute multi level guided anchors
guided_anchors_list = []
loc_mask_list = []
for img_id, img_meta in enumerate(img_metas):
multi_level_guided_anchors = []
multi_level_loc_mask = []
for i in range(num_levels):
squares = squares_list[img_id][i]
shape_pred = shape_preds[i][img_id]
loc_pred = loc_preds[i][img_id]
guided_anchors, loc_mask = self.get_guided_anchors_single(
squares,
shape_pred,
loc_pred,
use_loc_filter=use_loc_filter)
multi_level_guided_anchors.append(guided_anchors)
multi_level_loc_mask.append(loc_mask)
guided_anchors_list.append(multi_level_guided_anchors)
loc_mask_list.append(multi_level_loc_mask)
return squares_list, guided_anchors_list, loc_mask_list
def get_guided_anchors_single(self,
squares,
shape_pred,
loc_pred,
use_loc_filter=False):
"""Get guided anchors and loc masks for a single level.
Args:
            squares (tensor): Squares of a single level.
            shape_pred (tensor): Shape predictions of a single level.
            loc_pred (tensor): Loc predictions of a single level.
            use_loc_filter (bool): Use loc filter or not.
Returns:
tuple: guided anchors, location masks
"""
# calculate location filtering mask
loc_pred = loc_pred.sigmoid().detach()
if use_loc_filter:
loc_mask = loc_pred >= self.loc_filter_thr
else:
loc_mask = loc_pred >= 0.0
mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_anchors)
mask = mask.contiguous().view(-1)
# calculate guided anchors
squares = squares[mask]
anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(
-1, 2).detach()[mask]
bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
bbox_deltas[:, 2:] = anchor_deltas
guided_anchors = delta2bbox(
squares,
bbox_deltas,
self.anchoring_means,
self.anchoring_stds,
wh_ratio_clip=1e-6)
return guided_anchors, mask
def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,
anchor_weights, anchor_total_num):
shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2)
bbox_anchors = bbox_anchors.contiguous().view(-1, 4)
bbox_gts = bbox_gts.contiguous().view(-1, 4)
anchor_weights = anchor_weights.contiguous().view(-1, 4)
bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0)
bbox_deltas[:, 2:] += shape_pred
# filter out negative samples to speed-up weighted_bounded_iou_loss
inds = torch.nonzero(anchor_weights[:, 0] > 0).squeeze(1)
bbox_deltas_ = bbox_deltas[inds]
bbox_anchors_ = bbox_anchors[inds]
bbox_gts_ = bbox_gts[inds]
anchor_weights_ = anchor_weights[inds]
pred_anchors_ = delta2bbox(
bbox_anchors_,
bbox_deltas_,
self.anchoring_means,
self.anchoring_stds,
wh_ratio_clip=1e-6)
loss_shape = self.loss_shape(
pred_anchors_,
bbox_gts_,
anchor_weights_,
avg_factor=anchor_total_num)
return loss_shape
def loss_loc_single(self, loc_pred, loc_target, loc_weight, loc_avg_factor,
cfg):
loss_loc = self.loss_loc(
loc_pred.reshape(-1, 1),
loc_target.reshape(-1, 1).long(),
loc_weight.reshape(-1, 1),
avg_factor=loc_avg_factor)
return loss_loc
@force_fp32(
apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
def loss(self,
cls_scores,
bbox_preds,
shape_preds,
loc_preds,
gt_bboxes,
gt_labels,
img_metas,
cfg,
gt_bboxes_ignore=None):
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
assert len(featmap_sizes) == len(self.approx_generators)
# get loc targets
loc_targets, loc_weights, loc_avg_factor = ga_loc_target(
gt_bboxes,
featmap_sizes,
self.octave_base_scale,
self.anchor_strides,
center_ratio=cfg.center_ratio,
ignore_ratio=cfg.ignore_ratio)
# get sampled approxes
approxs_list, inside_flag_list = self.get_sampled_approxs(
featmap_sizes, img_metas, cfg)
# get squares and guided anchors
squares_list, guided_anchors_list, _ = self.get_anchors(
featmap_sizes, shape_preds, loc_preds, img_metas)
# get shape targets
sampling = False if not hasattr(cfg, 'ga_sampler') else True
shape_targets = ga_shape_target(
approxs_list,
inside_flag_list,
squares_list,
gt_bboxes,
img_metas,
self.approxs_per_octave,
cfg,
sampling=sampling)
if shape_targets is None:
return None
(bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num,
anchor_bg_num) = shape_targets
anchor_total_num = (
anchor_fg_num if not sampling else anchor_fg_num + anchor_bg_num)
# get anchor targets
sampling = False if self.cls_focal_loss else True
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
cls_reg_targets = anchor_target(
guided_anchors_list,
inside_flag_list,
gt_bboxes,
img_metas,
self.target_means,
self.target_stds,
cfg,
gt_bboxes_ignore_list=gt_bboxes_ignore,
gt_labels_list=gt_labels,
label_channels=label_channels,
sampling=sampling)
if cls_reg_targets is None:
return None
(labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,
num_total_pos, num_total_neg) = cls_reg_targets
num_total_samples = (
num_total_pos if self.cls_focal_loss else num_total_pos +
num_total_neg)
# get classification and bbox regression losses
losses_cls, losses_bbox = multi_apply(
self.loss_single,
cls_scores,
bbox_preds,
labels_list,
label_weights_list,
bbox_targets_list,
bbox_weights_list,
num_total_samples=num_total_samples,
cfg=cfg)
# get anchor location loss
losses_loc = []
for i in range(len(loc_preds)):
loss_loc = self.loss_loc_single(
loc_preds[i],
loc_targets[i],
loc_weights[i],
loc_avg_factor=loc_avg_factor,
cfg=cfg)
losses_loc.append(loss_loc)
# get anchor shape loss
losses_shape = []
for i in range(len(shape_preds)):
loss_shape = self.loss_shape_single(
shape_preds[i],
bbox_anchors_list[i],
bbox_gts_list[i],
anchor_weights_list[i],
anchor_total_num=anchor_total_num)
losses_shape.append(loss_shape)
return dict(
loss_cls=losses_cls,
loss_bbox=losses_bbox,
loss_shape=losses_shape,
loss_loc=losses_loc)
@force_fp32(
apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))
def get_bboxes(self,
cls_scores,
bbox_preds,
shape_preds,
loc_preds,
img_metas,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len(
loc_preds)
num_levels = len(cls_scores)
featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
# get guided anchors
_, guided_anchors, loc_masks = self.get_anchors(
featmap_sizes,
shape_preds,
loc_preds,
img_metas,
use_loc_filter=not self.training)
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
guided_anchor_list = [
guided_anchors[img_id][i].detach() for i in range(num_levels)
]
loc_mask_list = [
loc_masks[img_id][i].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list,
guided_anchor_list,
loc_mask_list, img_shape,
scale_factor, cfg, rescale)
result_list.append(proposals)
return result_list
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
mlvl_masks,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds,
mlvl_anchors,
mlvl_masks):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
# if no location is kept, end.
if mask.sum() == 0:
continue
# reshape scores and bbox_pred
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
# filter scores, bbox_pred w.r.t. mask.
# anchors are filtered in get_anchors() beforehand.
scores = scores[mask, :]
bbox_pred = bbox_pred[mask, :]
if scores.dim() == 0:
anchors = anchors.unsqueeze(0)
scores = scores.unsqueeze(0)
bbox_pred = bbox_pred.unsqueeze(0)
# filter anchors, bbox_pred, scores w.r.t. scores
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
max_scores, _ = scores[:, 1:].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
bboxes = delta2bbox(anchors, bbox_pred, self.target_means,
self.target_stds, img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
if self.use_sigmoid_cls:
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
# multi class NMS
det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
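# --- Hedged illustration (editor addition, mirrors get_guided_anchors_single) ---
# A guided anchor keeps the square's centre and rescales its width/height by the
# predicted (dw, dh); the snippet reproduces that expansion on one hand-made
# square without the nn.Module machinery (all numbers are arbitrary).
def _demo_guided_anchor_expansion():
    squares = torch.tensor([[92., 92., 163., 163.]])   # one base square
    anchor_deltas = torch.tensor([[0.5, -0.5]])        # predicted (dw, dh)
    bbox_deltas = anchor_deltas.new_full(squares.size(), 0)
    bbox_deltas[:, 2:] = anchor_deltas
    guided_anchors = delta2bbox(
        squares, bbox_deltas, (0., 0., 0., 0.), (1.0, 1.0, 1.0, 1.0),
        wh_ratio_clip=1e-6)
    return guided_anchors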
|
Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/guided_anchor_head.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/guided_anchor_head.py",
"repo_id": "Cream",
"token_count": 13135
}
| 278 |
import logging
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from torch.nn.modules.batchnorm import _BatchNorm
from mmcv.cnn import constant_init, kaiming_init
from .utils import load_checkpoint
from ..registry import BACKBONES
norm_cfg = {
'BN': nn.BatchNorm2d,
'SyncBN': nn.SyncBatchNorm,
'GN': nn.GroupNorm,
}
_norm = 'BN'
norm_layer = norm_cfg[_norm]
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, kernel_size=3, rf_series=1, rf_sd=1, rf_bn=True, rf_relu=True):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, 1, groups=hidden_dim, bias=False),
norm_layer(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
norm_layer(oup),
)
else:
self.conv = []
# pw
self.conv.append(nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False))
self.conv.append(norm_layer(hidden_dim))
self.conv.append(nn.ReLU6(inplace=True))
# dw
for idx in range(rf_series):
self.conv.append(nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride,
padding=int((kernel_size-1)*(idx+1)/2),
dilation=idx+1, groups=hidden_dim, bias=False))
if rf_bn:
self.conv.append(norm_layer(hidden_dim))
if rf_relu:
self.conv.append(nn.ReLU6(inplace=True))
if not rf_bn:
self.conv.append(norm_layer(hidden_dim))
if not rf_relu:
self.conv.append(nn.ReLU6(inplace=True))
# pw-linear
self.conv.append(nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False))
self.conv.append(norm_layer(oup))
self.conv = nn.Sequential(*self.conv)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
@BACKBONES.register_module
class MobileNetV2(nn.Module):
def __init__(self,
width_mult=1.,
input_channel=32,
last_channel = 1280,
kernel_size=3,
out_indices=(2, 5, 12, 17),
style='pytorch',
frozen_stages=-1,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = int(input_channel * width_mult)
last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
interverted_residual_setting = [
# t, c, n, s
[1, 16, 1, 1], # 112x112 0
[6, 24, 2, 2], # 56x56 2
[6, 32, 3, 2], # 28x28 5
[6, 64, 4, 2], # 14x14 9
[6, 96, 3, 1], # 14x14 12
[6, 160, 3, 2], # 7x7 15
[6, 320, 1, 1], # 7x7 16
]
self.kernel_size=kernel_size
self.out_indices = out_indices
self.style = style
self.frozen_stages = frozen_stages
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.norm_eval = norm_eval
self.mv2_layer = []
features = []
features.append(
nn.Sequential(
nn.Conv2d(3, input_channel, 3, stride=2, padding=1, bias=False),
norm_layer(input_channel),
nn.ReLU6(inplace=True)
)
)
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
features.append(block(input_channel, output_channel, s, expand_ratio=t,
kernel_size=3))
else:
features.append(block(input_channel, output_channel, 1, expand_ratio=t,
kernel_size=kernel_size))
input_channel = output_channel
features.append(
nn.Sequential(
nn.Conv2d(input_channel, last_channel, 1, 1, 0, bias=False),
norm_layer(last_channel),
nn.ReLU6(inplace=True)
)
)
for i, module in enumerate(features):
layer_name = 'features{}'.format(i)
self.add_module(layer_name, module)
self.mv2_layer.append(layer_name)
for m in self.modules():
if isinstance(m, nn.SyncBatchNorm):
m._specify_ddp_gpu_num(1)
self._freeze_stages()
@property
def norm1(self):
return getattr(self, self.norm1_name)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.norm1.eval()
for m in [self.conv1, self.norm1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
outs = []
for i, layer_name in enumerate(self.mv2_layer):
layer = getattr(self, layer_name)
x = layer(x)
if i in self.out_indices:
outs.append(x)
return tuple(outs)
'''
def train(self, mode=True):
super(MobileNetV2, self).train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
# trick: eval have effect on BatchNorm only
if isinstance(m, _BatchNorm):
m.eval()
'''
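# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Running the backbone on a dummy batch yields one feature map per entry in
# out_indices; with the default strides and a 224x224 input the expected spatial
# sizes are 56, 28, 14 and 7. Importing this module requires the surrounding
# mmdet package (relative imports above).
def _demo_mobilenetv2_forward():
    backbone = MobileNetV2(out_indices=(2, 5, 12, 17))
    backbone.init_weights()
    backbone.eval()
    feats = backbone(torch.randn(1, 3, 224, 224))
    return [f.shape for f in feats]   # strides 4, 8, 16, 32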
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/mobilenetv2.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/mobilenetv2.py",
"repo_id": "Cream",
"token_count": 3779
}
| 279 |
import logging
from abc import ABCMeta, abstractmethod
import mmcv
import numpy as np
import torch.nn as nn
import pycocotools.mask as maskUtils
from mmdet.core import tensor2imgs, get_classes, auto_fp16
class BaseDetector(nn.Module):
"""Base class for detectors"""
__metaclass__ = ABCMeta
def __init__(self):
super(BaseDetector, self).__init__()
self.fp16_enabled = False
@property
def with_neck(self):
return hasattr(self, 'neck') and self.neck is not None
@property
def with_shared_head(self):
return hasattr(self, 'shared_head') and self.shared_head is not None
@property
def with_bbox(self):
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self):
return hasattr(self, 'mask_head') and self.mask_head is not None
@abstractmethod
def extract_feat(self, imgs):
pass
def extract_feats(self, imgs):
assert isinstance(imgs, list)
for img in imgs:
yield self.extract_feat(img)
@abstractmethod
def forward_train(self, imgs, img_metas, **kwargs):
"""
Args:
img (list[Tensor]): list of tensors of shape (1, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has:
                'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
**kwargs: specific to concrete implementation
"""
pass
@abstractmethod
def simple_test(self, img, img_meta, **kwargs):
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
pass
def init_weights(self, pretrained=None):
if pretrained is not None:
logger = logging.getLogger()
logger.info('load model from: {}'.format(pretrained))
def forward_test(self, imgs, img_metas, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_meta (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError('{} must be a list, but got {}'.format(
name, type(var)))
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(
'num of augmentations ({}) != num of image meta ({})'.format(
len(imgs), len(img_metas)))
# TODO: remove the restriction of imgs_per_gpu == 1 when prepared
imgs_per_gpu = imgs[0].size(0)
assert imgs_per_gpu == 1
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], **kwargs)
else:
return self.aug_test(imgs, img_metas, **kwargs)
@auto_fp16(apply_to=('img', ))
def forward(self, img, img_meta, return_loss=True, **kwargs):
"""
Calls either forward_train or forward_test depending on whether
return_loss=True. Note this setting will change the expected inputs.
When `return_loss=False`, img and img_meta are single-nested (i.e.
        Tensor and List[dict]), and when `return_loss=True`, img and img_meta
should be double nested (i.e. List[Tensor], List[List[dict]]), with
the outer list indicating test time augmentations.
"""
if return_loss:
return self.forward_train(img, img_meta, **kwargs)
else:
return self.forward_test(img, img_meta, **kwargs)
def show_result(self,
data,
result,
img_norm_cfg,
dataset=None,
score_thr=0.5,
out_file=None):
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
img_tensor = data['img'][0]
img_metas = data['img_meta'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_norm_cfg)
assert len(imgs) == len(img_metas)
if dataset is None:
class_names = self.CLASSES
elif isinstance(dataset, str):
class_names = get_classes(dataset)
elif isinstance(dataset, (list, tuple)):
class_names = dataset
else:
raise TypeError(
'dataset must be a valid dataset name or a sequence'
' of class names, not {}'.format(type(dataset)))
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
                    mask = maskUtils.decode(segms[i]).astype(bool)
img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
mmcv.imshow_det_bboxes(
img_show,
bboxes,
labels,
class_names=class_names,
score_thr=score_thr,
show=out_file is None,
out_file=out_file)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/base.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/base.py",
"repo_id": "Cream",
"token_count": 3061
}
| 280 |
import torch.nn as nn
def accuracy(pred, target, topk=1):
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
_, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t()
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1, )):
super().__init__()
self.topk = topk
def forward(self, pred, target):
return accuracy(pred, target, self.topk)
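# --- Hedged usage sketch (editor addition, not part of the original file) ---
# accuracy() takes raw per-class scores of shape (N, C) and integer targets of
# shape (N,); with the toy inputs below top-1 accuracy is 50% and top-2 is 100%.
def _demo_accuracy():
    import torch
    pred = torch.tensor([[0.1, 0.8, 0.1],   # argmax 1 (correct)
                         [0.7, 0.2, 0.1]])  # argmax 0 (wrong, target is 1)
    target = torch.tensor([1, 1])
    top1, top2 = accuracy(pred, target, topk=(1, 2))
    assert float(top1) == 50.0 and float(top2) == 100.0
    return top1, top2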
|
Cream/CDARTS/CDARTS_detection/mmdet/models/losses/accuracy.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/losses/accuracy.py",
"repo_id": "Cream",
"token_count": 359
}
| 281 |
from .res_layer import ResLayer
__all__ = ['ResLayer']
|
Cream/CDARTS/CDARTS_detection/mmdet/models/shared_heads/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/shared_heads/__init__.py",
"repo_id": "Cream",
"token_count": 19
}
| 282 |
from torch import nn
from ..functions.deform_pool import deform_roi_pooling
class DeformRoIPooling(nn.Module):
def __init__(self,
spatial_scale,
out_size,
out_channels,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0):
super(DeformRoIPooling, self).__init__()
self.spatial_scale = spatial_scale
self.out_size = out_size
self.out_channels = out_channels
self.no_trans = no_trans
self.group_size = group_size
self.part_size = out_size if part_size is None else part_size
self.sample_per_part = sample_per_part
self.trans_std = trans_std
def forward(self, data, rois, offset):
if self.no_trans:
offset = data.new_empty(0)
return deform_roi_pooling(
data, rois, offset, self.spatial_scale, self.out_size,
self.out_channels, self.no_trans, self.group_size, self.part_size,
self.sample_per_part, self.trans_std)
class DeformRoIPoolingPack(DeformRoIPooling):
def __init__(self,
spatial_scale,
out_size,
out_channels,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0,
num_offset_fcs=3,
deform_fc_channels=1024):
super(DeformRoIPoolingPack,
self).__init__(spatial_scale, out_size, out_channels, no_trans,
group_size, part_size, sample_per_part, trans_std)
self.num_offset_fcs = num_offset_fcs
self.deform_fc_channels = deform_fc_channels
if not no_trans:
seq = []
ic = self.out_size * self.out_size * self.out_channels
for i in range(self.num_offset_fcs):
if i < self.num_offset_fcs - 1:
oc = self.deform_fc_channels
else:
oc = self.out_size * self.out_size * 2
seq.append(nn.Linear(ic, oc))
ic = oc
if i < self.num_offset_fcs - 1:
seq.append(nn.ReLU(inplace=True))
self.offset_fc = nn.Sequential(*seq)
self.offset_fc[-1].weight.data.zero_()
self.offset_fc[-1].bias.data.zero_()
def forward(self, data, rois):
assert data.size(1) == self.out_channels
if self.no_trans:
offset = data.new_empty(0)
return deform_roi_pooling(
data, rois, offset, self.spatial_scale, self.out_size,
self.out_channels, self.no_trans, self.group_size,
self.part_size, self.sample_per_part, self.trans_std)
else:
n = rois.shape[0]
offset = data.new_empty(0)
x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
self.out_size, self.out_channels, True,
self.group_size, self.part_size,
self.sample_per_part, self.trans_std)
offset = self.offset_fc(x.view(n, -1))
offset = offset.view(n, 2, self.out_size, self.out_size)
return deform_roi_pooling(
data, rois, offset, self.spatial_scale, self.out_size,
self.out_channels, self.no_trans, self.group_size,
self.part_size, self.sample_per_part, self.trans_std)
class ModulatedDeformRoIPoolingPack(DeformRoIPooling):
def __init__(self,
spatial_scale,
out_size,
out_channels,
no_trans,
group_size=1,
part_size=None,
sample_per_part=4,
trans_std=.0,
num_offset_fcs=3,
num_mask_fcs=2,
deform_fc_channels=1024):
super(ModulatedDeformRoIPoolingPack, self).__init__(
spatial_scale, out_size, out_channels, no_trans, group_size,
part_size, sample_per_part, trans_std)
self.num_offset_fcs = num_offset_fcs
self.num_mask_fcs = num_mask_fcs
self.deform_fc_channels = deform_fc_channels
if not no_trans:
offset_fc_seq = []
ic = self.out_size * self.out_size * self.out_channels
for i in range(self.num_offset_fcs):
if i < self.num_offset_fcs - 1:
oc = self.deform_fc_channels
else:
oc = self.out_size * self.out_size * 2
offset_fc_seq.append(nn.Linear(ic, oc))
ic = oc
if i < self.num_offset_fcs - 1:
offset_fc_seq.append(nn.ReLU(inplace=True))
self.offset_fc = nn.Sequential(*offset_fc_seq)
self.offset_fc[-1].weight.data.zero_()
self.offset_fc[-1].bias.data.zero_()
mask_fc_seq = []
ic = self.out_size * self.out_size * self.out_channels
for i in range(self.num_mask_fcs):
if i < self.num_mask_fcs - 1:
oc = self.deform_fc_channels
else:
oc = self.out_size * self.out_size
mask_fc_seq.append(nn.Linear(ic, oc))
ic = oc
if i < self.num_mask_fcs - 1:
mask_fc_seq.append(nn.ReLU(inplace=True))
else:
mask_fc_seq.append(nn.Sigmoid())
self.mask_fc = nn.Sequential(*mask_fc_seq)
self.mask_fc[-2].weight.data.zero_()
self.mask_fc[-2].bias.data.zero_()
def forward(self, data, rois):
assert data.size(1) == self.out_channels
if self.no_trans:
offset = data.new_empty(0)
return deform_roi_pooling(
data, rois, offset, self.spatial_scale, self.out_size,
self.out_channels, self.no_trans, self.group_size,
self.part_size, self.sample_per_part, self.trans_std)
else:
n = rois.shape[0]
offset = data.new_empty(0)
x = deform_roi_pooling(data, rois, offset, self.spatial_scale,
self.out_size, self.out_channels, True,
self.group_size, self.part_size,
self.sample_per_part, self.trans_std)
offset = self.offset_fc(x.view(n, -1))
offset = offset.view(n, 2, self.out_size, self.out_size)
mask = self.mask_fc(x.view(n, -1))
mask = mask.view(n, 1, self.out_size, self.out_size)
return deform_roi_pooling(
data, rois, offset, self.spatial_scale, self.out_size,
self.out_channels, self.no_trans, self.group_size,
self.part_size, self.sample_per_part, self.trans_std) * mask
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/modules/deform_pool.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/modules/deform_pool.py",
"repo_id": "Cream",
"token_count": 4013
}
| 283 |
from .nms_wrapper import nms, soft_nms
__all__ = ['nms', 'soft_nms']
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/nms/__init__.py",
"repo_id": "Cream",
"token_count": 31
}
| 284 |
#include <torch/extension.h>
#include <cmath>
#include <vector>
int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois,
const float spatial_scale, const int sample_num,
const int channels, const int height,
const int width, const int num_rois,
const int pooled_height, const int pooled_width,
at::Tensor output);
int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois,
const float spatial_scale, const int sample_num,
const int channels, const int height,
const int width, const int num_rois,
const int pooled_height, const int pooled_width,
at::Tensor bottom_grad);
#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
#define CHECK_CONTIGUOUS(x) \
TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ")
#define CHECK_INPUT(x) \
CHECK_CUDA(x); \
CHECK_CONTIGUOUS(x)
int roi_align_forward_cuda(at::Tensor features, at::Tensor rois,
int pooled_height, int pooled_width,
float spatial_scale, int sample_num,
at::Tensor output) {
CHECK_INPUT(features);
CHECK_INPUT(rois);
CHECK_INPUT(output);
// Number of ROIs
int num_rois = rois.size(0);
int size_rois = rois.size(1);
if (size_rois != 5) {
printf("wrong roi size\n");
return 0;
}
int num_channels = features.size(1);
int data_height = features.size(2);
int data_width = features.size(3);
ROIAlignForwardLaucher(features, rois, spatial_scale, sample_num,
num_channels, data_height, data_width, num_rois,
pooled_height, pooled_width, output);
return 1;
}
int roi_align_backward_cuda(at::Tensor top_grad, at::Tensor rois,
int pooled_height, int pooled_width,
float spatial_scale, int sample_num,
at::Tensor bottom_grad) {
CHECK_INPUT(top_grad);
CHECK_INPUT(rois);
CHECK_INPUT(bottom_grad);
// Number of ROIs
int num_rois = rois.size(0);
int size_rois = rois.size(1);
if (size_rois != 5) {
printf("wrong roi size\n");
return 0;
}
int num_channels = bottom_grad.size(1);
int data_height = bottom_grad.size(2);
int data_width = bottom_grad.size(3);
ROIAlignBackwardLaucher(top_grad, rois, spatial_scale, sample_num,
num_channels, data_height, data_width, num_rois,
pooled_height, pooled_width, bottom_grad);
return 1;
}
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &roi_align_forward_cuda, "Roi_Align forward (CUDA)");
m.def("backward", &roi_align_backward_cuda, "Roi_Align backward (CUDA)");
}
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_cuda.cpp/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/src/roi_align_cuda.cpp",
"repo_id": "Cream",
"token_count": 1461
}
| 285 |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='SigmoidFocalLoss',
ext_modules=[
CUDAExtension('sigmoid_focal_loss_cuda', [
'src/sigmoid_focal_loss.cpp',
'src/sigmoid_focal_loss_cuda.cu',
]),
],
cmdclass={'build_ext': BuildExtension})
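# Hedged note (editor addition): this extension is typically compiled in place
# with `python setup.py build_ext --inplace`; the exact CUDA/PyTorch version
# requirements follow the top-level repository instructions.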
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/setup.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/setup.py",
"repo_id": "Cream",
"token_count": 164
}
| 286 |
import argparse
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
import numpy as np
from mmdet.core import voc_classes
label_ids = {name: i + 1 for i, name in enumerate(voc_classes())}
def parse_xml(args):
xml_path, img_path = args
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
bboxes = []
labels = []
bboxes_ignore = []
labels_ignore = []
for obj in root.findall('object'):
name = obj.find('name').text
label = label_ids[name]
difficult = int(obj.find('difficult').text)
bnd_box = obj.find('bndbox')
bbox = [
int(bnd_box.find('xmin').text),
int(bnd_box.find('ymin').text),
int(bnd_box.find('xmax').text),
int(bnd_box.find('ymax').text)
]
if difficult:
bboxes_ignore.append(bbox)
labels_ignore.append(label)
else:
bboxes.append(bbox)
labels.append(label)
if not bboxes:
bboxes = np.zeros((0, 4))
labels = np.zeros((0, ))
else:
bboxes = np.array(bboxes, ndmin=2) - 1
labels = np.array(labels)
if not bboxes_ignore:
bboxes_ignore = np.zeros((0, 4))
labels_ignore = np.zeros((0, ))
else:
bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
labels_ignore = np.array(labels_ignore)
annotation = {
'filename': img_path,
'width': w,
'height': h,
'ann': {
'bboxes': bboxes.astype(np.float32),
'labels': labels.astype(np.int64),
'bboxes_ignore': bboxes_ignore.astype(np.float32),
'labels_ignore': labels_ignore.astype(np.int64)
}
}
return annotation
def cvt_annotations(devkit_path, years, split, out_file):
if not isinstance(years, list):
years = [years]
annotations = []
for year in years:
filelist = osp.join(devkit_path, 'VOC{}/ImageSets/Main/{}.txt'.format(
year, split))
if not osp.isfile(filelist):
print('filelist does not exist: {}, skip voc{} {}'.format(
filelist, year, split))
return
img_names = mmcv.list_from_file(filelist)
xml_paths = [
osp.join(devkit_path, 'VOC{}/Annotations/{}.xml'.format(
year, img_name)) for img_name in img_names
]
img_paths = [
'VOC{}/JPEGImages/{}.jpg'.format(year, img_name)
for img_name in img_names
]
part_annotations = mmcv.track_progress(parse_xml,
list(zip(xml_paths, img_paths)))
annotations.extend(part_annotations)
mmcv.dump(annotations, out_file)
return annotations
def parse_args():
parser = argparse.ArgumentParser(
description='Convert PASCAL VOC annotations to mmdetection format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('-o', '--out-dir', help='output path')
args = parser.parse_args()
return args
def main():
args = parse_args()
devkit_path = args.devkit_path
out_dir = args.out_dir if args.out_dir else devkit_path
mmcv.mkdir_or_exist(out_dir)
years = []
if osp.isdir(osp.join(devkit_path, 'VOC2007')):
years.append('2007')
if osp.isdir(osp.join(devkit_path, 'VOC2012')):
years.append('2012')
if '2007' in years and '2012' in years:
years.append(['2007', '2012'])
if not years:
raise IOError('The devkit path {} contains neither "VOC2007" nor '
'"VOC2012" subfolder'.format(devkit_path))
for year in years:
if year == '2007':
prefix = 'voc07'
elif year == '2012':
prefix = 'voc12'
elif year == ['2007', '2012']:
prefix = 'voc0712'
for split in ['train', 'val', 'trainval']:
dataset_name = prefix + '_' + split
print('processing {} ...'.format(dataset_name))
cvt_annotations(devkit_path, year, split,
osp.join(out_dir, dataset_name + '.pkl'))
if not isinstance(year, list):
dataset_name = prefix + '_test'
print('processing {} ...'.format(dataset_name))
cvt_annotations(devkit_path, year, 'test',
osp.join(out_dir, dataset_name + '.pkl'))
print('Done!')
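# --- Hedged usage sketch (editor addition, not part of the original file) ---
# parse_xml() consumes an (xml_path, img_path) pair. The snippet writes a minimal
# single-object VOC annotation to a temporary file and parses it; the class name
# 'dog' is assumed to be in voc_classes() and the image path is a made-up value.
def _demo_parse_xml():
    import os
    import tempfile
    xml = ('<annotation><size><width>500</width><height>375</height></size>'
           '<object><name>dog</name><difficult>0</difficult>'
           '<bndbox><xmin>48</xmin><ymin>240</ymin>'
           '<xmax>195</xmax><ymax>371</ymax></bndbox>'
           '</object></annotation>')
    with tempfile.NamedTemporaryFile('w', suffix='.xml', delete=False) as f:
        f.write(xml)
        xml_path = f.name
    ann = parse_xml((xml_path, 'VOC2007/JPEGImages/000001.jpg'))
    os.remove(xml_path)
    assert ann['ann']['bboxes'].shape == (1, 4)
    return ann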
if __name__ == '__main__':
main()
|
Cream/CDARTS/CDARTS_detection/tools/convert_datasets/pascal_voc.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/tools/convert_datasets/pascal_voc.py",
"repo_id": "Cream",
"token_count": 2230
}
| 287 |
import os
import numpy as np
from PIL import Image
from torch.utils import data
from dataloaders import custom_transforms as tr
def twoTrainSeg(args, root):
images_base = os.path.join(root, 'leftImg8bit', 'train')
train_files = [os.path.join(looproot, filename) for looproot, _, filenames in os.walk(images_base)
for filename in filenames if filename.endswith('.png')]
number_images = len(train_files)
permuted_indices_ls = np.random.permutation(number_images)
indices_1 = permuted_indices_ls[: int(0.5 * number_images) + 1]
indices_2 = permuted_indices_ls[int(0.5 * number_images):]
    if len(indices_1) % 2 != 0 or len(indices_2) % 2 != 0:
        raise Exception('indices lists need to have even lengths for batch norm')
    return (CityscapesSegmentation(args, root, split='train', indices_for_split=indices_1),
            CityscapesSegmentation(args, root, split='train', indices_for_split=indices_2))
class CityscapesSegmentation(data.Dataset):
NUM_CLASSES = 19
CLASSES = [
'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light',
'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car',
'truck', 'bus', 'train', 'motorcycle', 'bicycle'
]
def __init__(self, args, root, split="train", indices_for_split=None):
self.root = root
self.split = split
self.args = args
self.files = {}
self.mean = (0.485, 0.456, 0.406)
self.std = (0.229, 0.224, 0.225)
self.crop = self.args.crop_size
if split.startswith('re'):
self.images_base = os.path.join(self.root, 'leftImg8bit', self.split[2:])
self.annotations_base = os.path.join(self.root, 'gtFine', self.split[2:])
else:
self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)
self.annotations_base = os.path.join(self.root, 'gtFine', self.split)
self.files[split] = self.recursive_glob(rootdir=self.images_base, suffix='.png')
if indices_for_split is not None:
self.files[split] = np.array(self.files[split])[indices_for_split].tolist()
self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
self.valid_classes = [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33]
self.class_names = ['unlabelled', 'road', 'sidewalk', 'building', 'wall', 'fence',
'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain',
'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train',
'motorcycle', 'bicycle']
self.ignore_index = 255
self.class_map = dict(zip(self.valid_classes, range(self.NUM_CLASSES)))
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
self.transform = self.get_transform()
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
img_path = self.files[self.split][index].rstrip()
lbl_path = os.path.join(self.annotations_base,
img_path.split(os.sep)[-2],
os.path.basename(img_path)[:-15] + 'gtFine_labelIds.png')
_img = Image.open(img_path).convert('RGB')
_tmp = np.array(Image.open(lbl_path), dtype=np.uint8)
_tmp = self.encode_segmap(_tmp)
_target = Image.fromarray(_tmp)
sample = {'image': _img, 'label': _target}
return self.transform(sample)
def encode_segmap(self, mask):
# Put all void classes to zero
for _voidc in self.void_classes:
mask[mask == _voidc] = self.ignore_index
for _validc in self.valid_classes:
mask[mask == _validc] = self.class_map[_validc]
return mask
def recursive_glob(self, rootdir='.', suffix=''):
"""Performs recursive glob with given suffix and rootdir
:param rootdir is the root directory
:param suffix is the suffix to be searched
"""
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]
def get_transform(self):
if self.split == 'train':
return tr.transform_tr(self.args, self.mean, self.std)
elif self.split == 'val':
return tr.transform_val(self.args, self.mean, self.std)
elif self.split == 'test':
return tr.transform_ts(self.args, self.mean, self.std)
elif self.split == 'retrain':
return tr.transform_retr(self.args, self.mean, self.std)
elif self.split == 'reval':
return tr.transform_reval(self.args, self.mean, self.std)
if __name__ == '__main__':
from dataloaders.dataloader_utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.resize = 513
args.base_size = 513
args.crop_size = 513
cityscapes_train = CityscapesSegmentation(args, split='retrain')
dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=True, num_workers=2)
for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='cityscapes')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)
if ii == 1:
break
plt.show(block=True)
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/cityscapes.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/datasets/cityscapes.py",
"repo_id": "Cream",
"token_count": 3075
}
| 288 |
# ------------------------------------------------------------------------------
# Data augmentation following DeepLab
# (https://github.com/tensorflow/models/blob/master/research/deeplab/input_preprocess.py#L28).
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import random
import cv2
import numpy as np
from torchvision.transforms import functional as F
class Compose(object):
"""
Composes a sequence of transforms.
Arguments:
transforms: A list of transforms.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, label):
for t in self.transforms:
image, label = t(image, label)
return image, label
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
class ToTensor(object):
"""
Converts image to torch Tensor.
"""
def __call__(self, image, label):
return F.to_tensor(image), label
class Normalize(object):
"""
Normalizes image by mean and std.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, label):
image = F.normalize(image, mean=self.mean, std=self.std)
return image, label
class RandomScale(object):
"""
Applies random scale augmentation.
Arguments:
min_scale: Minimum scale value.
max_scale: Maximum scale value.
scale_step_size: The step size from minimum to maximum value.
"""
def __init__(self, min_scale, max_scale, scale_step_size):
self.min_scale = min_scale
self.max_scale = max_scale
self.scale_step_size = scale_step_size
@staticmethod
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
"""Gets a random scale value.
Args:
min_scale_factor: Minimum scale value.
max_scale_factor: Maximum scale value.
step_size: The step size from minimum to maximum value.
Returns:
A random scale value selected between minimum and maximum value.
Raises:
ValueError: min_scale_factor has unexpected value.
"""
if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
raise ValueError('Unexpected value of min_scale_factor.')
if min_scale_factor == max_scale_factor:
return min_scale_factor
# When step_size = 0, we sample the value uniformly from [min, max).
if step_size == 0:
return random.uniform(min_scale_factor, max_scale_factor)
# When step_size != 0, we randomly select one discrete value from [min, max].
num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
scale_factors = np.linspace(min_scale_factor, max_scale_factor, num_steps)
np.random.shuffle(scale_factors)
return scale_factors[0]
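    # Example (illustrative): min_scale=0.5, max_scale=2.0, scale_step_size=0.25
    # yields candidate scales [0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0], one of which
    # is drawn at random; with scale_step_size=0 the scale is instead sampled
    # uniformly from the continuous range [min_scale, max_scale).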
def __call__(self, image, label):
f_scale = self.get_random_scale(self.min_scale, self.max_scale, self.scale_step_size)
# TODO: cv2 uses align_corner=False
# TODO: use fvcore (https://github.com/facebookresearch/fvcore/blob/master/fvcore/transforms/transform.py#L377)
image_dtype = image.dtype
label_dtype = label.dtype
        image = cv2.resize(image.astype(float), None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_LINEAR)
        label = cv2.resize(label.astype(float), None, fx=f_scale, fy=f_scale, interpolation=cv2.INTER_NEAREST)
return image.astype(image_dtype), label.astype(label_dtype)
class RandomCrop(object):
"""
Applies random crop augmentation.
Arguments:
crop_h: Integer, crop height size.
crop_w: Integer, crop width size.
pad_value: Tuple, pad value for image, length 3.
ignore_label: Tuple, pad value for label, length could be 1 (semantic) or 3 (panoptic).
random_pad: Bool, when crop size larger than image size, whether to randomly pad four boundaries,
or put image to top-left and only pad bottom and right boundaries.
"""
def __init__(self, crop_h, crop_w, pad_value, ignore_label, random_pad):
self.crop_h = crop_h
self.crop_w = crop_w
self.pad_value = pad_value
self.ignore_label = ignore_label
self.random_pad = random_pad
def __call__(self, image, label):
img_h, img_w = image.shape[0], image.shape[1]
# save dtype
image_dtype = image.dtype
label_dtype = label.dtype
# padding
pad_h = max(self.crop_h - img_h, 0)
pad_w = max(self.crop_w - img_w, 0)
if pad_h > 0 or pad_w > 0:
if self.random_pad:
pad_top = random.randint(0, pad_h)
pad_bottom = pad_h - pad_top
pad_left = random.randint(0, pad_w)
pad_right = pad_w - pad_left
else:
pad_top, pad_bottom, pad_left, pad_right = 0, pad_h, 0, pad_w
img_pad = cv2.copyMakeBorder(image, pad_top, pad_bottom, pad_left, pad_right, cv2.BORDER_CONSTANT,
value=self.pad_value)
label_pad = cv2.copyMakeBorder(label, pad_top, pad_bottom, pad_left, pad_right, cv2.BORDER_CONSTANT,
value=self.ignore_label)
else:
img_pad, label_pad = image, label
img_h, img_w = img_pad.shape[0], img_pad.shape[1]
h_off = random.randint(0, img_h - self.crop_h)
w_off = random.randint(0, img_w - self.crop_w)
image = np.asarray(img_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.float32)
label = np.asarray(label_pad[h_off:h_off + self.crop_h, w_off:w_off + self.crop_w], np.float32)
return image.astype(image_dtype), label.astype(label_dtype)
class RandomHorizontalFlip(object):
"""
Applies random flip augmentation.
Arguments:
prob: Probability of flip.
"""
def __init__(self, prob=0.5):
self.prob = prob
def __call__(self, image, label):
if random.random() < self.prob:
# https://discuss.pytorch.org/t/torch-from-numpy-not-support-negative-strides/3663
image = image[:, ::-1].copy()
label = label[:, ::-1].copy()
return image, label
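# ------------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original file).
# It assumes an HxWx3 uint8 image and an HxW uint8 semantic label and chains the
# transforms in the order the dataloaders are expected to use them.
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    dummy_image = np.random.randint(0, 256, size=(512, 1024, 3), dtype=np.uint8)
    dummy_label = np.random.randint(0, 19, size=(512, 1024), dtype=np.uint8)
    demo_transform = Compose([
        RandomScale(min_scale=0.5, max_scale=2.0, scale_step_size=0.25),
        RandomCrop(crop_h=512, crop_w=512, pad_value=(128, 128, 128),
                   ignore_label=(255, ), random_pad=True),
        RandomHorizontalFlip(prob=0.5),
        ToTensor(),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
    ])
    out_image, out_label = demo_transform(dummy_image, dummy_label)
    print(out_image.shape, out_label.shape)  # torch.Size([3, 512, 512]) (512, 512)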
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/transforms/transforms.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/transforms/transforms.py",
"repo_id": "Cream",
"token_count": 2884
}
| 289 |
# ------------------------------------------------------------------------------
# Reference: https://github.com/LikeLy-Journey/SegmenTron/blob/master/segmentron/models/backbones/xception.py
# Modified by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from collections import OrderedDict
import torch.nn as nn
try:
    from torchvision.models.utils import load_state_dict_from_url
except ImportError:
    # newer torchvision versions removed this helper; fall back to torch.hub
    from torch.hub import load_state_dict_from_url
__all__ = ['Xception65', 'xception65']
model_urls = {
'xception65': 'https://github.com/LikeLy-Journey/SegmenTron/releases/download/v0.1.0/tf-xception65-270e81cf.pth',
}
class SeparableConv2d(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, relu_first=True,
bias=False, norm_layer=nn.BatchNorm2d):
super(SeparableConv2d, self).__init__()
depthwise = nn.Conv2d(inplanes, inplanes, kernel_size,
stride=stride, padding=dilation,
dilation=dilation, groups=inplanes, bias=bias)
bn_depth = norm_layer(inplanes)
pointwise = nn.Conv2d(inplanes, planes, 1, bias=bias)
bn_point = norm_layer(planes)
if relu_first:
self.block = nn.Sequential(OrderedDict([('relu', nn.ReLU()),
('depthwise', depthwise),
('bn_depth', bn_depth),
('pointwise', pointwise),
('bn_point', bn_point)
]))
else:
self.block = nn.Sequential(OrderedDict([('depthwise', depthwise),
('bn_depth', bn_depth),
('relu1', nn.ReLU(inplace=True)),
('pointwise', pointwise),
('bn_point', bn_point),
('relu2', nn.ReLU(inplace=True))
]))
def forward(self, x):
return self.block(x)
class XceptionBlock(nn.Module):
def __init__(self, channel_list, stride=1, dilation=1, skip_connection_type='conv', relu_first=True,
low_feat=False, norm_layer=nn.BatchNorm2d):
super(XceptionBlock, self).__init__()
assert len(channel_list) == 4
self.skip_connection_type = skip_connection_type
self.relu_first = relu_first
self.low_feat = low_feat
if self.skip_connection_type == 'conv':
self.conv = nn.Conv2d(channel_list[0], channel_list[-1], 1, stride=stride, bias=False)
self.bn = norm_layer(channel_list[-1])
self.sep_conv1 = SeparableConv2d(channel_list[0], channel_list[1], dilation=dilation,
relu_first=relu_first, norm_layer=norm_layer)
self.sep_conv2 = SeparableConv2d(channel_list[1], channel_list[2], dilation=dilation,
relu_first=relu_first, norm_layer=norm_layer)
self.sep_conv3 = SeparableConv2d(channel_list[2], channel_list[3], dilation=dilation,
relu_first=relu_first, stride=stride, norm_layer=norm_layer)
self.last_inp_channels = channel_list[3]
def forward(self, inputs):
sc1 = self.sep_conv1(inputs)
sc2 = self.sep_conv2(sc1)
residual = self.sep_conv3(sc2)
if self.skip_connection_type == 'conv':
shortcut = self.conv(inputs)
shortcut = self.bn(shortcut)
outputs = residual + shortcut
elif self.skip_connection_type == 'sum':
outputs = residual + inputs
elif self.skip_connection_type == 'none':
outputs = residual
else:
raise ValueError('Unsupported skip connection type.')
if self.low_feat:
return outputs, sc2
else:
return outputs
class Xception65(nn.Module):
def __init__(self, replace_stride_with_dilation=None,
norm_layer=None):
super(Xception65, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
if replace_stride_with_dilation[1]:
assert replace_stride_with_dilation[2]
output_stride = 8
elif replace_stride_with_dilation[2]:
output_stride = 16
else:
output_stride = 32
if output_stride == 32:
entry_block3_stride = 2
middle_block_dilation = 1
exit_block_dilations = (1, 1)
exit_block_stride = 2
elif output_stride == 16:
entry_block3_stride = 2
middle_block_dilation = 1
exit_block_dilations = (1, 2)
exit_block_stride = 1
elif output_stride == 8:
entry_block3_stride = 1
middle_block_dilation = 2
exit_block_dilations = (2, 4)
exit_block_stride = 1
else:
raise NotImplementedError
# Entry flow
self.conv1 = nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False)
self.bn1 = norm_layer(32)
self.relu = nn.ReLU()
self.conv2 = nn.Conv2d(32, 64, 3, stride=1, padding=1, bias=False)
self.bn2 = norm_layer(64)
self.block1 = XceptionBlock([64, 128, 128, 128], stride=2, norm_layer=norm_layer)
self.block2 = XceptionBlock([128, 256, 256, 256], stride=2, low_feat=True, norm_layer=norm_layer)
self.block3 = XceptionBlock([256, 728, 728, 728], stride=entry_block3_stride, low_feat=True,
norm_layer=norm_layer)
# Middle flow (16 units)
self.block4 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block5 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block6 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block7 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block8 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block9 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block10 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block11 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block12 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block13 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block14 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block15 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block16 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block17 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block18 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
self.block19 = XceptionBlock([728, 728, 728, 728], dilation=middle_block_dilation,
skip_connection_type='sum', norm_layer=norm_layer)
# Exit flow
self.block20 = XceptionBlock([728, 728, 1024, 1024], stride=exit_block_stride,
dilation=exit_block_dilations[0], norm_layer=norm_layer)
self.block21 = XceptionBlock([1024, 1536, 1536, 2048], dilation=exit_block_dilations[1],
skip_connection_type='none', relu_first=False, norm_layer=norm_layer)
def forward(self, x):
outputs = {}
# Entry flow
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
outputs['stem'] = x
x = self.block1(x)
x, c1 = self.block2(x) # b, h//4, w//4, 256
outputs['res2'] = c1
x, c2 = self.block3(x) # b, h//8, w//8, 728
outputs['res3'] = c2
# Middle flow
x = self.block4(x)
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
x = self.block12(x)
x = self.block13(x)
x = self.block14(x)
x = self.block15(x)
x = self.block16(x)
x = self.block17(x)
x = self.block18(x)
c3 = self.block19(x)
outputs['res4'] = c3
# Exit flow
x = self.block20(c3)
c4 = self.block21(x)
outputs['res5'] = c4
return outputs
def xception65(pretrained=False, progress=True, **kwargs):
model = Xception65(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['xception65'],
progress=progress)
model.load_state_dict(state_dict, strict=False)
return model
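# Example (illustrative, not executed here): building the backbone with an output
# stride of 16, as commonly used with DeepLab-style heads. The returned dict maps
# 'stem'/'res2'/'res3'/'res4'/'res5' to features at strides 2/4/8/16/16.
#   backbone = xception65(pretrained=False,
#                         replace_stride_with_dilation=[False, False, True])
#   feats = backbone(torch.randn(1, 3, 512, 512))  # assumes `import torch`
#   print({k: v.shape for k, v in feats.items()})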
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/backbone/xception.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/backbone/xception.py",
"repo_id": "Cream",
"token_count": 5751
}
| 290 |
# ------------------------------------------------------------------------------
# Generates the correct format for official evaluation code.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
from collections import OrderedDict
import numpy as np
def get_cityscapes_instance_format(panoptic, sem, ctr_hmp, label_divisor, score_type="semantic"):
"""
Get Cityscapes instance segmentation format.
Arguments:
panoptic: A Numpy Ndarray of shape [H, W].
sem: A Numpy Ndarray of shape [C, H, W] of raw semantic output.
ctr_hmp: A Numpy Ndarray of shape [H, W] of raw center heatmap output.
label_divisor: An Integer, used to convert panoptic id = semantic id * label_divisor + instance_id.
        score_type: A string, how to calculate confidence scores for instance segmentation.
- "semantic": average of semantic segmentation confidence within the instance mask.
- "instance": confidence of heatmap at center point of the instance mask.
- "both": multiply "semantic" and "instance".
Returns:
A List contains instance segmentation in Cityscapes format.
"""
instances = []
pan_labels = np.unique(panoptic)
for pan_lab in pan_labels:
if pan_lab % label_divisor == 0:
# This is either stuff or ignored region.
continue
ins = OrderedDict()
train_class_id = pan_lab // label_divisor
ins['pred_class'] = train_class_id
mask = panoptic == pan_lab
ins['pred_mask'] = np.array(mask, dtype='uint8')
sem_scores = sem[train_class_id, ...]
ins_score = np.mean(sem_scores[mask])
# mask center point
mask_index = np.where(panoptic == pan_lab)
center_y, center_x = np.mean(mask_index[0]), np.mean(mask_index[1])
ctr_score = ctr_hmp[int(center_y), int(center_x)]
if score_type == "semantic":
ins['score'] = ins_score
elif score_type == "instance":
ins['score'] = ctr_score
elif score_type == "both":
ins['score'] = ins_score * ctr_score
else:
raise ValueError("Unknown confidence score type: {}".format(score_type))
instances.append(ins)
return instances
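# ------------------------------------------------------------------------------
# Minimal usage sketch (added for illustration): synthetic inputs with the shapes
# described in the docstring above and a single fake 'person' (train id 11) instance.
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    label_divisor = 1000
    panoptic = np.zeros((64, 64), dtype=np.int32)
    panoptic[8:24, 8:24] = 11 * label_divisor + 1
    sem = np.random.rand(19, 64, 64).astype(np.float32)
    ctr_hmp = np.random.rand(64, 64).astype(np.float32)
    instances = get_cityscapes_instance_format(
        panoptic, sem, ctr_hmp, label_divisor, score_type="both")
    print(len(instances), instances[0]['pred_class'], instances[0]['score'])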
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/post_processing/evaluation_format.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/post_processing/evaluation_format.py",
"repo_id": "Cream",
"token_count": 901
}
| 291 |
import os
import sys
import logging
_default_level_name = os.getenv('ENGINE_LOGGING_LEVEL', 'INFO')
_default_level = logging.getLevelName(_default_level_name.upper())
class LogFormatter(logging.Formatter):
log_fout = None
date_full = '[%(asctime)s %(lineno)d@%(filename)s:%(name)s] '
date = '%(asctime)s '
msg = '%(message)s'
def format(self, record):
if record.levelno == logging.DEBUG:
mcl, mtxt = self._color_dbg, 'DBG'
elif record.levelno == logging.WARNING:
mcl, mtxt = self._color_warn, 'WRN'
elif record.levelno == logging.ERROR:
mcl, mtxt = self._color_err, 'ERR'
else:
mcl, mtxt = self._color_normal, ''
if mtxt:
mtxt += ' '
if self.log_fout:
self.__set_fmt(self.date_full + mtxt + self.msg)
formatted = super(LogFormatter, self).format(record)
# self.log_fout.write(formatted)
# self.log_fout.write('\n')
# self.log_fout.flush()
return formatted
self.__set_fmt(self._color_date(self.date) + mcl(mtxt + self.msg))
formatted = super(LogFormatter, self).format(record)
return formatted
if sys.version_info.major < 3:
def __set_fmt(self, fmt):
self._fmt = fmt
else:
def __set_fmt(self, fmt):
self._style._fmt = fmt
@staticmethod
def _color_dbg(msg):
return '\x1b[36m{}\x1b[0m'.format(msg)
@staticmethod
def _color_warn(msg):
return '\x1b[1;31m{}\x1b[0m'.format(msg)
@staticmethod
def _color_err(msg):
return '\x1b[1;4;31m{}\x1b[0m'.format(msg)
@staticmethod
def _color_omitted(msg):
return '\x1b[35m{}\x1b[0m'.format(msg)
@staticmethod
def _color_normal(msg):
return msg
@staticmethod
def _color_date(msg):
return '\x1b[32m{}\x1b[0m'.format(msg)
def get_logger(log_dir=None, log_file=None, formatter=LogFormatter):
logger = logging.getLogger()
logger.setLevel(_default_level)
del logger.handlers[:]
if log_dir and log_file:
if not os.path.isdir(log_dir): os.makedirs(log_dir)
LogFormatter.log_fout = True
file_handler = logging.FileHandler(log_file, mode='a')
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter(datefmt='%d %H:%M:%S'))
stream_handler.setLevel(0)
logger.addHandler(stream_handler)
return logger
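# Minimal usage sketch (added for illustration): a console-only logger; passing
# log_dir and log_file would additionally attach the FileHandler set up above.
if __name__ == '__main__':
    demo_logger = get_logger()
    demo_logger.info('info message')
    demo_logger.warning('warning message')
    demo_logger.error('error message')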
|
Cream/CDARTS/CDARTS_segmentation/tools/engine/logger.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/engine/logger.py",
"repo_id": "Cream",
"token_count": 1250
}
| 292 |
import torch
import cv2
cv2.setNumThreads(0)
from torch.utils import data
from utils.img_utils import random_scale, random_mirror, normalize, generate_random_crop_pos, random_crop_pad_to_shape
class TrainPre(object):
def __init__(self, config, img_mean, img_std):
self.img_mean = img_mean
self.img_std = img_std
self.config = config
def __call__(self, img, gt):
img, gt = random_mirror(img, gt)
if self.config.train_scale_array is not None:
img, gt, scale = random_scale(img, gt, self.config.train_scale_array)
img = normalize(img, self.img_mean, self.img_std)
crop_size = (self.config.image_height, self.config.image_width)
crop_pos = generate_random_crop_pos(img.shape[:2], crop_size)
p_img, _ = random_crop_pad_to_shape(img, crop_pos, crop_size, 0)
p_gt, _ = random_crop_pad_to_shape(gt, crop_pos, crop_size, 255)
p_gt = cv2.resize(p_gt, (self.config.image_width // self.config.gt_down_sampling, self.config.image_height // self.config.gt_down_sampling), interpolation=cv2.INTER_NEAREST)
p_img = p_img.transpose(2, 0, 1)
extra_dict = None
return p_img, p_gt, extra_dict
class CyclicIterator:
def __init__(self, loader, sampler, distributed):
self.loader = loader
self.sampler = sampler
self.epoch = 0
self.distributed = distributed
self._next_epoch()
def _next_epoch(self):
if self.distributed:
self.sampler.set_epoch(self.epoch)
self.iterator = iter(self.loader)
self.epoch += 1
def __len__(self):
return len(self.loader)
def __iter__(self):
return self
def __next__(self):
try:
return next(self.iterator)
except StopIteration:
self._next_epoch()
return next(self.iterator)
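# Example (illustrative): CyclicIterator wraps a (distributed) DataLoader so that
# `next()` never raises StopIteration; when an epoch is exhausted it bumps the
# sampler epoch (for proper shuffling under DDP) and restarts the iterator.
#   train_loader, train_sampler = get_train_loader(config, dataset_cls)
#   cyclic = CyclicIterator(train_loader, train_sampler, distributed=True)
#   batch = next(cyclic)  # can be called indefinitely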
def get_train_loader(config, dataset, portion=None, worker=None, test=False):
data_setting = {'img_root': config.img_root_folder,
'gt_root': config.gt_root_folder,
'train_source': config.train_source,
'eval_source': config.eval_source,
'down_sampling': config.down_sampling,
'portion': portion}
if test:
data_setting = {'img_root': config.img_root_folder,
'gt_root': config.gt_root_folder,
'train_source': config.train_eval_source,
'eval_source': config.eval_source,
'down_sampling': config.down_sampling,
'portion': portion}
train_preprocess = TrainPre(config, config.image_mean, config.image_std)
train_dataset = dataset(data_setting, "train", train_preprocess, config.batch_size * config.niters_per_epoch)
is_shuffle = True
batch_size = config.batch_size
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
train_loader = data.DataLoader(train_dataset,
batch_size=batch_size,
sampler = train_sampler,
num_workers=config.num_workers if worker is None else worker,
# drop_last=True,
# shuffle=is_shuffle,
pin_memory=True)
return train_loader, train_sampler
|
Cream/CDARTS/CDARTS_segmentation/train/dataloader.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/dataloader.py",
"repo_id": "Cream",
"token_count": 1672
}
| 293 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
# This file defines the residual block architectures (BasicBlock and Bottleneck).
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=True)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
def __init__(self, inplanes, planes, stride=1, expansion=4):
super(Bottleneck, self).__init__()
planes = int(planes / expansion)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(
planes,
planes * expansion,
kernel_size=1,
bias=True)
self.bn3 = nn.BatchNorm2d(planes * expansion)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
self.expansion = expansion
if inplanes != planes * self.expansion:
self.downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * self.expansion,
kernel_size=1, stride=stride, bias=True),
nn.BatchNorm2d(planes * self.expansion),
)
else:
self.downsample = None
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def get_Bottleneck(in_c, out_c, stride):
return Bottleneck(in_c, out_c, stride=stride)
def get_BasicBlock(in_c, out_c, stride):
return BasicBlock(in_c, out_c, stride=stride)
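# ------------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; channel sizes are arbitrary).
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    x = torch.randn(2, 64, 56, 56)
    basic = get_BasicBlock(64, 64, stride=1)
    bottleneck = get_Bottleneck(64, 256, stride=2)
    print(basic(x).shape)       # torch.Size([2, 64, 56, 56])
    print(bottleneck(x).shape)  # torch.Size([2, 256, 28, 28])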
|
Cream/Cream/lib/models/blocks/residual_block.py/0
|
{
"file_path": "Cream/Cream/lib/models/blocks/residual_block.py",
"repo_id": "Cream",
"token_count": 1474
}
| 294 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
import os
import sys
import datetime
import torch
import numpy as np
import torch.nn as nn
import _init_paths
# import timm packages
from timm.utils import CheckpointSaver, update_summary
from timm.loss import LabelSmoothingCrossEntropy
from timm.data import Dataset, create_loader
from timm.models import resume_checkpoint
# use apex DistributedDataParallel if available, otherwise fall back to torch.nn.parallel.DistributedDataParallel
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.parallel import convert_syncbn_model
USE_APEX = True
except ImportError:
from torch.nn.parallel import DistributedDataParallel as DDP
USE_APEX = False
# import models and training functions
from lib.utils.flops_table import FlopsEst
from lib.core.train import train_epoch, validate
from lib.models.structures.supernet import gen_supernet
from lib.models.PrioritizedBoard import PrioritizedBoard
from lib.models.MetaMatchingNetwork import MetaMatchingNetwork
from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_STD, IMAGENET_DEFAULT_MEAN
from lib.utils.util import parse_config_args, get_logger, \
create_optimizer_supernet, create_supernet_scheduler
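# Example launch (illustrative; the accepted flags are defined by
# parse_config_args and the YAML experiment config, not by this comment):
#   python -m torch.distributed.launch --nproc_per_node=8 tools/train.py \
#       --cfg experiments/configs/train/train.yaml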
def main():
args, cfg = parse_config_args('super net training')
# resolve logging
output_dir = os.path.join(cfg.SAVE_PATH,
"{}-{}".format(datetime.date.today().strftime('%m%d'),
cfg.MODEL))
if args.local_rank == 0:
logger = get_logger(os.path.join(output_dir, "train.log"))
else:
logger = None
# initialize distributed parameters
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
if args.local_rank == 0:
logger.info(
'Training on Process %d with %d GPUs.',
args.local_rank, cfg.NUM_GPU)
# fix random seeds
torch.manual_seed(cfg.SEED)
torch.cuda.manual_seed_all(cfg.SEED)
np.random.seed(cfg.SEED)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# generate supernet
model, sta_num, resolution = gen_supernet(
flops_minimum=cfg.SUPERNET.FLOPS_MINIMUM,
flops_maximum=cfg.SUPERNET.FLOPS_MAXIMUM,
num_classes=cfg.DATASET.NUM_CLASSES,
drop_rate=cfg.NET.DROPOUT_RATE,
global_pool=cfg.NET.GP,
resunit=cfg.SUPERNET.RESUNIT,
dil_conv=cfg.SUPERNET.DIL_CONV,
slice=cfg.SUPERNET.SLICE,
verbose=cfg.VERBOSE,
logger=logger)
# initialize meta matching networks
MetaMN = MetaMatchingNetwork(cfg)
# number of choice blocks in supernet
choice_num = len(model.blocks[1][0])
if args.local_rank == 0:
logger.info('Supernet created, param count: %d', (
sum([m.numel() for m in model.parameters()])))
logger.info('resolution: %d', (resolution))
logger.info('choice number: %d', (choice_num))
    # initialize prioritized board
prioritized_board = PrioritizedBoard(cfg, CHOICE_NUM=choice_num, sta_num=sta_num)
# initialize flops look-up table
model_est = FlopsEst(model)
# optionally resume from a checkpoint
optimizer_state = None
resume_epoch = None
if cfg.AUTO_RESUME:
optimizer_state, resume_epoch = resume_checkpoint(
model, cfg.RESUME_PATH)
# create optimizer and resume from checkpoint
optimizer = create_optimizer_supernet(cfg, model, USE_APEX)
if optimizer_state is not None:
optimizer.load_state_dict(optimizer_state['optimizer'])
model = model.cuda()
# convert model to distributed mode
if cfg.BATCHNORM.SYNC_BN:
try:
if USE_APEX:
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
logger.info('Converted model to use Synchronized BatchNorm.')
except Exception as exception:
logger.info(
'Failed to enable Synchronized BatchNorm. '
'Install Apex or Torch >= 1.1 with Exception %s', exception)
if USE_APEX:
model = DDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
logger.info(
"Using torch DistributedDataParallel. Install NVIDIA Apex for Apex DDP.")
# can use device str in Torch >= 1.1
model = DDP(model, device_ids=[args.local_rank])
# create learning rate scheduler
lr_scheduler, num_epochs = create_supernet_scheduler(cfg, optimizer)
start_epoch = resume_epoch if resume_epoch is not None else 0
if start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
logger.info('Scheduled epochs: %d', num_epochs)
# imagenet train dataset
train_dir = os.path.join(cfg.DATA_DIR, 'train')
if not os.path.exists(train_dir):
logger.info('Training folder does not exist at: %s', train_dir)
sys.exit()
dataset_train = Dataset(train_dir)
loader_train = create_loader(
dataset_train,
input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE),
batch_size=cfg.DATASET.BATCH_SIZE,
is_training=True,
use_prefetcher=True,
re_prob=cfg.AUGMENTATION.RE_PROB,
re_mode=cfg.AUGMENTATION.RE_MODE,
color_jitter=cfg.AUGMENTATION.COLOR_JITTER,
interpolation='random',
num_workers=cfg.WORKERS,
distributed=True,
collate_fn=None,
crop_pct=DEFAULT_CROP_PCT,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD
)
# imagenet validation dataset
eval_dir = os.path.join(cfg.DATA_DIR, 'val')
if not os.path.isdir(eval_dir):
logger.info('Validation folder does not exist at: %s', eval_dir)
sys.exit()
dataset_eval = Dataset(eval_dir)
loader_eval = create_loader(
dataset_eval,
input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE),
batch_size=4 * cfg.DATASET.BATCH_SIZE,
is_training=False,
use_prefetcher=True,
num_workers=cfg.WORKERS,
distributed=True,
crop_pct=DEFAULT_CROP_PCT,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD,
interpolation=cfg.DATASET.INTERPOLATION
)
# whether to use label smoothing
if cfg.AUGMENTATION.SMOOTHING > 0.:
train_loss_fn = LabelSmoothingCrossEntropy(
smoothing=cfg.AUGMENTATION.SMOOTHING).cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
else:
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = train_loss_fn
# initialize training parameters
eval_metric = cfg.EVAL_METRICS
best_metric, best_epoch, saver, best_children_pool = None, None, None, []
if args.local_rank == 0:
decreasing = True if eval_metric == 'loss' else False
saver = CheckpointSaver(
checkpoint_dir=output_dir,
decreasing=decreasing)
# training scheme
try:
for epoch in range(start_epoch, num_epochs):
loader_train.sampler.set_epoch(epoch)
# train one epoch
train_metrics = train_epoch(epoch, model, loader_train, optimizer,
train_loss_fn, prioritized_board, MetaMN, cfg,
lr_scheduler=lr_scheduler, saver=saver,
output_dir=output_dir, logger=logger,
est=model_est, local_rank=args.local_rank)
# evaluate one epoch
eval_metrics = validate(model, loader_eval, validate_loss_fn,
prioritized_board, cfg,
local_rank=args.local_rank, logger=logger)
update_summary(epoch, train_metrics, eval_metrics, os.path.join(
output_dir, 'summary.csv'), write_header=best_metric is None)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(
model, optimizer, cfg,
epoch=epoch, metric=save_metric)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
|
Cream/Cream/tools/train.py/0
|
{
"file_path": "Cream/Cream/tools/train.py",
"repo_id": "Cream",
"token_count": 3892
}
| 295 |
'''
Build the EfficientViT model family
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from .efficientvit import EfficientViT
from timm.models.registry import register_model
EfficientViT_m0 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [64, 128, 192],
'depth': [1, 2, 3],
'num_heads': [4, 4, 4],
'window_size': [7, 7, 7],
'kernels': [5, 5, 5, 5],
}
EfficientViT_m1 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [128, 144, 192],
'depth': [1, 2, 3],
'num_heads': [2, 3, 3],
'window_size': [7, 7, 7],
'kernels': [7, 5, 3, 3],
}
EfficientViT_m2 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [128, 192, 224],
'depth': [1, 2, 3],
'num_heads': [4, 3, 2],
'window_size': [7, 7, 7],
'kernels': [7, 5, 3, 3],
}
EfficientViT_m3 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [128, 240, 320],
'depth': [1, 2, 3],
'num_heads': [4, 3, 4],
'window_size': [7, 7, 7],
'kernels': [5, 5, 5, 5],
}
EfficientViT_m4 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [128, 256, 384],
'depth': [1, 2, 3],
'num_heads': [4, 4, 4],
'window_size': [7, 7, 7],
'kernels': [7, 5, 3, 3],
}
EfficientViT_m5 = {
'img_size': 224,
'patch_size': 16,
'embed_dim': [192, 288, 384],
'depth': [1, 3, 4],
'num_heads': [3, 3, 4],
'window_size': [7, 7, 7],
'kernels': [7, 5, 3, 3],
}
@register_model
def EfficientViT_M0(num_classes=1000, pretrained=False, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m0):
model = EfficientViT(num_classes=num_classes, distillation=distillation, **model_cfg)
if pretrained:
pretrained = _checkpoint_url_format.format(pretrained)
checkpoint = torch.hub.load_state_dict_from_url(
pretrained, map_location='cpu')
d = checkpoint['model']
D = model.state_dict()
for k in d.keys():
if D[k].shape != d[k].shape:
d[k] = d[k][:, :, None, None]
model.load_state_dict(d)
if fuse:
replace_batchnorm(model)
return model
@register_model
def EfficientViT_M1(num_classes=1000, pretrained=False, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m1):
model = EfficientViT(num_classes=num_classes, distillation=distillation, **model_cfg)
if pretrained:
pretrained = _checkpoint_url_format.format(pretrained)
checkpoint = torch.hub.load_state_dict_from_url(
pretrained, map_location='cpu')
d = checkpoint['model']
D = model.state_dict()
for k in d.keys():
if D[k].shape != d[k].shape:
d[k] = d[k][:, :, None, None]
model.load_state_dict(d)
if fuse:
replace_batchnorm(model)
return model
@register_model
def EfficientViT_M2(num_classes=1000, pretrained=False, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m2):
model = EfficientViT(num_classes=num_classes, distillation=distillation, **model_cfg)
if pretrained:
pretrained = _checkpoint_url_format.format(pretrained)
checkpoint = torch.hub.load_state_dict_from_url(
pretrained, map_location='cpu')
d = checkpoint['model']
D = model.state_dict()
for k in d.keys():
if D[k].shape != d[k].shape:
d[k] = d[k][:, :, None, None]
model.load_state_dict(d)
if fuse:
replace_batchnorm(model)
return model
@register_model
def EfficientViT_M3(num_classes=1000, pretrained=False, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m3):
model = EfficientViT(num_classes=num_classes, distillation=distillation, **model_cfg)
if pretrained:
pretrained = _checkpoint_url_format.format(pretrained)
checkpoint = torch.hub.load_state_dict_from_url(
pretrained, map_location='cpu')
d = checkpoint['model']
D = model.state_dict()
for k in d.keys():
if D[k].shape != d[k].shape:
d[k] = d[k][:, :, None, None]
model.load_state_dict(d)
if fuse:
replace_batchnorm(model)
return model
@register_model
def EfficientViT_M4(num_classes=1000, pretrained=False, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m4):
model = EfficientViT(num_classes=num_classes, distillation=distillation, **model_cfg)
if pretrained:
pretrained = _checkpoint_url_format.format(pretrained)
checkpoint = torch.hub.load_state_dict_from_url(
pretrained, map_location='cpu')
d = checkpoint['model']
D = model.state_dict()
for k in d.keys():
if D[k].shape != d[k].shape:
d[k] = d[k][:, :, None, None]
model.load_state_dict(d)
if fuse:
replace_batchnorm(model)
return model
@register_model
def EfficientViT_M5(num_classes=1000, pretrained=False, distillation=False, fuse=False, pretrained_cfg=None, model_cfg=EfficientViT_m5):
model = EfficientViT(num_classes=num_classes, distillation=distillation, **model_cfg)
if pretrained:
pretrained = _checkpoint_url_format.format(pretrained)
checkpoint = torch.hub.load_state_dict_from_url(
pretrained, map_location='cpu')
d = checkpoint['model']
D = model.state_dict()
for k in d.keys():
if D[k].shape != d[k].shape:
d[k] = d[k][:, :, None, None]
model.load_state_dict(d)
if fuse:
replace_batchnorm(model)
return model
def replace_batchnorm(net):
for child_name, child in net.named_children():
if hasattr(child, 'fuse'):
setattr(net, child_name, child.fuse())
elif isinstance(child, torch.nn.BatchNorm2d):
setattr(net, child_name, torch.nn.Identity())
else:
replace_batchnorm(child)
_checkpoint_url_format = \
'https://github.com/xinyuliu-jeffrey/EfficientViT_Model_Zoo/releases/download/v1.0/{}.pth'
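# Example (illustrative, not part of the original file): build the smallest
# variant without downloading weights and run a dummy forward pass, assuming the
# package layout `classification/model/build.py`:
#   from model.build import EfficientViT_M0
#   model = EfficientViT_M0(num_classes=1000, pretrained=False)
#   out = model(torch.randn(1, 3, 224, 224))  # expected shape: (1, 1000)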
|
Cream/EfficientViT/classification/model/build.py/0
|
{
"file_path": "Cream/EfficientViT/classification/model/build.py",
"repo_id": "Cream",
"token_count": 2999
}
| 296 |
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
custom_hooks = [dict(type='NumClassCheckHook')]
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
Cream/EfficientViT/downstream/configs/_base_/default_runtime.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/configs/_base_/default_runtime.py",
"repo_id": "Cream",
"token_count": 156
}
| 297 |
# model settings
model = dict(
type='RPN',
pretrained='open-mmlab://detectron2/resnet50_caffe',
backbone=dict(
type='ResNet',
depth=50,
num_stages=3,
strides=(1, 2, 2),
dilations=(1, 1, 1),
out_indices=(2, ),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe'),
neck=None,
rpn_head=dict(
type='RPNHead',
in_channels=1024,
feat_channels=1024,
anchor_generator=dict(
type='AnchorGenerator',
scales=[2, 4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=12000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0)))
|
Cream/EfficientViT/downstream/configs/_base_/models/rpn_r50_caffe_c4.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/configs/_base_/models/rpn_r50_caffe_c4.py",
"repo_id": "Cream",
"token_count": 1039
}
| 298 |
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import platform
import shutil
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.runner import RUNNERS, EpochBasedRunner
from .checkpoint import save_checkpoint
try:
import apex
except:
print('apex is not installed')
@RUNNERS.register_module()
class EpochBasedRunnerAmp(EpochBasedRunner):
"""Epoch-based Runner with AMP support.
This runner train models epoch by epoch.
"""
def save_checkpoint(self,
out_dir,
filename_tmpl='epoch_{}.pth',
save_optimizer=True,
meta=None,
create_symlink=True):
"""Save the checkpoint.
Args:
out_dir (str): The directory that checkpoints are saved.
filename_tmpl (str, optional): The checkpoint filename template,
which contains a placeholder for the epoch number.
Defaults to 'epoch_{}.pth'.
save_optimizer (bool, optional): Whether to save the optimizer to
the checkpoint. Defaults to True.
meta (dict, optional): The meta information to be saved in the
checkpoint. Defaults to None.
create_symlink (bool, optional): Whether to create a symlink
"latest.pth" to point to the latest checkpoint.
Defaults to True.
"""
if meta is None:
meta = dict(epoch=self.epoch + 1, iter=self.iter)
elif isinstance(meta, dict):
meta.update(epoch=self.epoch + 1, iter=self.iter)
else:
raise TypeError(
f'meta should be a dict or None, but got {type(meta)}')
if self.meta is not None:
meta.update(self.meta)
filename = filename_tmpl.format(self.epoch + 1)
filepath = osp.join(out_dir, filename)
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
# in some environments, `os.symlink` is not supported, you may need to
# set `create_symlink` to False
if create_symlink:
dst_file = osp.join(out_dir, 'latest.pth')
if platform.system() != 'Windows':
mmcv.symlink(filename, dst_file)
else:
shutil.copy(filepath, dst_file)
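    # Example (illustrative): `runner.save_checkpoint('work_dirs/exp')` writes
    # work_dirs/exp/epoch_<epoch+1>.pth and refreshes 'latest.pth' as a symlink
    # (or a copy on Windows).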
def resume(self,
checkpoint,
resume_optimizer=True,
map_location='default'):
if map_location == 'default':
if torch.cuda.is_available():
device_id = torch.cuda.current_device()
checkpoint = self.load_checkpoint(
checkpoint,
map_location=lambda storage, loc: storage.cuda(device_id))
else:
checkpoint = self.load_checkpoint(checkpoint)
else:
checkpoint = self.load_checkpoint(
checkpoint, map_location=map_location)
self._epoch = checkpoint['meta']['epoch']
self._iter = checkpoint['meta']['iter']
if 'optimizer' in checkpoint and resume_optimizer:
if isinstance(self.optimizer, Optimizer):
self.optimizer.load_state_dict(checkpoint['optimizer'])
elif isinstance(self.optimizer, dict):
for k in self.optimizer.keys():
self.optimizer[k].load_state_dict(
checkpoint['optimizer'][k])
else:
raise TypeError(
'Optimizer should be dict or torch.optim.Optimizer '
f'but got {type(self.optimizer)}')
if 'amp' in checkpoint:
apex.amp.load_state_dict(checkpoint['amp'])
self.logger.info('load amp state dict')
self.logger.info('resumed epoch %d, iter %d', self.epoch, self.iter)
|
Cream/EfficientViT/downstream/mmcv_custom/runner/epoch_based_runner.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/mmcv_custom/runner/epoch_based_runner.py",
"repo_id": "Cream",
"token_count": 1873
}
| 299 |
import torch
import torch.nn as nn
from functools import partial
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.models.helpers import load_pretrained
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.resnet import resnet26d, resnet50d
from timm.models.registry import register_model
from timm.models.vision_transformer import _cfg, default_cfgs,\
Mlp, PatchEmbed
try:
from timm.models.vision_transformer import HybridEmbed
except ImportError:
# for higher version of timm
from timm.models.vision_transformer_hybrid import HybridEmbed
from irpe import build_rpe
class RepeatedModuleList(nn.Module):
def __init__(self, instances, repeated_times):
super().__init__()
assert len(instances) == repeated_times
self.instances = nn.ModuleList(instances)
self.repeated_times = repeated_times
def forward(self, *args, **kwargs):
r = self._repeated_id
return self.instances[r](*args, **kwargs)
def __repr__(self):
msg = super().__repr__()
msg += f'(repeated_times={self.repeated_times})'
return msg
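# Example (illustrative): with repeated_times=2 a RepeatedModuleList keeps two
# parameter sets for the same logical sub-module; RepeatedMiniBlock below sets
# `_repeated_id` to 0, 1, ... on every pass, so the heavy attention/MLP weights
# are shared across passes while these small per-pass modules stay separate.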
class MiniAttention(nn.Module):
'''
Attention with image relative position encoding
'''
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., rpe_config=None, repeated_times=1, use_transform=False):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
# image relative position encoding
rpe_qkvs = []
for _ in range(repeated_times):
rpe_qkv = build_rpe(rpe_config,
head_dim=head_dim,
num_heads=num_heads)
rpe_qkvs.append(rpe_qkv)
assert len(rpe_qkvs) == repeated_times
assert all(len(r) == 3 for r in rpe_qkvs)
rpe_q, rpe_k, rpe_v = zip(*rpe_qkvs)
if rpe_q[0] is not None:
self.rpe_q = RepeatedModuleList(rpe_q, repeated_times)
else:
self.rpe_q = None
if rpe_k[0] is not None:
self.rpe_k = RepeatedModuleList(rpe_k, repeated_times)
else:
self.rpe_k = None
if rpe_v[0] is not None:
self.rpe_v = RepeatedModuleList(rpe_v, repeated_times)
else:
self.rpe_v = None
if use_transform:
transform_bias = False
self.conv_l = RepeatedModuleList([nn.Conv2d(num_heads, num_heads, kernel_size=1, bias=transform_bias) \
for _ in range(repeated_times)], repeated_times)
self.conv_w = RepeatedModuleList([nn.Conv2d(num_heads, num_heads, kernel_size=1, bias=transform_bias) \
for _ in range(repeated_times)], repeated_times)
else:
self.conv_l = self.conv_w = None
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q *= self.scale
attn = (q @ k.transpose(-2, -1))
# image relative position on keys
if self.rpe_k is not None:
attn += self.rpe_k(q)
# image relative position on queries
if self.rpe_q is not None:
attn += self.rpe_q(k * self.scale).transpose(2, 3)
if self.conv_l is not None:
attn = self.conv_l(attn)
attn = attn.softmax(dim=-1)
if self.conv_w is not None:
attn = self.conv_w(attn)
attn = self.attn_drop(attn)
out = attn @ v
# image relative position on values
if self.rpe_v is not None:
out += self.rpe_v(attn)
x = out.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def init_weights(self):
def _init_weights(m):
if isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Conv2d) and m.bias is not None:
nn.init.constant_(m.bias, 0)
for m in [self.conv_l, self.conv_w]:
if m is not None:
m.apply(_init_weights)
class MiniBlock(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_paths=[0.], act_layer=nn.GELU, norm_layer=nn.LayerNorm, rpe_config=None, repeated_times=1, use_transform=False):
super().__init__()
assert len(drop_paths) == repeated_times
if repeated_times > 1:
self.norm1 = RepeatedModuleList([norm_layer(dim) for _ in range(repeated_times)], repeated_times)
self.norm2 = RepeatedModuleList([norm_layer(dim) for _ in range(repeated_times)], repeated_times)
self.attn = MiniAttention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, rpe_config=rpe_config,
repeated_times=repeated_times,
use_transform=use_transform)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_paths = nn.ModuleList([DropPath(drop_path) if drop_path > 0. else nn.Identity() for drop_path in drop_paths])
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
drop_path = self.drop_paths[self._repeated_id]
x = x + drop_path(self.attn(self.norm1(x)))
x = x + drop_path(self.mlp(self.norm2(x)))
return x
class RepeatedMiniBlock(nn.Module):
def __init__(self, repeated_times, **kwargs):
super().__init__()
self.repeated_times = repeated_times
self.block = MiniBlock(repeated_times=repeated_times, **kwargs)
def set_repeated_times_fn(m):
m._repeated_times = repeated_times
self.apply(set_repeated_times_fn)
def forward(self, x):
for i, t in enumerate(range(self.repeated_times)):
def set_repeated_id(m):
m._repeated_id = i
self.block.apply(set_repeated_id)
x = self.block(x)
return x
def __repr__(self):
msg = super().__repr__()
msg += f'(repeated_times={self.repeated_times})'
return msg
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
and image relative position encoding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., hybrid_backbone=None, norm_layer=nn.LayerNorm, rpe_config=None,
use_cls_token=True,
repeated_times=1,
use_transform=False):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
if use_cls_token:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
pos_embed_len = 1 + num_patches if use_cls_token else num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, pos_embed_len, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
assert depth % repeated_times == 0
depth //= repeated_times
blocks = []
block_kwargs = dict(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate,
norm_layer=norm_layer, rpe_config=rpe_config,
use_transform=use_transform)
for i in range(depth):
if repeated_times > 1:
block = RepeatedMiniBlock(
repeated_times=repeated_times,
drop_paths=dpr[i * repeated_times : (i + 1) * repeated_times],
**block_kwargs,
)
else:
block = MiniBlock(drop_paths=[dpr[i]], **block_kwargs)
blocks.append(block)
self.blocks = nn.ModuleList(blocks)
self.norm = norm_layer(embed_dim)
# NOTE as per official impl, we could have a pre-logits representation dense layer + tanh here
#self.repr = nn.Linear(embed_dim, representation_size)
#self.repr_act = nn.Tanh()
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if not use_cls_token:
self.avgpool = nn.AdaptiveAvgPool1d(1)
else:
self.avgpool = None
trunc_normal_(self.pos_embed, std=.02)
if self.cls_token is not None:
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
self.apply(self._init_custom_weights)
def set_repeated_id(m):
m._repeated_id = 0
self.apply(set_repeated_id)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def _init_custom_weights(self, m):
if hasattr(m, 'init_weights'):
m.init_weights()
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
x = self.patch_embed(x)
if self.cls_token is not None:
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
if self.cls_token is not None:
return x[:, 0]
else:
return x
def forward(self, x):
x = self.forward_features(x)
if self.avgpool is not None:
x = self.avgpool(x.transpose(1, 2)) # (B, C, 1)
x = torch.flatten(x, 1)
x = self.head(x)
return x
|
Cream/MiniViT/Mini-DeiT/mini_vision_transformer.py/0
|
{
"file_path": "Cream/MiniViT/Mini-DeiT/mini_vision_transformer.py",
"repo_id": "Cream",
"token_count": 5763
}
| 300 |
DATA:
IMG_SIZE: 384
MODEL:
TYPE: swin_minivit_distill
NAME: swin_base_patch4_window7_224to384_minivit
DROP_PATH_RATE: 0.2
SWIN:
EMBED_DIM: 128
DEPTHS: [ 2, 2, 18, 2 ]
NUM_HEADS: [ 4, 8, 16, 32 ]
WINDOW_SIZE: 12
MINIVIT:
SEPARATE_LAYERNUM_LIST: [1, 1, 9, 1]
TRAIN:
EPOCHS: 30
WARMUP_EPOCHS: 5
WEIGHT_DECAY: 1e-8
BASE_LR: 2e-5
CLIP_GRAD: 5.0
|
Cream/MiniViT/Mini-Swin/configs/swin_base_patch4_window7_224to384_minivit_sharenum2_adamw.yaml/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/configs/swin_base_patch4_window7_224to384_minivit_sharenum2_adamw.yaml",
"repo_id": "Cream",
"token_count": 211
}
| 301 |
from .swin_transformer import SwinTransformer
from .swin_transformer_minivit import SwinTransformerMiniViT
from .swin_transformer_minivit_distill import SwinTransformerMiniViTDistill
from .swin_mlp import SwinMLP
def build_model(config):
model_type = config.MODEL.TYPE
if model_type == 'swin':
model = SwinTransformer(img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
)
elif model_type == 'swin_minivit':
model = SwinTransformerMiniViT(img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
## The following arguments are for minivit
is_sep_layernorm = config.MINIVIT.IS_SEP_LAYERNORM,
is_transform_FFN = config.MINIVIT.IS_TRANSFORM_FFN,
is_transform_heads = config.MINIVIT.IS_TRANSFORM_HEADS,
separate_layer_num_list = config.MINIVIT.SEPARATE_LAYERNUM_LIST,
)
elif model_type == 'swin_minivit_distill':
model = SwinTransformerMiniViTDistill(img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN.PATCH_SIZE,
in_chans=config.MODEL.SWIN.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN.EMBED_DIM,
depths=config.MODEL.SWIN.DEPTHS,
num_heads=config.MODEL.SWIN.NUM_HEADS,
window_size=config.MODEL.SWIN.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN.MLP_RATIO,
qkv_bias=config.MODEL.SWIN.QKV_BIAS,
qk_scale=config.MODEL.SWIN.QK_SCALE,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN.APE,
patch_norm=config.MODEL.SWIN.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT,
## The following arguments are for minivit
is_sep_layernorm = config.MINIVIT.IS_SEP_LAYERNORM,
is_transform_FFN = config.MINIVIT.IS_TRANSFORM_FFN,
is_transform_heads = config.MINIVIT.IS_TRANSFORM_HEADS,
separate_layer_num_list = config.MINIVIT.SEPARATE_LAYERNUM_LIST,
is_student = config.DISTILL.IS_STUDENT,
fit_size_C = config.DISTILL.FIT_SIZE_C,
)
elif model_type == 'swin_mlp':
model = SwinMLP(img_size=config.DATA.IMG_SIZE,
patch_size=config.MODEL.SWIN_MLP.PATCH_SIZE,
in_chans=config.MODEL.SWIN_MLP.IN_CHANS,
num_classes=config.MODEL.NUM_CLASSES,
embed_dim=config.MODEL.SWIN_MLP.EMBED_DIM,
depths=config.MODEL.SWIN_MLP.DEPTHS,
num_heads=config.MODEL.SWIN_MLP.NUM_HEADS,
window_size=config.MODEL.SWIN_MLP.WINDOW_SIZE,
mlp_ratio=config.MODEL.SWIN_MLP.MLP_RATIO,
drop_rate=config.MODEL.DROP_RATE,
drop_path_rate=config.MODEL.DROP_PATH_RATE,
ape=config.MODEL.SWIN_MLP.APE,
patch_norm=config.MODEL.SWIN_MLP.PATCH_NORM,
use_checkpoint=config.TRAIN.USE_CHECKPOINT)
else:
raise NotImplementedError(f"Unkown model: {model_type}")
return model
|
Cream/MiniViT/Mini-Swin/models/build.py/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/models/build.py",
"repo_id": "Cream",
"token_count": 3821
}
| 302 |
install: ## [Local development] Upgrade pip, install requirements, install package.
python -m pip install -U pip
python -m pip install -e .
install-dev: ## [Local development] Install test requirements
python -m pip install -r requirements-test.txt
test: ## [Local development] Run unit tests
python -m pytest -x -s -v tests
|
Cream/TinyCLIP/Makefile/0
|
{
"file_path": "Cream/TinyCLIP/Makefile",
"repo_id": "Cream",
"token_count": 91
}
| 303 |
""" Setup
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
exec(open('src/open_clip/version.py').read())
setup(
name='open_clip_torch',
version=__version__,
description='OpenCLIP',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/mlfoundations/open_clip',
author='',
author_email='',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# Note that this is a string of words separated by whitespace, not a list.
keywords='CLIP pretrained',
package_dir={'': 'src'},
packages=find_packages(where='src', exclude=['training']),
include_package_data=True,
install_requires=[
'torch >= 1.9',
'torchvision',
'ftfy',
'regex',
'tqdm',
'huggingface_hub',
],
python_requires='>=3.6',
)
|
Cream/TinyCLIP/setup.py/0
|
{
"file_path": "Cream/TinyCLIP/setup.py",
"repo_id": "Cream",
"token_count": 711
}
| 304 |
import os
import torch
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def is_global_master(args):
return args.rank == 0
def is_local_master(args):
return args.local_rank == 0
def is_master(args, local=False):
return is_local_master(args) if local else is_global_master(args)
def is_using_horovod():
# NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set
# Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required...
ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]
pmi_vars = ["PMI_RANK", "PMI_SIZE"]
if all([var in os.environ for var in ompi_vars]) or all([var in os.environ for var in pmi_vars]):
return True
else:
return False
def is_using_distributed():
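    # NOTE: hard-coded to True, so the env-var based checks below are never reached.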
return True
if 'WORLD_SIZE' in os.environ:
return int(os.environ['WORLD_SIZE']) > 1
if 'SLURM_NTASKS' in os.environ:
return int(os.environ['SLURM_NTASKS']) > 1
return False
def world_info_from_env():
local_rank = 0
for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
if v in os.environ:
local_rank = int(os.environ[v])
break
else:
raise Exception('local rank not found')
global_rank = 0
for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'):
if v in os.environ:
global_rank = int(os.environ[v])
break
else:
raise Exception('global rank not found')
world_size = 1
for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'):
if v in os.environ:
world_size = int(os.environ[v])
break
else:
raise Exception('world size not found')
return local_rank, global_rank, world_size
def init_distributed_device(args):
# Distributed training = training on more than one GPU.
# Works in both single and multi-node scenarios.
args.distributed = False
args.world_size = 1
args.rank = 0 # global rank
args.local_rank = 0
if args.horovod:
assert hvd is not None, "Horovod is not installed"
hvd.init()
args.local_rank = int(hvd.local_rank())
args.rank = hvd.rank()
args.world_size = hvd.size()
args.distributed = True
os.environ['LOCAL_RANK'] = str(args.local_rank)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
elif is_using_distributed():
if 'SLURM_PROCID' in os.environ:
# DDP via SLURM
args.local_rank, args.rank, args.world_size = world_info_from_env()
# SLURM var -> torch.distributed vars in case needed
os.environ['LOCAL_RANK'] = str(args.local_rank)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
else:
# DDP via torchrun, torch.distributed.launch
args.local_rank, _, _ = world_info_from_env()
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url)
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
args.distributed = True
if torch.cuda.is_available():
if args.distributed and not args.no_set_device_rank:
device = 'cuda:%d' % args.local_rank
else:
device = 'cuda:0'
torch.cuda.set_device(device)
else:
device = 'cpu'
args.device = device
device = torch.device(device)
return device
|
Cream/TinyCLIP/src/training/distributed.py/0
|
{
"file_path": "Cream/TinyCLIP/src/training/distributed.py",
"repo_id": "Cream",
"token_count": 1846
}
| 305 |
from .build import build_loader, build_transform
from .imagenet_classnames import imagenet_classnames
|
Cream/TinyViT/data/__init__.py/0
|
{
"file_path": "Cream/TinyViT/data/__init__.py",
"repo_id": "Cream",
"token_count": 28
}
| 306 |
import os
def load_class_map(map_or_filename, root=''):
if isinstance(map_or_filename, dict):
        assert map_or_filename, 'class_map dict must be non-empty'
return map_or_filename
class_map_path = map_or_filename
if not os.path.exists(class_map_path):
class_map_path = os.path.join(root, class_map_path)
assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % map_or_filename
class_map_ext = os.path.splitext(map_or_filename)[-1].lower()
if class_map_ext == '.txt':
with open(class_map_path) as f:
class_to_idx = {v.strip(): k for k, v in enumerate(f)}
else:
assert False, f'Unsupported class map file extension ({class_map_ext}).'
return class_to_idx
|
Cream/TinyViT/data/augmentation/parsers/class_map.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/parsers/class_map.py",
"repo_id": "Cream",
"token_count": 318
}
| 307 |
# --------------------------------------------------------
# TinyViT Data Sampler
# Copyright (c) 2022 Microsoft
# Refer to https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py
# --------------------------------------------------------
import torch
from typing import TypeVar, Optional, Iterator
import torch
from torch.utils.data import Sampler, Dataset
import torch.distributed as dist
T_co = TypeVar('T_co', covariant=True)
class MyDistributedSampler(Sampler[T_co]):
r"""Sampler that restricts data loading to a subset of the dataset.
It is especially useful in conjunction with
:class:`torch.nn.parallel.DistributedDataParallel`. In such a case, each
process can pass a :class:`~torch.utils.data.DistributedSampler` instance as a
:class:`~torch.utils.data.DataLoader` sampler, and load a subset of the
original dataset that is exclusive to it.
.. note::
Dataset is assumed to be of constant size and that any instance of it always
returns the same elements in the same order.
Args:
dataset: Dataset used for sampling.
num_replicas (int, optional): Number of processes participating in
distributed training. By default, :attr:`world_size` is retrieved from the
current distributed group.
rank (int, optional): Rank of the current process within :attr:`num_replicas`.
By default, :attr:`rank` is retrieved from the current distributed
group.
shuffle (bool, optional): If ``True`` (default), sampler will shuffle the
indices.
seed (int, optional): random seed used to shuffle the sampler if
:attr:`shuffle=True`. This number should be identical across all
processes in the distributed group. Default: ``0``.
drop_last (bool, optional): if ``True``, then the sampler will drop the
tail of the data to make it evenly divisible across the number of
replicas. If ``False``, the sampler will add extra indices to make
the data evenly divisible across the replicas. Default: ``False``.
padding: (bool, optional): Whether to pad the dataset. Default: ``True``.
pair: (bool, optional): Pair output for Mixup. Default: ``False``.
.. warning::
In distributed mode, calling the :meth:`set_epoch` method at
the beginning of each epoch **before** creating the :class:`DataLoader` iterator
is necessary to make shuffling work properly across multiple epochs. Otherwise,
the same ordering will be always used.
Example::
>>> sampler = DistributedSampler(dataset) if is_distributed else None
>>> loader = DataLoader(dataset, shuffle=(sampler is None),
... sampler=sampler)
>>> for epoch in range(start_epoch, n_epochs):
... if is_distributed:
... sampler.set_epoch(epoch)
... train(loader)
"""
def __init__(self, dataset: Dataset, num_replicas: Optional[int] = None,
rank: Optional[int] = None, shuffle: bool = True,
seed: int = 0, drop_last: bool = False,
padding: bool = True,
pair: bool = False) -> None:
if num_replicas is None:
if not dist.is_available():
num_replicas = 1
else:
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
rank = 0
else:
rank = dist.get_rank()
if rank >= num_replicas or rank < 0:
raise ValueError(
"Invalid rank {}, rank should be in the interval"
" [0, {}]".format(rank, num_replicas - 1))
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.drop_last = drop_last
self.pair = pair
self.padding = padding
# If the dataset length is evenly divisible by # of replicas, then there
# is no need to drop any data, since the dataset will be split equally.
T = self.num_replicas if not self.pair else self.num_replicas * 2
self.total_size = len(self.dataset)
if self.padding:
num_parts = self.total_size // T
has_rest = bool(self.total_size % T)
if self.drop_last:
self.total_size = num_parts * T
else:
self.total_size = (num_parts + has_rest) * T
self.num_samples = (
self.total_size + self.num_replicas - 1) // self.num_replicas
self.shuffle = shuffle
self.seed = seed
def __iter__(self) -> Iterator[T_co]:
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.dataset), generator=g)
else:
indices = torch.arange(len(self.dataset))
if not self.drop_last:
# add extra samples to make it evenly divisible
if self.padding:
padding_size = self.total_size - len(indices)
# pad to total_size
if padding_size <= len(indices):
indices = torch.cat(
[indices, indices[:padding_size]], dim=0)
else:
repeat_times = (self.total_size +
len(indices) - 1) // len(indices)
indices = indices.repeat(repeat_times)[:self.total_size]
else:
# remove tail of data to make it evenly divisible.
indices = indices[:self.total_size]
assert len(indices) == self.total_size
# subsample
if self.pair:
indices = indices.view(-1, 2)
indices = indices[self.rank:self.total_size:self.num_replicas].flatten(
).tolist()
assert len(indices) == self.num_samples or (
not self.padding and len(indices) == self.num_samples - 1)
return iter(indices)
def __len__(self) -> int:
return self.num_samples
def set_epoch(self, epoch: int) -> None:
r"""
Sets the epoch for this sampler. When :attr:`shuffle=True`, this ensures all replicas
use a different random ordering for each epoch. Otherwise, the next iteration of this
sampler will yield the same ordering.
Args:
epoch (int): Epoch number.
"""
self.epoch = epoch
|
Cream/TinyViT/data/sampler.py/0
|
{
"file_path": "Cream/TinyViT/data/sampler.py",
"repo_id": "Cream",
"token_count": 2830
}
| 308 |
# --------------------------------------------------------
# Optimizer
# Copyright (c) 2022 Microsoft
# Based on the code: Swin Transformer
# (https://github.com/microsoft/swin-transformer)
# --------------------------------------------------------
from torch import optim as optim
# Modified for TinyViT
from tinyvit_utils import divide_param_groups_by_lr_scale
def build_optimizer(config, model):
"""
Build optimizer, set weight decay of normalization to 0 by default.
"""
skip = {}
skip_keywords = {}
if hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
if hasattr(model, 'no_weight_decay_keywords'):
skip_keywords = model.no_weight_decay_keywords()
parameters = set_weight_decay(model, skip, skip_keywords)
# Modified for TinyViT
parameters = divide_param_groups_by_lr_scale(parameters)
opt_lower = config.TRAIN.OPTIMIZER.NAME.lower()
optimizer = None
if opt_lower == 'sgd':
optimizer = optim.SGD(parameters, momentum=config.TRAIN.OPTIMIZER.MOMENTUM, nesterov=True,
lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, eps=config.TRAIN.OPTIMIZER.EPS, betas=config.TRAIN.OPTIMIZER.BETAS,
lr=config.TRAIN.BASE_LR, weight_decay=config.TRAIN.WEIGHT_DECAY)
return optimizer
def set_weight_decay(model, skip_list=(), skip_keywords=()):
has_decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or (name in skip_list) or \
check_keywords_in_name(name, skip_keywords):
no_decay.append(param)
else:
has_decay.append(param)
return [{'params': has_decay},
{'params': no_decay, 'weight_decay': 0.}]
def check_keywords_in_name(name, keywords=()):
isin = False
for keyword in keywords:
if keyword in name:
isin = True
return isin
|
Cream/TinyViT/optimizer.py/0
|
{
"file_path": "Cream/TinyViT/optimizer.py",
"repo_id": "Cream",
"token_count": 902
}
| 309 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
from models.transformer import RPE_HELP
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--lr_drop', default=200, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=100, type=int,
help="Number of query slots")
parser.add_argument('--pre_norm', action='store_true')
# 2D relative position encoding
parser.add_argument('--enc_rpe2d', default='', type=str,
help=RPE_HELP)
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--eos_coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
# dataset parameters
parser.add_argument('--dataset_file', default='coco')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=4, type=int)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
model, criterion, postprocessors = build_model(args)
model.to(device)
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
param_dicts = [
{"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
{
"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
"lr": args.lr_backbone,
},
]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr,
weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
dataset_train = build_dataset(image_set='train', args=args)
dataset_val = build_dataset(image_set='val', args=args)
if args.distributed:
sampler_train = DistributedSampler(dataset_train)
sampler_val = DistributedSampler(dataset_val, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
batch_sampler_train = torch.utils.data.BatchSampler(
sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
if args.dataset_file == "coco_panoptic":
# We also evaluate AP during panoptic training, on original coco DS
coco_val = datasets.coco.build("val", args)
base_ds = get_coco_api_from_dataset(coco_val)
else:
base_ds = get_coco_api_from_dataset(dataset_val)
if args.frozen_weights is not None:
checkpoint = torch.load(args.frozen_weights, map_location='cpu')
model_without_ddp.detr.load_state_dict(checkpoint['model'])
output_dir = Path(args.output_dir)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
if args.eval:
test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,
data_loader_val, base_ds, device, args.output_dir)
if args.output_dir:
utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer, device, epoch,
args.clip_max_norm)
lr_scheduler.step()
if args.output_dir:
checkpoint_paths = [output_dir / 'checkpoint.pth']
# extra checkpoint before LR drop and every 100 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0:
checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'args': args,
}, checkpoint_path)
test_stats, coco_evaluator = evaluate(
model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (output_dir / "log.txt").open("a") as f:
f.write(json.dumps(log_stats) + "\n")
# for evaluation logs
if coco_evaluator is not None:
(output_dir / 'eval').mkdir(exist_ok=True)
if "bbox" in coco_evaluator.coco_eval:
filenames = ['latest.pth']
if epoch % 50 == 0:
filenames.append(f'{epoch:03}.pth')
for name in filenames:
torch.save(coco_evaluator.coco_eval["bbox"].eval,
output_dir / "eval" / name)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
|
Cream/iRPE/DETR-with-iRPE/main.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/main.py",
"repo_id": "Cream",
"token_count": 5086
}
| 310 |
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.nn as nn
from functools import partial
from rpe_vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_
__all__ = [
'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
'deit_base_distilled_patch16_384',
]
class DistilledVisionTransformer(VisionTransformer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
num_patches = self.patch_embed.num_patches
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 2, self.embed_dim))
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()
trunc_normal_(self.dist_token, std=.02)
trunc_normal_(self.pos_embed, std=.02)
self.head_dist.apply(self._init_weights)
def forward_features(self, x):
# taken from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
# with slight modifications to add the dist_token
B = x.shape[0]
x = self.patch_embed(x)
cls_tokens = self.cls_token.expand(B, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
dist_token = self.dist_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, dist_token, x), dim=1)
x = x + self.pos_embed
x = self.pos_drop(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0], x[:, 1]
def forward(self, x):
x, x_dist = self.forward_features(x)
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.training:
return x, x_dist
else:
# during inference, return the average of both classifier predictions
return (x + x_dist) / 2
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs):
model = DistilledVisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth",
map_location="cpu", check_hash=True
)
model.load_state_dict(checkpoint["model"])
return model
|
Cream/iRPE/DeiT-with-iRPE/models.py/0
|
{
"file_path": "Cream/iRPE/DeiT-with-iRPE/models.py",
"repo_id": "Cream",
"token_count": 3142
}
| 311 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from timm.data import create_transform
from PIL import ImageFilter
import logging
import random
import torchvision.transforms as T
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
def get_resolution(original_resolution):
"""Takes (H,W) and returns (precrop, crop)."""
area = original_resolution[0] * original_resolution[1]
return (160, 128) if area < 96*96 else (512, 480)
def build_transforms(cfg, is_train=True):
if cfg.AUG.TIMM_AUG.USE_TRANSFORM and is_train:
logging.info('=> use timm transform for training')
timm_cfg = cfg.AUG.TIMM_AUG
transforms = create_transform(
input_size=cfg.TRAIN.IMAGE_SIZE[0],
is_training=True,
use_prefetcher=False,
no_aug=False,
re_prob=timm_cfg.RE_PROB,
re_mode=timm_cfg.RE_MODE,
re_count=timm_cfg.RE_COUNT,
scale=cfg.AUG.SCALE,
ratio=cfg.AUG.RATIO,
hflip=timm_cfg.HFLIP,
vflip=timm_cfg.VFLIP,
color_jitter=timm_cfg.COLOR_JITTER,
auto_augment=timm_cfg.AUTO_AUGMENT,
interpolation=timm_cfg.INTERPOLATION,
mean=cfg.INPUT.MEAN,
std=cfg.INPUT.STD,
)
return transforms
# assert isinstance(cfg.DATASET.OUTPUT_SIZE, (list, tuple)), 'DATASET.OUTPUT_SIZE should be list or tuple'
normalize = T.Normalize(mean=cfg.INPUT.MEAN, std=cfg.INPUT.STD)
transforms = None
if is_train:
if cfg.FINETUNE.FINETUNE and not cfg.FINETUNE.USE_TRAIN_AUG:
# precrop, crop = get_resolution(cfg.TRAIN.IMAGE_SIZE)
crop = cfg.TRAIN.IMAGE_SIZE[0]
precrop = crop + 32
transforms = T.Compose([
T.Resize(
(precrop, precrop),
interpolation=cfg.AUG.INTERPOLATION
),
T.RandomCrop((crop, crop)),
T.RandomHorizontalFlip(),
T.ToTensor(),
normalize,
])
else:
aug = cfg.AUG
scale = aug.SCALE
ratio = aug.RATIO
ts = [
T.RandomResizedCrop(
cfg.TRAIN.IMAGE_SIZE[0], scale=scale, ratio=ratio,
interpolation=cfg.AUG.INTERPOLATION
),
T.RandomHorizontalFlip(),
]
cj = aug.COLOR_JITTER
if cj[-1] > 0.0:
ts.append(T.RandomApply([T.ColorJitter(*cj[:-1])], p=cj[-1]))
gs = aug.GRAY_SCALE
if gs > 0.0:
ts.append(T.RandomGrayscale(gs))
gb = aug.GAUSSIAN_BLUR
if gb > 0.0:
ts.append(T.RandomApply([GaussianBlur([.1, 2.])], p=gb))
ts.append(T.ToTensor())
ts.append(normalize)
transforms = T.Compose(ts)
else:
if cfg.TEST.CENTER_CROP:
transforms = T.Compose([
T.Resize(
int(cfg.TEST.IMAGE_SIZE[0] / 0.875),
interpolation=cfg.TEST.INTERPOLATION
),
T.CenterCrop(cfg.TEST.IMAGE_SIZE[0]),
T.ToTensor(),
normalize,
])
else:
transforms = T.Compose([
T.Resize(
(cfg.TEST.IMAGE_SIZE[1], cfg.TEST.IMAGE_SIZE[0]),
interpolation=cfg.TEST.INTERPOLATION
),
T.ToTensor(),
normalize,
])
return transforms
|
CvT/lib/dataset/transformas/build.py/0
|
{
"file_path": "CvT/lib/dataset/transformas/build.py",
"repo_id": "CvT",
"token_count": 2206
}
| 312 |
# Spectral Residual Anomaly Detection Component
This folder specifies the Spectral Residual Anomaly Detection component that can be used in Azure Machine Learning designer. The details of the Spectral Residual algorithm can be found at https://arxiv.org/pdf/1906.03821.pdf.
## Component Specification
This section describes the specification of [Spectral Residual Anomaly Detection Component](./ad_component.yaml).
### Input Specification
* `Input`. The AnyDirectory type means you need to register your dataset as a **File dataset** in the workspace. The data set should contain at least 12 rows. Each row should contain a timestamp column and one or more columns that are to be detected.
* `Detect Mode`. The following two detect modes are supported.
1. `AnomalyOnly`. In this mode, the module outputs columns `isAnomaly`, `mag` and `score`.
2. `AnomalyAndMargin`. In this mode, the module outputs columns `isAnomaly`, `mag`, `score`, `expectedValue`, `lowerBoundary`, `upperBoundary`.
* `Timestamp Column`. The column that contains the timestamps. Timestamps must be in ascending order, and duplicate timestamps are not allowed.
* `Value Column`. One or more columns that are to be detected. The data in these columns should be numeric. Absolute values greater than 1e100 are not allowed.
* `Batch Size`. The number of rows to be detected in each batch. The batch size should be at least 12. Set this parameter to 0 or a negative number to detect all rows in one batch.
* `Threshold`. In AnomalyOnly mode, points are detected as anomalies if their `score` is greater than the threshold. In AnomalyAndMargin mode, this parameter and `sensitivity` work together to filter anomalies.
* `Sensitivity`. This parameter is used in AnomalyAndMargin mode to determine the range of the boundaries.
* `Append result column to output`. If this parameter is set, the input data set will be output together with the results. Otherwise, only the results will be output.
### Output Specification
The output data set will contain a subset of the following columns, depending on the `Detect Mode` parameter. If multiple value columns are selected, the result columns are suffixed with the corresponding value column names.
* `isAnomaly`. The anomaly result.
* `mag`. The magnitude after spectral residual transformation.
* `score`. A value indicates the significance of the anomaly.
In AnomalyAndMargin mode, the following columns will be output in addition to the above three columns.
* `expectedValue`. The expected value of each point.
* `lowerBoundary`. The lower boundary at each point that the algorithm can tolerate as not anomalous.
* `upperBoundary`. The upper boundary at each point that the algorithm can tolerate as not anomalous.
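For intuition about the `mag` and `score` columns, the spectral residual transform from the paper above can be sketched in a few lines of NumPy. This is a deliberately simplified illustration, not the component's implementation: the function name, the averaging window, the zero-padded edges and the scoring rule are all assumptions made for brevity.

```python
import numpy as np

def spectral_residual_saliency(values, window=3):
    """Illustrative (simplified) spectral residual transform."""
    fft = np.fft.fft(values)
    amp = np.abs(fft)
    log_amp = np.log(np.clip(amp, 1e-8, None))
    # Spectral residual = log amplitude minus its local average.
    avg_log_amp = np.convolve(log_amp, np.ones(window) / window, mode="same")
    residual = log_amp - avg_log_amp
    # Back to the time domain: the saliency map, i.e. the "mag" column.
    return np.abs(np.fft.ifft(np.exp(residual + 1j * np.angle(fft))))

values = np.array([22.0] * 8 + [52.9] + [22.0] * 8)
mag = spectral_residual_saliency(values)
score = (mag - mag.mean()) / (mag.mean() + 1e-8)  # simple significance proxy
print(np.argmax(score))  # the spike at index 8 stands out
```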
## How to create a new component in Azure Machine Learning
Follow [this tutorial](https://github.com/Azure/AzureMachineLearningGallery/blob/main/tutorial/tutorial1-use-existing-components.md) to create a new component in your Azure Machine Learning workspace.
After creating the component successfully, you can use it in the Azure Machine Learning designer.
## How to build a pipeline in AML designer
1. Prepare the input dataset for the component.
Register this [sample AnomalyDetector-Manufacture dataset](https://github.com/microsoft/Cognitive-Samples-IntelligentKiosk/blob/master/Kiosk/Assets/AnomalyDetector/AnomalyDetector-Manufacture.csv) as a **Tabular dataset** in your Azure Machine Learning workspace.
The dataset above is only a sample. You can use your own dataset as long as it is registered as a Tabular dataset, and you can preprocess it with the Designer's built-in modules. Make sure the input dataset of **Spectral Residual Anomaly Detection** has the following format and contains at least 12 rows (a small pandas sketch for generating data in this shape follows the table):
|Timestamp|Value|
|---|---|
|2018/7/1 0:00|22|
|2018/7/1 2:00|22|
|2018/7/1 4:00|22|
|2018/7/1 6:00|22|
|2018/7/1 8:00|52.93218322|
|2018/7/1 10:00|52.81943684|
|2018/7/1 12:00|52.33277765|
|2018/7/1 14:00|52.82106858|
|2018/7/1 16:00|52.93218322|
|2018/7/1 18:00|22|
|2018/7/1 20:00|22|
|2018/7/1 22:00|22|
|2018/7/2 0:00|22|
|2018/7/2 2:00|22|
|2018/7/2 4:00|22|
|2018/7/2 6:00|22|
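If you prefer to generate a file of this shape programmatically, a minimal pandas sketch could look like the following (the file name and the exact values are illustrative); register the resulting CSV as a Tabular dataset as described above:

```python
import pandas as pd

# 4 days of readings at a 2-hour interval, flat at 22 with a daytime plateau on day 1.
timestamps = pd.date_range("2018-07-01", periods=48, freq="2H")
values = [22.0] * 48
values[4:9] = [52.93, 52.82, 52.33, 52.82, 52.93]
df = pd.DataFrame({"Timestamp": timestamps, "Value": values})
df.to_csv("anomaly_detector_sample.csv", index=False)
```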
1. Open the AML designer, create a new pipeline draft and drag the registered dataset onto the canvas.
Add **Spectral Residual Anomaly Detection** to the canvas, connect it to the dataset, and configure the parameters. The pipeline graph looks like the following:

1. Submit the pipeline.
1. When the pipeline run completes, you can click the **Visualize** icon in the **Outputs+logs** tab in the right panel of the **Spectral Residual Anomaly Detection** module, or right-click the module and select **Visualize**.
|
anomalydetector/aml_component/README.md/0
|
{
"file_path": "anomalydetector/aml_component/README.md",
"repo_id": "anomalydetector",
"token_count": 1343
}
| 313 |
import bisect
import numpy as np
from msanomalydetector._anomaly_kernel_cython import median_filter
# pseudo - code to generate the factors.
# factors = [1]
# for i in range(50):
# if i < 40:
# factors.append(factors[-1] / (1.15 + 0.001 * i))
# else:
# factors.append(factors[-1] / (1.25 + 0.01 * i))
# for i in range(50):
# factors.insert(0, factors[0] * (1.25 + 0.001 * i))
factors = [
184331.62871148242, 141902.71648305038, 109324.12672037778, 84289.9974713784, 65038.57829581667, 50222.84038287002,
38812.08684920403, 30017.081863266845, 23233.035497884553, 17996.15452973242, 13950.50738738947, 10822.736530170265,
8402.745753237783, 6528.939979205737, 5076.93622022219, 3950.92312857758, 3077.042935029268, 2398.318733460069,
1870.7634426365591, 1460.393007522685, 1140.9320371270976, 892.0500681212648, 698.0047481387048, 546.5972968979678,
428.36778753759233, 335.97473532360186, 263.71643275007995, 207.16137686573444, 162.8627176617409, 128.13746472206208,
100.8956415134347, 79.50799173635517, 62.70346351447568, 49.48971074544253, 39.09139869308257, 30.90229145698227,
24.448015393182175, 19.35709849024717, 15.338429865489042, 12.163703303322, 9.653732780414286, 7.667778221139226,
6.095213212352326, 4.8490160798347866, 3.8606815922251485, 3.076240312529999, 2.4531421949999994, 1.9578149999999996,
1.5637499999999998, 1.25, 1.0, 0.8695652173913044, 0.7554867223208555, 0.655804446459076, 0.5687809596349316,
0.4928777813127657, 0.4267340097946024, 0.36914706729636887, 0.3190553736355825, 0.27552277516026125, 0.23772456873189068,
0.20493497304473338, 0.17651591132190647, 0.1519069804835684, 0.13061649224726435, 0.11221348131208278, 0.09632058481723846,
0.08260770567516164, 0.0707863801843716, 0.06060477755511267, 0.051843265658779024, 0.0443104834690419, 0.03783986632710667,
0.03228657536442549, 0.027524787181948417, 0.02344530424356765, 0.019953450420057577, 0.01696721974494692, 0.014415649740821513,
0.012237393667929978, 0.010379468759906684, 0.008796159966022614, 0.0074480609365136455, 0.006301235986898177,
0.00532648857725966, 0.004498723460523362, 0.0037963911059268884, 0.0032010043051660104, 0.002696718032995797,
0.0022699646742388863, 0.0019091376570554135, 0.0011570531254881296, 0.000697019955113331, 0.00041737721863073713,
0.000248438820613534, 0.00014700521929794912, 8.647365841055832e-05, 5.056939088336744e-05, 2.9400808653120604e-05,
1.6994687082728674e-05, 9.767061541798089e-06
]
def calculate_boundary_unit_last(data):
if len(data) == 0:
return 0
calculation_size = len(data) - 1
window = int(min(calculation_size // 3, 512))
trends = np.abs(np.asarray(median_filter(data[:calculation_size], window, need_two_end=True), dtype=float))
unit = max(np.mean(trends), 1.0)
if not np.isfinite(unit):
raise Exception('Not finite unit value')
return unit
def calculate_boundary_unit_entire(data, is_anomaly):
if len(data) == 0:
return []
window = int(min(len(data)//3, 512))
trend_fraction = 0.5
trends = np.abs(np.asarray(median_filter(data, window, need_two_end=True), dtype=float))
valid_trend = [t for a, t in zip(is_anomaly, trends) if not a]
if len(valid_trend) > 0:
average_part = np.mean(valid_trend)
units = trend_fraction * trends + average_part * (1 - trend_fraction)
else:
units = trends
if not np.all(np.isfinite(units)):
raise Exception('Not finite unit values')
units = np.clip(units, 1.0, max(np.max(units), 1.0))
return units
def calculate_margin(unit, sensitivity):
def calculate_margin_core(unit, sensitivity):
lb = int(sensitivity)
# if lb == sensitivity:
# return unit * factors[lb]
return (factors[lb + 1] + (factors[lb] - factors[lb + 1]) * (1 - sensitivity + lb)) * unit
if 0 > sensitivity or sensitivity > 100:
raise Exception('sensitivity should be integer in [0, 100]')
if unit <= 0:
raise Exception('unit should be a positive number')
if sensitivity == 100:
return 0.0
return calculate_margin_core(unit, sensitivity)
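# calculate_anomaly_score maps the distance between the actual and expected value onto [0, 1]:
# the distance is located in the table of margins (sensitivities 100..0, ascending margin order)
# and the score is interpolated linearly between the two neighbouring margins.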
def calculate_anomaly_score(value, expected_value, unit, is_anomaly):
if not is_anomaly:
return 0.0
distance = np.abs(expected_value - value)
margins = [calculate_margin(unit, i) for i in range(101)][::-1]
lb = bisect.bisect_left(margins, distance)
if lb == 0:
return 0
elif lb >= 100:
return 1.0
else:
a, b = margins[lb-1], margins[lb]
score = lb - 1 + (distance - a) / (b - a)
return score / 100.0
def calculate_anomaly_scores(values, expected_values, units, is_anomaly):
scores = [calculate_anomaly_score(value, exp, unit, anomaly)
for value, exp, unit, anomaly in zip(values, expected_values, units, is_anomaly)]
return scores
|
anomalydetector/msanomalydetector/boundary_utils.py/0
|
{
"file_path": "anomalydetector/msanomalydetector/boundary_utils.py",
"repo_id": "anomalydetector",
"token_count": 2224
}
| 314 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Unwanted files and folders
confs/
devops/
docker/
docs/
research/
scripts/
tasks/
tests/
|
archai/.amltignore/0
|
{
"file_path": "archai/.amltignore",
"repo_id": "archai",
"token_count": 52
}
| 315 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
import os
import pathlib
import re
import shutil
import tempfile
from pathlib import Path
from typing import Optional
from types import TracebackType
import torch
# File-related constants
CHECKPOINT_FOLDER_PREFIX = "checkpoint"
CHECKPOINT_REGEX = re.compile(r"^" + CHECKPOINT_FOLDER_PREFIX + r"\-(\d+)$")
def calculate_onnx_model_size(model_path: str) -> float:
"""Calculate the size of an ONNX model.
This function calculates the size of an ONNX model by reading the size of
the file on disk.
Args:
model_path: The path to the ONNX model on disk.
Returns:
The size of the model in megabytes.
"""
size = os.path.getsize(model_path) / 1e6
return size
def calculate_torch_model_size(model: torch.nn.Module) -> float:
"""Calculate the size of a PyTorch model.
This function calculates the size of a PyTorch model by saving its state
dictionary to a temporary file and reading the size of the file on disk.
Args:
model: The PyTorch model.
Returns:
The size of the model in megabytes.
"""
torch.save(model.state_dict(), "temp.p")
size = os.path.getsize("temp.p") / 1e6
os.remove("temp.p")
return size
def check_available_checkpoint(folder_name: str) -> bool:
"""Check if there are any available checkpoints in a given folder.
This function checks if a given folder contains any checkpoints by looking
for directories that match a regular expression for checkpoint names.
Args:
folder_name: The path to the folder that might contain checkpoints.
Returns:
`True` if there are available checkpoints, `False` otherwise.
"""
if not os.path.exists(folder_name):
return False
folder_content = os.listdir(folder_name)
checkpoints = [
path
for path in folder_content
if CHECKPOINT_REGEX.search(path) is not None and os.path.isdir(os.path.join(folder_name, path))
]
if len(checkpoints) == 0:
return False
return True
def create_file_name_identifier(file_name: str, identifier: str) -> str:
"""Create a new file name by adding an identifier to the end
of an existing file name (before the file extension).
Args:
file_name: The original file name.
identifier: The identifier to be added to the file name.
Returns:
The new file name with the added identifier.
"""
file_name = Path(file_name)
file_name_identifier = file_name.parent.joinpath(file_name.stem + identifier).with_suffix(file_name.suffix)
return file_name_identifier.as_posix()
def create_empty_file(file_path: str) -> None:
"""Create an empty file at the given path.
Args:
file_path: The path to the file to be created.
"""
open(file_path, "w").close()
def create_file_with_string(file_path: str, content: str) -> None:
"""Create a file at the given path and writes the given string to it.
Args:
file_path: The path to the file to be created.
content: The string to be written to the file.
"""
pathlib.Path(file_path).write_text(content)
def copy_file(
src_file_path: str, dest_file_path: str, force_shutil: Optional[bool] = True, keep_metadata: Optional[bool] = False
) -> str:
"""Copy a file from one location to another.
Args:
src_file_path: The path to the source file.
dest_file_path: The path to the destination file.
force_shutil: Whether to use `shutil` to copy the file.
keep_metadata: Whether to keep source file metadata when copying.
Returns:
The path to the destination file.
"""
def _copy_file_basic_mode(src_file_path: str, dest_file_path: str) -> str:
if os.path.isdir(dest_file_path):
dest_file_path = os.path.join(dest_file_path, pathlib.Path(src_file_path).name)
with open(src_file_path, "rb") as src, open(dest_file_path, "wb") as dest:
dest.write(src.read())
return dest_file_path
if not force_shutil:
return _copy_file_basic_mode(src_file_path, dest_file_path)
# Note shutil.copy2 might fail on Azure if file system does not support OS level copystats
# Use keep_metadata=True only if needed for maximum compatibility
try:
copy_fn = shutil.copy2 if keep_metadata else shutil.copy
return copy_fn(src_file_path, dest_file_path)
except OSError as e:
if keep_metadata or e.errno != 38: # OSError 38: Function not implemented
raise
return _copy_file_basic_mode(src_file_path, dest_file_path)
def get_full_path(path: str, create_folder: Optional[bool] = False) -> str:
"""Get the full path to a file or folder.
Args:
path: The path to the file or folder.
create_folder: Whether to create the folder if it does not exist.
Returns:
The full path to the file or folder.
"""
assert path
path = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))
if create_folder:
os.makedirs(path, exist_ok=True)
return path
class TemporaryFiles:
""" Windows has a weird quirk where the tempfile.NamedTemporaryFile cannot be opened a second time. """
def __init__(self):
self.files_to_delete = []
def __enter__(self):
return self
def __exit__(self, exc_type: type[BaseException], exc_val: BaseException, exc_tb: TracebackType) -> None:
for name in self.files_to_delete:
os.unlink(name)
self.files_to_delete = []
def get_temp_file(self) -> str:
result = None
with tempfile.NamedTemporaryFile(delete=False) as tmp:
result = tmp.name
self.files_to_delete += [result]
return result
|
archai/archai/common/file_utils.py/0
|
{
"file_path": "archai/archai/common/file_utils.py",
"repo_id": "archai",
"token_count": 2198
}
| 316 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, List, Optional, Union
from overrides import overrides
from torch.utils.data import Dataset
from torchvision.datasets import Caltech101, Caltech256
from torchvision.transforms import ToTensor
from archai.api.dataset_provider import DatasetProvider
from archai.common.ordered_dict_logger import OrderedDictLogger
logger = OrderedDictLogger(source=__name__)
class CaltechDatasetProvider(DatasetProvider):
"""Caltech-based dataset provider."""
SUPPORTED_DATASETS = {
"caltech101": Caltech101,
"caltech256": Caltech256,
}
def __init__(
self,
dataset: Optional[str] = "caltech101",
root: Optional[str] = "dataroot",
) -> None:
"""Initialize Caltech-based dataset provider.
Args:
dataset: Name of dataset.
root: Root directory of dataset where is saved.
"""
super().__init__()
assert dataset in self.SUPPORTED_DATASETS, f"`dataset` should be one of: {list(self.SUPPORTED_DATASETS)}"
self.dataset = dataset
self.root = root
@overrides
def get_train_dataset(
self,
target_type: Optional[Union[str, List[str]]] = "category",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
kwargs = {"target_type": target_type} if self.dataset == "caltech101" else {}
return self.SUPPORTED_DATASETS[self.dataset](
self.root, download=True, transform=transform or ToTensor(), target_transform=target_transform, **kwargs
)
@overrides
def get_val_dataset(
self,
target_type: Optional[Union[str, List[str]]] = "category",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
logger.warn(f"Validation set not available for `{self.dataset}`. Returning training set ...")
return self.get_train_dataset(target_type=target_type, transform=transform, target_transform=target_transform)
@overrides
def get_test_dataset(
self,
target_type: Optional[Union[str, List[str]]] = "category",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
logger.warn(f"Testing set not available for `{self.dataset}`. Returning training set ...")
return self.get_train_dataset(target_type=target_type, transform=transform, target_transform=target_transform)
|
archai/archai/datasets/cv/caltech_dataset_provider.py/0
|
{
"file_path": "archai/archai/datasets/cv/caltech_dataset_provider.py",
"repo_id": "archai",
"token_count": 1021
}
| 317 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# https://github.com/quark0/darts/blob/master/cnn/utils.py
import numpy as np
import torch
class CustomCutout:
"""Custom-based cutout transform."""
def __init__(self, length: int) -> None:
"""Initialize the custom-based cutout transform.
Args:
length: Length of the cutout.
"""
self.length = length
def __call__(self, img: torch.Tensor) -> torch.Tensor:
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
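# Illustrative usage (assumed torchvision-style pipeline, applied after ToTensor):
#   from torchvision import transforms
#   train_transform = transforms.Compose([transforms.ToTensor(), CustomCutout(length=16)])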
|
archai/archai/datasets/cv/transforms/custom_cutout.py/0
|
{
"file_path": "archai/archai/datasets/cv/transforms/custom_cutout.py",
"repo_id": "archai",
"token_count": 466
}
| 318 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from collections import Counter, OrderedDict
from typing import List, Optional
from overrides import overrides
from archai.common.distributed_utils import sync_workers
from archai.common.file_utils import get_full_path
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.datasets.nlp.tokenizer_utils.token_config import (
SpecialTokenEnum,
TokenConfig,
)
from archai.datasets.nlp.tokenizer_utils.tokenizer_base import TokenizerBase
logger = OrderedDictLogger(source=__name__)
class WordTokenizer(TokenizerBase):
"""Word-based tokenizer."""
def __init__(
self,
save_path: str,
vocab_size: Optional[int] = None,
bos_token: Optional[str] = None,
eos_token: Optional[str] = "<eos>",
unk_token: Optional[str] = "<unk>",
min_frequency: Optional[int] = 0,
lower_case: Optional[int] = False,
delimiter: Optional[str] = None,
encode_special_tokens: Optional[bool] = True,
decode_special_tokens: Optional[bool] = True,
):
"""Define the tokenization pipeline.
Args:
save_path: Path to save the tokenizer.
vocab_size: Maximum size of vocabulary.
bos_token: Begin-of-sentence token.
eos_token: End-of-sentence token.
unk_token: Unknown token.
            min_frequency: Minimum frequency of tokens.
lower_case: Whether lower case should be applied.
delimiter: Delimiter between tokens.
encode_special_tokens: Whether special tokens should be encoded.
decode_special_tokens: Whether special tokens should be decoded.
"""
self.counter = Counter()
# No prefix space or line needed as we delimit on white space unlike in bbpe
self._config = TokenConfig(
bos_token=bos_token,
eos_token=eos_token,
unk_token=unk_token,
pad_token=None,
add_prefix_space=False,
add_prefix_new_line=False,
lower_case=lower_case,
)
assert self._config.unk_token, "`unk_token` must be supplied."
self._bos = [self._config.bos_token] if self._config.bos_token else []
self._eos = [self._config.eos_token] if self._config.eos_token else []
self.save_path = save_path
self.vocab_size = vocab_size
self.min_frequency = min_frequency
self.delimiter = delimiter
self.encode_special_tokens = encode_special_tokens
self.decode_special_tokens = decode_special_tokens
@overrides
def __len__(self) -> int:
return len(self.idx2sym)
@overrides
def train(self, filepaths: List[str]) -> None:
logger.info(
f"Training tokenizer with min_frequency = {self.min_frequency} and vocab_size = {self.vocab_size}, using {len(filepaths)} training file(s) at {self.save_path} ..."
)
assert len(filepaths)
self._clear()
for filepath in filepaths:
self._add_file(filepath)
# Adds specials tokens regardless of vocab_size
for sym in self._config.get_special_tokens():
self._add_special(sym)
remaining_len = self.vocab_size - len(self) if self.vocab_size is not None else None
for sym, cnt in self.counter.most_common(remaining_len):
if cnt < self.min_frequency:
break
self._add_symbol(sym)
with sync_workers() as rank:
if rank == 0:
self._save()
logger.info(f"Vocabulary size = {len(self)} | Unique tokens = {len(self.counter)}")
@overrides
def is_trained(self) -> bool:
vocab_filepath = self._vocab_filepath()
return os.path.exists(vocab_filepath)
@overrides
def load(self) -> None:
vocab_filepath = self._vocab_filepath()
self._clear()
with open(vocab_filepath, "r", encoding="utf-8") as f:
for line in f:
symb = line.strip().split()[0]
self._add_symbol(symb)
self.unk_idx = self.sym2idx[self._config.unk_token]
@overrides
def encode_text(self, text: str) -> List[int]:
symbols = self._tokenize_text(text)
if self.encode_special_tokens:
symbols = self._bos + symbols + self._eos
toks = self.tokens_to_ids(symbols)
return toks
@overrides
def decode_text(self, ids: List[int]) -> str:
syms = self.ids_to_tokens(ids)
if self.decode_special_tokens and len(syms):
if syms[0] == self._bos:
syms = syms[1:]
if len(syms) and syms[-1] == self._eos:
syms = syms[:-1]
return " ".join(syms)
@overrides
def special_token_id(self, sp: SpecialTokenEnum) -> int:
return self.token_to_id(self._config.special_token_name(sp))
@overrides
def token_to_id(self, t: str) -> int:
return self._get_idx(t)
@overrides
def id_to_token(self, id: int) -> str:
return self._get_sym(id)
@overrides
def tokens_to_ids(self, ts: List[str]) -> List[int]:
return [self._get_idx(t) for t in ts]
@overrides
def ids_to_tokens(self, ids: List[int]) -> List[str]:
return [self._get_sym(id) for id in ids]
def _preprocess_text(self, text: str) -> str:
if self._config.add_prefix_space:
text = " " + text
if self._config.add_prefix_new_line:
text = "\n" + text
if self._config.lower_case:
text = text.lower()
return text
def _add_file(self, path: str, verbose: Optional[bool] = True) -> None:
if verbose:
logger.debug(f"Counting file: {path}")
assert os.path.exists(path), f"File does not exist: {path}"
with open(path, "r", encoding="utf-8") as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
logger.debug(f"Completed line: {idx}")
symbols = self._tokenize_text(line)
self.counter.update(symbols)
def _tokenize_text(self, text: str) -> List[str]:
text = self._preprocess_text(text)
symbols = text.split(self.delimiter)
return symbols
def _clear(self) -> None:
self.idx2sym = []
self.sym2idx = OrderedDict()
def _vocab_filepath(self) -> str:
vocab_dir = get_full_path(os.path.join(self.save_path), create_folder=True)
return os.path.join(vocab_dir, "vocab.txt")
def _save(self) -> None:
vocab_filepath = self._vocab_filepath()
with open(vocab_filepath, "w", encoding="utf-8") as f:
f.write("\n".join(self.idx2sym))
def _add_special(self, sym: str) -> None:
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, "{}_idx".format(sym.strip("<>")), self.sym2idx[sym])
def _add_symbol(self, sym: str) -> None:
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def _get_sym(self, idx: int) -> str:
assert 0 <= idx < len(self), f"Index {idx} out of range."
return self.idx2sym[idx]
def _get_idx(self, sym: str) -> int:
if sym in self.sym2idx:
return self.sym2idx[sym]
return self.sym2idx.get(sym, self.unk_idx)
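# --- Illustrative sketch (not part of the original file) ---
# A minimal, standalone rendition of the vocabulary-building logic in `train()`
# above: count whitespace-delimited symbols, reserve slots for special tokens,
# then keep the most frequent symbols meeting `min_frequency` up to `vocab_size`.
# The corpus, special tokens and sizes below are made-up example values.
if __name__ == "__main__":
    from collections import Counter

    corpus = ["the quick brown fox", "the lazy dog", "the fox"]
    special_tokens = ["<unk>", "<bos>", "<eos>"]  # assumed special tokens
    vocab_size, min_frequency = 8, 1

    counter = Counter()
    for line in corpus:
        counter.update(line.split())

    idx2sym = list(special_tokens)
    for sym, cnt in counter.most_common(vocab_size - len(idx2sym)):
        if cnt < min_frequency:
            break
        idx2sym.append(sym)
    sym2idx = {s: i for i, s in enumerate(idx2sym)}

    print(idx2sym)         # e.g. ['<unk>', '<bos>', '<eos>', 'the', 'fox', ...]
    print(sym2idx["the"])  # 3 -> index of the most frequent corpus symbol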
|
archai/archai/datasets/nlp/tokenizer_utils/word_tokenizer.py/0
|
{
"file_path": "archai/archai/datasets/nlp/tokenizer_utils/word_tokenizer.py",
"repo_id": "archai",
"token_count": 3554
}
| 319 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import abstractmethod
from typing import Callable
from overrides import EnforceOverrides
from archai.discrete_search.api.search_results import SearchResults
class Searcher(EnforceOverrides):
"""Abstract class for searchers.
This class serves as a base for implementing searchers, which searches for an
architecture given an algorithm. The class enforces implementation of a single
method: `search`.
Note:
This class is inherited from `EnforceOverrides` and any overridden methods in the
subclass should be decorated with `@overrides` to ensure they are properly overridden.
Examples:
>>> class MySearcher(Searcher):
>>> def __init__(self) -> None:
>>> super().__init__()
>>>
>>> @overrides
>>> def search(self) -> SearchResults:
>>> # Code used to search for the best architecture
>>> return SearchResults(...)
"""
def __init__(self) -> None:
"""Initialize the searcher."""
self.iteration_callbacks = []
@abstractmethod
def search(self) -> SearchResults:
"""Search for the best architecture.
Returns:
Search results.
"""
pass
def subscribe_start_iteration(self, fn : Callable[[int], None]):
self.iteration_callbacks += [fn]
def on_start_iteration(self, iteration: int):
for fn in self.iteration_callbacks:
fn(iteration)
|
archai/archai/discrete_search/api/searcher.py/0
|
{
"file_path": "archai/archai/discrete_search/api/searcher.py",
"repo_id": "archai",
"token_count": 581
}
| 320 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, List, Optional, Union
import ray
from overrides import overrides
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import (
AsyncModelEvaluator,
ModelEvaluator,
)
def _wrap_metric_calculate(class_method) -> Callable:
def _calculate(arch: ArchaiModel, budget: Optional[float] = None) -> Callable:
return class_method(arch, budget)
return _calculate
class RayParallelEvaluator(AsyncModelEvaluator):
"""Wraps a `ModelEvaluator` object into an `AsyncModelEvaluator` with parallel execution using Ray.
`RayParallelEvaluator` expects a stateless objective function as input, meaning that any
`ModelEvaluator.evaluate(arch, ...)` will not alter the state of `obj` or `arch` in any way.
"""
def __init__(
self, obj: ModelEvaluator, timeout: Optional[float] = None, force_stop: Optional[bool] = False, **ray_kwargs
) -> None:
"""Initialize the evaluator.
Args:
obj: A `ModelEvaluator` object.
timeout: Timeout for receiving results from Ray. If None, then Ray will wait
indefinitely for results. If timeout is reached, then incomplete tasks
are canceled and returned as None.
force_stop: If incomplete tasks (within `timeout` seconds) should be force-killed.
If set to `False`, Ray will just send a `KeyboardInterrupt` signal to the process.
**ray_kwargs: Key-value arguments for ray.remote(), e.g: num_gpus, num_cpus, max_task_retries.
"""
assert isinstance(obj, ModelEvaluator)
# Wraps metric.calculate as a standalone function. This only works with stateless metrics
if ray_kwargs:
self.compute_fn = ray.remote(**ray_kwargs)(_wrap_metric_calculate(obj.evaluate))
else:
self.compute_fn = ray.remote(_wrap_metric_calculate(obj.evaluate))
self.timeout = timeout
self.force_stop = force_stop
self.object_refs = []
@overrides
def send(self, arch: ArchaiModel, budget: Optional[float] = None) -> None:
self.object_refs.append(self.compute_fn.remote(arch, budget))
@overrides
def fetch_all(self) -> List[Union[float, None]]:
results = [None] * len(self.object_refs)
if not self.timeout:
results = ray.get(self.object_refs, timeout=self.timeout)
else:
# Maps each object from the object_refs list to its index
ref2idx = {ref: i for i, ref in enumerate(self.object_refs)}
# Gets all results available within `self.timeout` seconds.
complete_objs, incomplete_objs = ray.wait(
self.object_refs, timeout=self.timeout, num_returns=len(self.object_refs)
)
partial_results = ray.get(complete_objs)
# Update results with the partial results fetched
for ref, result in zip(complete_objs, partial_results):
results[ref2idx[ref]] = result
# Cancels incomplete jobs
for incomplete_obj in incomplete_objs:
ray.cancel(incomplete_obj, force=self.force_stop)
# Resets metric state
self.object_refs = []
return results
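# --- Illustrative sketch (not part of the original file) ---
# How this wrapper might be used, assuming Ray is available and `archs` is a list
# of ArchaiModel instances obtained elsewhere; `DummyEvaluator` and the `archid`
# attribute access are assumptions for illustration, not part of the Archai API.
#
# class DummyEvaluator(ModelEvaluator):
#     @overrides
#     def evaluate(self, arch: ArchaiModel, budget: Optional[float] = None) -> float:
#         return float(len(arch.archid))  # stateless toy objective
#
# evaluator = RayParallelEvaluator(DummyEvaluator(), timeout=60.0, num_cpus=1)
# for arch in archs:
#     evaluator.send(arch)
# results = evaluator.fetch_all()  # aligned with send() order; None for timed-out jobs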
|
archai/archai/discrete_search/evaluators/ray.py/0
|
{
"file_path": "archai/archai/discrete_search/evaluators/ray.py",
"repo_id": "archai",
"token_count": 1365
}
| 321 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from __future__ import annotations
import json
import math
from collections import OrderedDict
from copy import deepcopy
from hashlib import sha1
from pathlib import Path
from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union
import torch
import yaml
from torch import nn
from archai.discrete_search.search_spaces.cv.segmentation_dag.ops import OPS, Block
class SegmentationDagModel(torch.nn.Module):
"""Model defined by a directed acyclic graph (DAG) of operations."""
def __init__(
self,
graph: List[Dict[str, Any]],
channels_per_scale: Dict[str, Any],
post_upsample_layers: Optional[int] = 1,
stem_stride: Optional[int] = 2,
img_size: Optional[Tuple[int, int]] = (256, 256),
nb_classes: Optional[int] = 19,
) -> None:
"""Initialize the model.
Args:
graph: List of dictionaries with the following keys:
* name: Name of the node.
* op: Name of the operation used to process the node.
* inputs: List of input nodes.
* scale: Scale of the node (higher means smaller resolutions).
channels_per_scale: Dictionary with the number of channels that should be used
for each scale value, e.g: {1: 32, 2: 64, 4: 128} or a dictionary containing
`base_channels`, `delta_channels` and optionally a `mult_delta` flag.
For instance, {'base_channels': 24, 'delta_channels': 2}, is equivalent to
{1: 24, 2: 26, 4: 28, 8: 30, 16: 32}, and {'base_channels': 24, 'delta_channels': 2,
mult_delta: True} is equivalent to {1: 24, 2: 48, 4: 96, 8: 192, 16: 384}.
post_upsample_layers: Number of post-upsample layers.
            stem_stride: Stride of the first convolution.
img_size: Image size (width, height).
nb_classes: Number of classes for segmentation.
"""
super().__init__()
assert img_size[0] % 32 == 0 and img_size[1] % 32 == 0, "Image size must be a multiple of 32"
self.graph = OrderedDict([(n["name"], n) for n in graph])
self.node_names = [n["name"] for n in self.graph.values()]
self.channels_per_scale = self._get_channels_per_scale(channels_per_scale)
self.edge_dict = nn.ModuleDict(self._get_edge_list(self.graph, self.channels_per_scale))
self.stem_stride = stem_stride
self.img_size = img_size
self.nb_classes = nb_classes
self.post_upsample_layers = post_upsample_layers
# Checks if the edges are in topological order
self._validate_edges(self.edge_dict)
# Stem block
stem_ch = self.channels_per_scale[self.graph["input"]["scale"]]
self.stem_block = OPS["conv3x3"](3, stem_ch, stride=self.stem_stride)
# Upsample layers
w, h = self.img_size
self.up = nn.Upsample(size=(h, w), mode="nearest")
output_ch = self.channels_per_scale[self.graph["output"]["scale"]]
self.post_upsample = nn.Sequential(
*[
OPS["conv3x3"](
output_ch if i == 0 else self.channels_per_scale[1], self.channels_per_scale[1], stride=1
)
for i in range(self.post_upsample_layers)
]
)
# Classifier
self.classifier = nn.Conv2d(stem_ch, self.nb_classes, kernel_size=1)
@classmethod
def _get_channels_per_scale(
cls,
ch_per_scale: Dict[str, Any],
max_downsample_factor: Optional[int] = 16,
remove_spec: Optional[bool] = False,
) -> Dict[str, Any]:
ch_per_scale = deepcopy(ch_per_scale)
scales = [1, 2, 4, 8, 16]
scales = [s for s in scales if s <= max_downsample_factor]
# Builds `ch_per_scale` using `base_channels` and `delta_channels`
ch_per_scale["mult_delta"] = ch_per_scale.get("mult_delta", False)
assert "base_channels" in ch_per_scale
assert "delta_channels" in ch_per_scale
assert len(ch_per_scale.keys()) == 3, "Must specify only `base_channels`, `delta_channels` and `mult_delta`"
if ch_per_scale["mult_delta"]:
ch_per_scale.update(
{
scale: ch_per_scale["base_channels"] * ch_per_scale["delta_channels"] ** i
for i, scale in enumerate(scales)
}
)
else:
ch_per_scale.update(
{
scale: ch_per_scale["base_channels"] + ch_per_scale["delta_channels"] * i
for i, scale in enumerate(scales)
}
)
if remove_spec:
ch_per_scale.pop("base_channels", None)
ch_per_scale.pop("delta_channels", None)
ch_per_scale.pop("mult_delta", None)
return ch_per_scale
def _get_edge_list(
self, graph: OrderedDict, channels_per_scale: Dict[str, Any]
) -> MutableMapping[Tuple[str, str], nn.Module]:
assert "input" in graph
assert "output" in graph
edges = [
(in_node, node["name"]) for node in graph.values() if node["name"] != "input" for in_node in node["inputs"]
]
# Returns an `OrderedDict` with the mapping "in_node-out_node": nn.Module
return OrderedDict(
[
(
f"{i}-{o}",
Block(
in_ch=channels_per_scale[graph[i]["scale"]],
out_ch=channels_per_scale[graph[o]["scale"]],
in_scale=graph[i]["scale"],
out_scale=graph[o]["scale"],
op_name=graph[i]["op"],
),
)
for i, o in edges
]
)
def _validate_edges(self, edge_dict: MutableMapping[Tuple[str, str], nn.Module]) -> None:
visited_nodes = {"input"}
for edge in edge_dict.keys():
in_node, out_node = edge.split("-")
visited_nodes.add(out_node)
assert (
in_node in visited_nodes
), "SegmentationModel received a list of nodes that is not in topological order"
def forward(self, x: torch.Tensor) -> torch.Tensor:
inputs = {node_name: 0 for node_name in self.node_names}
inputs["input"] = self.stem_block(x)
for edge, module in self.edge_dict.items():
in_node, out_node = edge.split("-")
inputs[out_node] = inputs[out_node] + module(inputs[in_node])
output = self.post_upsample(self.up(inputs["output"]))
return self.classifier(output)
def validate_forward(self, x: torch.Tensor) -> torch.Tensor:
"""Checks if the constructed model is working as expected.
Args:
x: Input tensor.
Returns:
Output tensor.
"""
in_nodes = set()
res_w, res_h = [d // self.stem_stride for d in self.img_size]
inputs = {node_name: 0 for node_name in self.node_names}
inputs["input"] = self.stem_block(x)
for edge, module in self.edge_dict.items():
in_node, out_node = edge.split("-")
in_nodes.add(in_node)
# Checks if the resolution of each node is correct
assert inputs[in_node].shape[2] == int(
res_h // self.graph[in_node]["scale"]
), "Input resolution does not match the node resolution."
assert inputs[in_node].shape[3] == int(
res_w // self.graph[in_node]["scale"]
), "Input resolution does not match the node resolution."
inputs[out_node] = inputs[out_node] + module(inputs[in_node])
assert (
inputs[out_node].shape[1] == self.channels_per_scale[self.graph[out_node]["scale"]]
), "Output channel does not match the node channel scale."
assert all(
node in in_nodes for node in set(self.graph.keys()) - {"output"}
), f'Unused nodes were detected: {set(self.graph.keys()) - in_nodes - set(["output"])}.'
output = self.post_upsample(self.up(inputs["output"]))
return self.classifier(output)
@classmethod
def from_file(
cls, config_file: Union[str, Path], img_size: Optional[Tuple[int, int]] = 256, nb_classes: Optional[int] = 19
) -> SegmentationDagModel:
"""Creates a SegmentationArchaiModel from a YAML config file.
Args:
config_file: Path to the YAML config file, following the format:
>>> post_upsample_layers: 2
>>> channels_per_scale:
>>> 1: 32
>>> 2: 64
>>> architecture:
>>> - name: input
>>> scale: 1
>>> op: conv3x3
>>> inputs: null
>>> - name: node0
>>> scale: 2
>>> op: conv5x5
>>> inputs: [input]
>>> - name: output
>>> scale: 4
>>> op: conv3x3
>>> inputs: [node0, node1]
img_size: The size of the input image.
nb_classes: The number of classes in the dataset.
Returns:
A `SegmentationArchaiModel` instance.
"""
config_file = Path(config_file)
assert config_file.is_file()
config_dict = yaml.safe_load(open(config_file))
return cls(
config_dict["architecture"],
config_dict["channels_per_scale"],
config_dict["post_upsample_layers"],
img_size=img_size,
nb_classes=nb_classes,
)
def view(self) -> Any:
"""Visualizes the architecture using graphviz.
Returns:
A graphviz object.
"""
import graphviz
scales = []
dot = graphviz.Digraph("architecture", graph_attr={"splines": "true", "overlap": "true"})
dot.engine = "neato"
for i, node in enumerate(self.node_names):
scales.append(self.graph[node]["scale"])
dot.node(node, label=self.graph[node]["op"], pos=f"{i*1.5 + 2},-{math.log2(2*scales[-1])}!")
for scale in sorted(list(set(scales))):
dot.node(
f"scale-{scale}",
label=f"scale={2*scale}, ch={self.channels_per_scale[scale]}",
pos=f"-1,-{math.log2(2*scale)}!",
)
for edge in self.edge_dict:
in_node, out_node = edge.split("-")
dot.edge(in_node, out_node)
# Adds post upsample
dot.node("upsample", label=f"Upsample + {self.post_upsample_layers} x Conv 3x3", pos=f"{i*1.5 + 2},0!")
dot.edge("output", "upsample")
# Shows the graph
return dot
def to_config(self) -> Dict[str, Any]:
"""Converts the model to a configuration dictionary.
Returns:
A configuration dictionary.
"""
ch_map = self.channels_per_scale
if "base_channels" in ch_map:
ch_map = {"base_channels": ch_map["base_channels"], "delta_channels": ch_map["delta_channels"]}
# We only put the `mult_delta` flag in config dict if it's active
if self.channels_per_scale["mult_delta"]:
ch_map["mult_delta"] = True
return {
"post_upsample_layers": int(self.post_upsample_layers),
"channels_per_scale": ch_map,
"architecture": list(self.graph.values()),
}
def to_file(self, path: str) -> None:
"""Saves the model to a YAML config file.
Args:
path: Path to the YAML config file.
"""
content = self.to_config()
with open(path, "w") as fp:
fp.write(yaml.dump(content))
m = SegmentationDagModel.from_file(path, self.img_size, self.nb_classes)
assert content["architecture"] == list(m.graph.values())
assert content["post_upsample_layers"] == len(self.post_upsample)
assert all(m.channels_per_scale[k] == v for k, v in content["channels_per_scale"].items())
def to_hash(self) -> str:
"""Generates a hash for the model.
Returns:
A hash string.
"""
config = self.to_config()
arch_str = json.dumps(config, sort_keys=True, ensure_ascii=True)
return sha1(arch_str.encode("ascii")).hexdigest() + f"_{self.img_size[0]}_{self.img_size[1]}"
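# --- Illustrative sketch (not part of the original file) ---
# The `base_channels`/`delta_channels` expansion documented in
# `_get_channels_per_scale`, reproduced standalone for clarity.
if __name__ == "__main__":
    scales = [1, 2, 4, 8, 16]
    base, delta = 24, 2
    additive = {s: base + delta * i for i, s in enumerate(scales)}
    multiplicative = {s: base * delta**i for i, s in enumerate(scales)}  # mult_delta: True
    print(additive)        # {1: 24, 2: 26, 4: 28, 8: 30, 16: 32}
    print(multiplicative)  # {1: 24, 2: 48, 4: 96, 8: 192, 16: 384}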
|
archai/archai/discrete_search/search_spaces/cv/segmentation_dag/model.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/cv/segmentation_dag/model.py",
"repo_id": "archai",
"token_count": 6196
}
| 322 |
from collections import namedtuple
from .mha import MHA
from .causal_self_attn import CausalSelfAttention
from .sep_conv1d import SeparableConv1d
from .sgconv import SGConv
from .sgconv3 import SGConv3
from .local_attention import LocalMHA
from .lsh_attn import LSHAttention
OP = namedtuple(
'Operation', ['cls', 'requires_extra_config', 'deprecated'],
defaults=[None, None, False]
)
OPS = {
    'causal_self_attn': OP(CausalSelfAttention, False, deprecated=True), # For backward compatibility
    'flash_mha': OP(MHA, False, deprecated=True), # For backward compatibility
'mha': OP(MHA, False),
'sep_conv1d': OP(SeparableConv1d, True),
'sgconv': OP(SGConv, True),
'sgconv3': OP(SGConv3, True),
'local_attn': OP(LocalMHA, True),
'lsh_attn': OP(LSHAttention, True)
}
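# --- Illustrative sketch (not part of the original file) ---
# The registry above can be inspected through the namedtuple fields, e.g. to list
# the non-deprecated ops and whether each one expects an extra config:
if __name__ == "__main__":
    for name, entry in OPS.items():
        if not entry.deprecated:
            print(f"{name}: requires_extra_config={entry.requires_extra_config}")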
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/__init__.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/__init__.py",
"repo_id": "archai",
"token_count": 316
}
| 323 |
# TD: [2023-01-05]: Extracted the SSKernelDiag class from
# https://github.com/HazyResearch/state-spaces/blob/06dbbdfd0876501a7f12bf3262121badbc7658af/src/models/sequence/ss/kernel.py
# We make a small change to use the log_vandermonde CUDA code.
"""SSKernelDiag is the S4D kernel, a simpler algorithm for computing the kernel for the case of diagonal state matrices A.
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from opt_einsum import contract
from .utils import OptimModule
class SSKernelShift(OptimModule):
def __init__(self, B, C, L=None, lr=None, **kwargs):
"""
B: (H, d), real
C: (channel, H, d), real
"""
super().__init__()
self.L = L
self.N = B.size(-1)
self.H = B.shape[0]
# Register parameters
if lr is None or isinstance(lr, float): lr_dict = {}
else: lr_dict, lr = lr, None
self.register("B", B, lr_dict.get('B', lr))
self.C = nn.Parameter(C)
def forward(self, state=None, rate=1.0, L=None):
if L is None:
L = self.L
# This class doesn't support variable length functionalities, since it's a discrete SSM
assert rate == 1.0 and L is not None
# Augment B with state
B = self.B
if state is not None:
B = rearrange(torch.cat([rearrange(B, 'h n -> 1 h n'), state], dim=-3),
'bp1 h n -> bp1 1 h n') # (1 + B, 1, H, N)
B_f = torch.fft.rfft(B, n=2*self.N)
C_f = torch.fft.rfft(self.C, n=2*self.N)
k = torch.fft.irfft(B_f.conj() * C_f, n=2*self.N)[..., :min(self.N, L)]
# If self.N < L, need to pad with zeros to reach length L
if self.N < L:
k = F.pad(k, (0, L - self.N))
k = k.float() # Otherwise it could be dtype half
if state is not None:
k, k_state = k[0], k[1:]
else:
k_state = None
return k, k_state
def _setup_step(self):
# Just here to conform to the interface, eventually we should refactor out
pass
def default_state(self, *batch_shape):
return torch.zeros(*batch_shape, self.H, self.N, dtype=self.C.dtype, device=self.C.device)
def step(self, u, state):
"""u: (B, H), state: (B, H, N)"""
next_state = F.pad(state, (1, -1)) + contract("h n, b h -> b h n", self.B, u)
y = contract("c h n, b h n -> b c h", self.C, next_state)
return y, next_state
def forward_state(self, u, state):
"""u: (B, H, L), state: (B, H, N)"""
L = u.shape[-1]
B_f = torch.fft.rfft(self.B, n=2 * self.N)
u_f = torch.fft.rfft(u[..., -self.N:].flip(-1).to(dtype=self.B.dtype), n=2 * self.N)
v = torch.fft.irfft(B_f * u_f, n=2 * self.N)[..., :self.N]
if L < self.N:
next_state = F.pad(state, (L, -L)) + v
else:
next_state = v
return next_state
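# --- Illustrative sketch (not part of the original file) ---
# The state update in `step()` relies on F.pad(state, (1, -1)) acting as a shift
# register along the last dimension: one zero is inserted at the front and the
# last element is dropped. A tiny numeric check:
if __name__ == "__main__":
    state = torch.arange(1.0, 5.0).view(1, 1, 4)  # (B=1, H=1, N=4): [1, 2, 3, 4]
    print(F.pad(state, (1, -1)))                  # tensor([[[0., 1., 2., 3.]]])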
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ss_kernel_shift.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ss_kernel_shift.py",
"repo_id": "archai",
"token_count": 1470
}
| 324 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# Copyright (c) 2018, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0.
from typing import Optional
import torch
import torch.nn as nn
class PositionalEmbedding(nn.Module):
def __init__(self, d_model: int) -> None:
super().__init__()
self.d_model = d_model
inv_freq = 1 / (1e4 ** (torch.arange(0, d_model, 2) / d_model))
self.register_buffer("inv_freq", inv_freq)
def forward(self, inputs: torch.FloatTensor, batch_size: Optional[int] = None) -> torch.FloatTensor:
# Generates a positional embedding through sinusoids
inputs_sinusoid = torch.ger(inputs, self.inv_freq)
embed_pos = torch.cat([inputs_sinusoid.sin(), inputs_sinusoid.cos()], dim=-1)
# If a batch size is supplied, expand the tensor to comply with it
if batch_size is not None:
return embed_pos[:, None, :].expand(-1, batch_size, -1)
else:
return embed_pos[:, None, :]
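# --- Illustrative sketch (not part of the original file) ---
# Expected shapes for the module above: positions (L,) map to embeddings of shape
# (L, 1, d_model), or (L, batch_size, d_model) when a batch size is supplied.
if __name__ == "__main__":
    pos_emb = PositionalEmbedding(d_model=8)
    positions = torch.arange(5, dtype=torch.float)
    print(pos_emb(positions).shape)                # torch.Size([5, 1, 8])
    print(pos_emb(positions, batch_size=2).shape)  # torch.Size([5, 2, 8])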
|
archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/mem_transformer_utils/positional_embedding.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/mem_transformer_utils/positional_embedding.py",
"repo_id": "archai",
"token_count": 417
}
| 325 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from os import environ
from typing import List, Optional
from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions
from archai.common.ordered_dict_logger import OrderedDictLogger
logger = OrderedDictLogger(source=__name__)
def load_from_onnx(onnx_model_path: str, providers: Optional[List[str]] = None) -> InferenceSession:
"""Load an ONNX-based model from file.
This function loads an ONNX-based model from the specified file path and
returns an ONNX inference session. Performance optimization constants are set as well.
Args:
onnx_model_path: Path to the ONNX model file.
providers: List of providers to use for inference.
Returns:
ONNX inference session.
"""
logger.info(f"Loading model: {onnx_model_path}")
# Constants available in ONNXRuntime that enables performance optimization
OMP_NUM_THREADS = 1
environ["OMP_NUM_THREADS"] = str(OMP_NUM_THREADS)
environ["OMP_WAIT_POLICY"] = "ACTIVE"
options = SessionOptions()
options.intra_op_num_threads = OMP_NUM_THREADS
options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL
providers = providers or ["CPUExecutionProvider"]
session = InferenceSession(onnx_model_path, sess_options=options, providers=providers)
session.disable_fallback()
return session
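# --- Illustrative sketch (not part of the original file) ---
# Typical usage, assuming "model.onnx" exists and "input_ids" matches the model's
# actual input name (both are placeholders here):
#
# import numpy as np
# session = load_from_onnx("model.onnx")
# outputs = session.run(None, {"input_ids": np.zeros((1, 8), dtype=np.int64)})
# print([o.shape for o in outputs])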
|
archai/archai/onnx/onnx_loader.py/0
|
{
"file_path": "archai/archai/onnx/onnx_loader.py",
"repo_id": "archai",
"token_count": 481
}
| 326 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import List, Tuple
from overrides import overrides
from archai.common.config import Config
from archai.supergraph.algos.divnas.divop import DivOp
from archai.supergraph.nas.model_desc import (
CellType,
ConvMacroParams,
EdgeDesc,
NodeDesc,
OpDesc,
TensorShape,
TensorShapes,
)
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
from archai.supergraph.nas.operations import Op
class DivnasModelDescBuilder(ModelDescBuilder):
@overrides
def pre_build(self, conf_model_desc:Config)->None:
Op.register_op('div_op',
lambda op_desc, arch_params, affine:
DivOp(op_desc, arch_params, affine))
@overrides
def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
cell_index:int, cell_type:CellType, node_count:int,
in_shape:TensorShape, out_shape:TensorShape) \
->Tuple[TensorShapes, List[NodeDesc]]:
assert in_shape[0]==out_shape[0]
reduction = (cell_type==CellType.Reduction)
nodes:List[NodeDesc] = []
conv_params = ConvMacroParams(in_shape[0], out_shape[0])
# add div op for each edge in each node
        # How does the stride work? For all ops connected to s0 and s1, we apply
        # reduction in WxH. All ops connected elsewhere automatically get
        # reduced WxH (because all subsequent states are derived from s0 and s1).
        # Note that the channel count is increased via conv_params for the cell
for i in range(node_count):
edges=[]
for j in range(i+2):
op_desc = OpDesc('div_op',
params={
'conv': conv_params,
'stride': 2 if reduction and j < 2 else 1
}, in_len=1, trainables=None, children=None)
edge = EdgeDesc(op_desc, input_ids=[j])
edges.append(edge)
nodes.append(NodeDesc(edges=edges, conv_params=conv_params))
out_shapes = [copy.deepcopy(out_shape) for _ in range(node_count)]
return out_shapes, nodes
|
archai/archai/supergraph/algos/divnas/divnas_model_desc_builder.py/0
|
{
"file_path": "archai/archai/supergraph/algos/divnas/divnas_model_desc_builder.py",
"repo_id": "archai",
"token_count": 1071
}
| 327 |
- support only one stem in model
- enable model init strategies
- Output node is vertex_op(sum(edges)) + projected input
- Yaml:
- every cell reduction, cell stem = max_pool, model stem
- Model desc: model matrix, vetex ops
- Do we need alphas?
- Do we need to modify finalizers?
- Ops() overriding in NasBench101Op
- Do we need to specify children in OpDesc?
|
archai/archai/supergraph/algos/nasbench101/TODO.md/0
|
{
"file_path": "archai/archai/supergraph/algos/nasbench101/TODO.md",
"repo_id": "archai",
"token_count": 108
}
| 328 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import shutil
from overrides import overrides
from archai.common import utils
from archai.supergraph.algos.petridish.evaluater_petridish import EvaluaterPetridish
from archai.supergraph.algos.petridish.petridish_model_desc_builder import (
PetridishModelBuilder,
)
from archai.supergraph.algos.petridish.searcher_petridish import SearcherPetridish
from archai.supergraph.nas.arch_trainer import ArchTrainer, TArchTrainer
from archai.supergraph.nas.exp_runner import ExperimentRunner
def copy_dir(src_dir:str, dest_dir:str, use_shutil:bool=True)->None:
if os.path.isdir(src_dir):
if use_shutil:
shutil.copytree(src_dir, dest_dir)
else:
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
files = os.listdir(src_dir)
for f in files:
copy_dir(os.path.join(src_dir, f),
os.path.join(dest_dir, f), use_shutil=use_shutil)
else:
utils.copy_file(src_dir, dest_dir, use_shutil=use_shutil)
class PetridishExperimentRunner(ExperimentRunner):
@overrides
def model_desc_builder(self)->PetridishModelBuilder:
return PetridishModelBuilder()
@overrides
def trainer_class(self)->TArchTrainer:
return ArchTrainer
@overrides
def searcher(self)->SearcherPetridish:
return SearcherPetridish()
@overrides
def evaluater(self)->EvaluaterPetridish:
return EvaluaterPetridish()
@overrides
def copy_search_to_eval(self)->None:
# get folder of model gallery that search has produced
conf_search = self.get_conf(True)['nas']['search']
search_desc_foldername = conf_search['final_desc_foldername']
search_desc_folderpath = utils.full_path(search_desc_foldername)
assert search_desc_foldername and os.path.exists(search_desc_folderpath)
# get folder path that eval would need
conf_eval = self.get_conf(False)['nas']['eval']
eval_desc_foldername = conf_eval['final_desc_foldername']
eval_desc_folderpath = utils.full_path(eval_desc_foldername)
assert eval_desc_folderpath
# only later version of shutil copytree has dirs_exists_ok option
# so being robust to pre-existing directory
if os.path.exists(eval_desc_folderpath):
shutil.rmtree(eval_desc_folderpath)
utils.copy_dir(search_desc_folderpath, eval_desc_folderpath, use_shutil=True)
|
archai/archai/supergraph/algos/petridish/petridish_exp_runner.py/0
|
{
"file_path": "archai/archai/supergraph/algos/petridish/petridish_exp_runner.py",
"repo_id": "archai",
"token_count": 1059
}
| 329 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional, Tuple
from torch.utils.data import DataLoader, Dataset, Sampler
from archai.common import apex_utils, utils
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.datasets.distributed_stratified_sampler import DistributedStratifiedSampler
from archai.supergraph.datasets.augmentation import add_named_augs
from archai.supergraph.datasets.dataset_provider import (
DatasetProvider,
get_provider_type,
)
from archai.supergraph.datasets.limit_dataset import DatasetLike
logger = get_global_logger()
class DataLoaders:
def __init__(self, train_dl:Optional[DataLoader]=None,
val_dl:Optional[DataLoader]=None,
test_dl:Optional[DataLoader]=None) -> None:
self.train_dl = train_dl
self.val_dl = val_dl
self.test_dl = test_dl
def get_data(conf_loader:Config)->DataLoaders:
logger.pushd('data')
# region conf vars
# dataset
conf_dataset = conf_loader['dataset']
max_batches = conf_dataset['max_batches']
aug = conf_loader['aug']
cutout = conf_loader['cutout']
val_ratio = conf_loader['val_ratio']
val_fold = conf_loader['val_fold']
img_size = conf_loader.get('img_size', None)
load_train = conf_loader['load_train']
train_batch = conf_loader['train_batch']
train_workers = conf_loader['train_workers']
load_test = conf_loader['load_test']
test_batch = conf_loader['test_batch']
test_workers = conf_loader['test_workers']
conf_apex = conf_loader['apex']
# endregion
ds_provider = create_dataset_provider(conf_dataset)
apex = apex_utils.ApexUtils(conf_apex)
train_dl, val_dl, test_dl = get_dataloaders(ds_provider,
load_train=load_train, train_batch_size=train_batch,
load_test=load_test, test_batch_size=test_batch,
aug=aug, cutout=cutout, val_ratio=val_ratio, val_fold=val_fold,
img_size=img_size, train_workers=train_workers,
test_workers=test_workers, max_batches=max_batches, apex=apex)
assert train_dl is not None
logger.popd()
return DataLoaders(train_dl=train_dl, val_dl=val_dl, test_dl=test_dl)
def create_dataset_provider(conf_dataset:Config)->DatasetProvider:
ds_name = conf_dataset['name']
dataroot = utils.full_path(conf_dataset['dataroot'])
storage_name = conf_dataset['storage_name']
logger.info({'ds_name': ds_name, 'dataroot':dataroot, 'storage_name':storage_name})
ds_provider_type = get_provider_type(ds_name)
return ds_provider_type(conf_dataset)
def get_dataloaders(ds_provider:DatasetProvider,
load_train:bool, train_batch_size:int,
load_test:bool, test_batch_size:int,
aug, cutout:int, val_ratio:float, apex:apex_utils.ApexUtils,
val_fold=0, img_size:Optional[int]=None, train_workers:Optional[int]=None,
test_workers:Optional[int]=None, target_lb=-1, max_batches:int=-1) \
-> Tuple[Optional[DataLoader], Optional[DataLoader], Optional[DataLoader]]:
# if debugging in vscode, workers > 0 gets termination
default_workers = 4
if utils.is_debugging():
train_workers = test_workers = 0
logger.warn({'debugger': True})
if train_workers is None:
train_workers = default_workers # following NVidia DeepLearningExamples
if test_workers is None:
test_workers = default_workers
train_workers = round((1-val_ratio)*train_workers)
val_workers = round(val_ratio*train_workers)
logger.info({'train_workers': train_workers, 'val_workers': val_workers,
'test_workers':test_workers})
transform_train, transform_test = ds_provider.get_transforms(img_size)
add_named_augs(transform_train, aug, cutout)
trainset, testset = _get_datasets(ds_provider,
load_train, load_test, transform_train, transform_test)
# TODO: below will never get executed, set_preaug does not exist in PyTorch
# if total_aug is not None and augs is not None:
# trainset.set_preaug(augs, total_aug)
# logger.info('set_preaug-')
trainloader, validloader, testloader, train_sampler = None, None, None, None
if trainset:
max_train_fold = min(len(trainset), max_batches*train_batch_size) if max_batches else None # pyright: ignore[reportGeneralTypeIssues]
logger.info({'val_ratio': val_ratio, 'max_train_batches': max_batches,
'max_train_fold': max_train_fold})
# sample validation set from trainset if cv_ratio > 0
train_sampler, valid_sampler = _get_sampler(trainset, val_ratio=val_ratio,
shuffle=True, apex=apex,
max_items=max_train_fold)
logger.info({'train_sampler_world_size':train_sampler.world_size,
'train_sampler_rank':train_sampler.rank,
'train_sampler_len': len(train_sampler)})
if valid_sampler:
logger.info({'valid_sampler_world_size':valid_sampler.world_size,
'valid_sampler_rank':valid_sampler.rank,
'valid_sampler_len': len(valid_sampler)
})
# shuffle is performed by sampler at each epoch
trainloader = DataLoader(trainset,
batch_size=train_batch_size, shuffle=False,
num_workers=train_workers,
pin_memory=True,
sampler=train_sampler, drop_last=False) # TODO: original paper has this True
if val_ratio > 0.0:
validloader = DataLoader(trainset,
batch_size=train_batch_size, shuffle=False,
num_workers=val_workers,
pin_memory=True,
sampler=valid_sampler, drop_last=False)
# else validloader is left as None
if testset:
max_test_fold = min(len(testset), max_batches*test_batch_size) if max_batches else None # pyright: ignore[reportGeneralTypeIssues]
logger.info({'max_test_batches': max_batches,
'max_test_fold': max_test_fold})
test_sampler, test_val_sampler = _get_sampler(testset, val_ratio=None,
shuffle=False, apex=apex,
max_items=max_test_fold)
logger.info({'test_sampler_world_size':test_sampler.world_size,
'test_sampler_rank':test_sampler.rank,
'test_sampler_len': len(test_sampler)})
assert test_val_sampler is None
testloader = DataLoader(testset,
batch_size=test_batch_size, shuffle=False,
num_workers=test_workers,
pin_memory=True,
sampler=test_sampler, drop_last=False
)
assert val_ratio > 0.0 or validloader is None
logger.info({
'train_batch_size': train_batch_size, 'test_batch_size': test_batch_size,
'train_batches': len(trainloader) if trainloader is not None else None,
'val_batches': len(validloader) if validloader is not None else None,
'test_batches': len(testloader) if testloader is not None else None
})
return trainloader, validloader, testloader
class SubsetSampler(Sampler):
"""Samples elements from a given list of indices, without replacement.
Arguments:
indices (sequence): a sequence of indices
"""
def __init__(self, indices):
self.indices = indices
def __iter__(self):
return (i for i in self.indices)
def __len__(self):
return len(self.indices)
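# --- Illustrative sketch (not part of the original file) ---
# SubsetSampler can be handed to a DataLoader to iterate over a fixed list of
# indices in the given order (no shuffling, no replacement), e.g.:
#
# loader = DataLoader(trainset, batch_size=32, sampler=SubsetSampler([0, 2, 4, 6]))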
def _get_datasets(ds_provider:DatasetProvider, load_train:bool, load_test:bool,
transform_train, transform_test)\
->Tuple[DatasetLike, DatasetLike]:
trainset, testset = ds_provider.get_datasets(load_train, load_test,
transform_train, transform_test)
return trainset, testset
# target_lb allows to filter dataset for a specific class, not used
def _get_sampler(dataset:Dataset, val_ratio:Optional[float], shuffle:bool,
max_items:Optional[int], apex:apex_utils.ApexUtils)\
->Tuple[DistributedStratifiedSampler, Optional[DistributedStratifiedSampler]]:
world_size, global_rank = apex.world_size, apex.global_rank
    # we cannot shuffle just for train or just for val because in distributed mode both must come from the same shard
train_sampler = DistributedStratifiedSampler(dataset,
val_ratio=val_ratio, is_val_split=False, shuffle=shuffle,
max_samples=max_items, world_size=world_size, rank=global_rank)
valid_sampler = DistributedStratifiedSampler(dataset,
val_ratio=val_ratio, is_val_split=True, shuffle=shuffle,
max_samples=max_items, world_size=world_size, rank=global_rank) \
if val_ratio is not None else None
return train_sampler, valid_sampler
|
archai/archai/supergraph/datasets/data.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/data.py",
"repo_id": "archai",
"token_count": 3932
}
| 330 |
# -*- coding: utf-8 -*-
import math
import torch.nn as nn
import torch.nn.functional as F
from archai.supergraph.models.shakeshake.shakeshake import ShakeShake, Shortcut
class ShakeBlock(nn.Module):
def __init__(self, in_ch, out_ch, stride=1):
super(ShakeBlock, self).__init__()
self.equal_io = in_ch == out_ch
        self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch, stride=stride)
self.branch1 = self._make_branch(in_ch, out_ch, stride)
self.branch2 = self._make_branch(in_ch, out_ch, stride)
def forward(self, x):
h1 = self.branch1(x)
h2 = self.branch2(x)
h = ShakeShake.apply(h1, h2, self.training)
h0 = x if self.equal_io else self.shortcut(x)
return h + h0
def _make_branch(self, in_ch, out_ch, stride=1):
return nn.Sequential(
nn.ReLU(inplace=False),
nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=stride, bias=False),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=False),
nn.Conv2d(out_ch, out_ch, 3, padding=1, stride=1, bias=False),
nn.BatchNorm2d(out_ch))
class ShakeResNet(nn.Module):
def __init__(self, depth, w_base, label):
super(ShakeResNet, self).__init__()
n_units = (depth - 2) / 6
in_chs = [16, w_base, w_base * 2, w_base * 4]
self.in_chs = in_chs
self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)
self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])
self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)
self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)
self.fc_out = nn.Linear(in_chs[3], label)
        # Initialize parameters
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.in_chs[3])
h = self.fc_out(h)
return h
def _make_layer(self, n_units, in_ch, out_ch, stride=1):
layers = []
for i in range(int(n_units)):
layers.append(ShakeBlock(in_ch, out_ch, stride=stride))
in_ch, stride = out_ch, 1
return nn.Sequential(*layers)
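# --- Illustrative sketch (not part of the original file) ---
# Shape check for a small ShakeResNet on CIFAR-sized inputs; depth must satisfy
# depth = 6*n + 2 so that (depth - 2) / 6 is a whole number of units per stage.
if __name__ == "__main__":
    import torch
    model = ShakeResNet(depth=26, w_base=64, label=10)
    x = torch.randn(2, 3, 32, 32)
    print(model(x).shape)  # torch.Size([2, 10])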
|
archai/archai/supergraph/models/shakeshake/shake_resnet.py/0
|
{
"file_path": "archai/archai/supergraph/models/shakeshake/shake_resnet.py",
"repo_id": "archai",
"token_count": 1442
}
| 331 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import List, Optional, Tuple
from overrides import EnforceOverrides
from archai.common.config import Config
from archai.supergraph.nas.model_desc import (
AuxTowerDesc,
CellDesc,
CellType,
ConvMacroParams,
ModelDesc,
NodeDesc,
OpDesc,
TensorShape,
TensorShapes,
TensorShapesList,
)
from archai.supergraph.nas.operations import Op, StemBase
class ModelDescBuilder(EnforceOverrides):
def get_reduction_indices(self, conf_model_desc:Config)->List[int]:
""" Returns cell indices which reduces HxW and doubles channels """
n_cells:int = conf_model_desc['n_cells']
n_reductions:int = conf_model_desc['n_reductions']
        # this satisfies the N R N R N pattern; it need not be enforced but
        # we do it now for sanity
assert n_cells >= n_reductions * 2 + 1
        # for each reduction, we create one index
# for cifar and imagenet, reductions=2 creating cuts at n//3, n*2//3
return list(n_cells*(i+1) // (n_reductions+1) \
for i in range(n_reductions))
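        # e.g. (illustration) n_cells=8, n_reductions=2 gives
        # [8*1//3, 8*2//3] == [2, 5], i.e. the [N N R N N R N N] layout
        # described in get_cell_type() below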
def get_node_channels(self, conf_model_desc:Config)->List[List[int]]:
""" Returns array of channels for each node in each cell. All nodes
are assumed to have same output channels as input channels. """
conf_model_stems = self.get_conf_model_stems()
conf_cell = self.get_conf_cell()
init_node_ch:int = conf_model_stems['init_node_ch']
n_cells = conf_model_desc['n_cells']
n_nodes = conf_cell['n_nodes']
# same channels for all nodes in a cell
cell_node_channels:List[List[int]] = []
# channels for the first cell
node_channels = init_node_ch
for ci in range(n_cells):
# if reduction cell than double the node channels
if self.get_cell_type(ci)==CellType.Reduction:
node_channels *= 2
# all nodes in a cell have same channels
nodes_channels = [node_channels for ni in range(n_nodes)]
            cell_node_channels.append(nodes_channels)
return cell_node_channels
def get_conf_cell(self)->Config:
return self.conf_model_desc['cell']
def get_conf_dataset(self)->Config:
return self.conf_model_desc['dataset']
def get_conf_model_stems(self)->Config:
return self.conf_model_desc['model_stems']
def _init_build(self, conf_model_desc: Config,
template:Optional[ModelDesc]=None)->None:
self.conf_model_desc = conf_model_desc
self.template = template
# if template model desc is specified then setup regular and reduction cell templates
self._cell_templates = self.create_cell_templates(template)
n_cells = conf_model_desc['n_cells']
        # for each reduction, we create one index
# for cifar and imagenet, reductions=2 creating cuts at n//3, n*2//3
self._reduction_indices = self.get_reduction_indices(conf_model_desc)
self._normal_indices = [i for i in range(n_cells)\
if i not in self._reduction_indices]
self.node_channels = self.get_node_channels(conf_model_desc)
def build(self, conf_model_desc: Config,
template:Optional[ModelDesc]=None)->ModelDesc:
"""main entry point for the class"""
self._init_build(conf_model_desc, template)
self.pre_build(conf_model_desc)
# input shape for the stem has same channels as channels in image
# -1 indicates, actual dimensions are not known
ds_ch = self.get_conf_dataset()['channels']
in_shapes = [[[ds_ch, -1, -1, -1]]]
# create model stems
model_stems = self.build_model_stems(in_shapes, conf_model_desc)
# create cell descriptions
cell_descs, aux_tower_descs = self.build_cells(in_shapes, conf_model_desc)
model_pool_op = self.build_model_pool(in_shapes, conf_model_desc)
logits_op = self.build_logits_op(in_shapes, conf_model_desc)
return ModelDesc(conf_model_desc, model_stems, model_pool_op, cell_descs,
aux_tower_descs, logits_op)
def build_cells(self, in_shapes:TensorShapesList, conf_model_desc:Config)\
->Tuple[List[CellDesc], List[Optional[AuxTowerDesc]]]:
conf_cell = self.get_conf_cell()
n_cells = conf_model_desc['n_cells']
cell_descs, aux_tower_descs = [], []
# create list of output shapes for cells that starts with model stem
for ci in range(n_cells):
cell_desc = self.build_cell(in_shapes, conf_cell, ci)
# get first tensor output of last cell
aux_tower_desc = self.build_aux_tower(in_shapes[-1][0], conf_model_desc, ci)
cell_descs.append(cell_desc)
aux_tower_descs.append(aux_tower_desc)
return cell_descs, aux_tower_descs
def get_node_count(self, cell_index:int)->int:
return len(self.node_channels[cell_index])
def build_cell(self, in_shapes:TensorShapesList, conf_cell:Config,
cell_index:int) ->CellDesc:
stem_shapes, stems = self.build_cell_stems(in_shapes, conf_cell, cell_index)
cell_type = self.get_cell_type(cell_index)
if self.template is None:
node_count = self.get_node_count(cell_index)
in_shape = stem_shapes[0] # input shape to noded is same as cell stem
out_shape = stem_shapes[0] # we ask nodes to keep the output shape same
node_shapes, nodes = self.build_nodes(stem_shapes, conf_cell,
cell_index, cell_type, node_count, in_shape, out_shape)
else:
node_shapes, nodes = self.build_nodes_from_template(stem_shapes, conf_cell, cell_index)
post_op_shape, post_op_desc = self.build_cell_post_op(stem_shapes,
node_shapes, conf_cell, cell_index)
cell_desc = CellDesc(
id=cell_index, cell_type=self.get_cell_type(cell_index),
conf_cell=conf_cell,
stems=stems, stem_shapes=stem_shapes,
nodes=nodes, node_shapes=node_shapes,
post_op=post_op_desc, out_shape=post_op_shape,
trainables_from=self.get_trainables_from(cell_index)
)
# output same shape twice to indicate s0 and s1 inputs for next cell
in_shapes.append([post_op_shape])
return cell_desc
def get_trainables_from(self, cell_index:int)->int:
cell_type = self.get_cell_type(cell_index)
if cell_type == CellType.Reduction:
return self._reduction_indices[0]
if cell_type == CellType.Regular:
return self._normal_indices[0]
        raise RuntimeError(f'Cannot get cell for shared trainables because cell_type "{cell_type}" is not recognized')
def get_ch(self, shape:TensorShape)->int:
return int(shape[0])
def build_cell_stems(self, in_shapes:TensorShapesList, conf_cell:Config,
cell_index:int)\
->Tuple[TensorShapes, List[OpDesc]]:
# expect two stems, both should have same channels
# TODO: support multiple stems
assert len(in_shapes) >= 2, "we must have outputs from at least two previous modules"
# Get channels for previous two layers.
# At start we have only one layer, i.e., model stems.
# Typically model stems should have same channel count but for imagenet we do
# reduction at model stem so stem1 will have twice channels as stem0
p_ch_out = self.get_ch(in_shapes[-1][0])
pp_ch_out = self.get_ch(in_shapes[-2][0])
# was the previous layer reduction layer?
reduction_p = p_ch_out == pp_ch_out*2 or in_shapes[-2][0][2] == in_shapes[-1][0][2]*2
# find out the node channels for this cell
node_ch_out = self.node_channels[cell_index][0] # init with first node in cell
        # Cell stems will take prev channels and output the same channels as the nodes.
# If prev cell was reduction then we need to increase channels of prev-prev
# by 2X. This is done by prepr_reduce stem.
s0_op = OpDesc('prepr_reduce' if reduction_p else 'prepr_normal',
params={
'conv': ConvMacroParams(pp_ch_out, node_ch_out)
}, in_len=1, trainables=None)
s1_op = OpDesc('prepr_normal',
params={
'conv': ConvMacroParams(p_ch_out, node_ch_out)
}, in_len=1, trainables=None)
# output two shapes with proper channels setup
# for default model desc, cell stems have same shapes and channels
out_shape0 = copy.deepcopy(in_shapes[-1][0])
# set channels and reset shapes to -1 to indicate unknown
# for imagenet HxW would be floating point numbers due to one input reduced
out_shape0[0], out_shape0[2], out_shape0[3] = node_ch_out, -1, -1
out_shape1 = copy.deepcopy(out_shape0)
return [out_shape0, out_shape1], [s0_op, s1_op]
def build_nodes_from_template(self, stem_shapes:TensorShapes, conf_cell:Config,
cell_index:int) \
->Tuple[TensorShapes, List[NodeDesc]]:
cell_template = self.get_cell_template(cell_index)
assert cell_template is not None
cell_type = self.get_cell_type(cell_index)
assert cell_template.cell_type==cell_type
nodes:List[NodeDesc] = []
for n in cell_template.nodes():
edges_copy = [e.clone(
# use new macro params
conv_params=ConvMacroParams(self.get_ch(stem_shapes[0]),
self.get_ch(stem_shapes[0])),
# TODO: check for compatibility?
clear_trainables=True
) for e in n.edges]
nodes.append(NodeDesc(edges=edges_copy, conv_params=n.conv_params))
out_shapes = [copy.deepcopy(stem_shapes[0]) for _ in cell_template.nodes()]
return out_shapes, nodes
def build_nodes(self, stem_shapes:TensorShapes, conf_cell:Config,
cell_index:int, cell_type:CellType, node_count:int,
in_shape:TensorShape, out_shape:TensorShape) \
->Tuple[TensorShapes, List[NodeDesc]]:
# default: create nodes with empty edges
nodes:List[NodeDesc] = [NodeDesc(edges=[],
conv_params=ConvMacroParams(
self.get_ch(in_shape),
self.get_ch(out_shape)))
for _ in range(node_count)]
out_shapes = [copy.deepcopy(out_shape) for _ in range(node_count)]
return out_shapes, nodes
def create_cell_templates(self, template:Optional[ModelDesc])\
->List[Optional[CellDesc]]:
normal_template, reduction_template = None, None
if template is not None:
# find first regular and reduction cells and set them as
# the template that we will use. When we create new cells
# we will fill them up with nodes from these templates
for cell_desc in template.cell_descs():
if normal_template is None and \
cell_desc.cell_type==CellType.Regular:
normal_template = cell_desc
if reduction_template is None and \
cell_desc.cell_type==CellType.Reduction:
reduction_template = cell_desc
return [normal_template, reduction_template]
def build_model_pool(self, in_shapes:TensorShapesList, conf_model_desc:Config)\
->OpDesc:
model_post_op = conf_model_desc['model_post_op']
last_shape = in_shapes[-1][0]
in_shapes.append([copy.deepcopy(last_shape)])
return OpDesc(model_post_op,
params={'conv': ConvMacroParams(self.get_ch(last_shape),
self.get_ch(last_shape))},
in_len=1, trainables=None)
def build_logits_op(self, in_shapes:TensorShapesList, conf_model_desc:Config)->OpDesc:
n_classes = self.get_conf_dataset()['n_classes']
return OpDesc('linear',
params={'n_ch':in_shapes[-1][0][0],
'n_classes': n_classes},
in_len=1, trainables=None)
def get_cell_template(self, cell_index:int)->Optional[CellDesc]:
cell_type = self.get_cell_type(cell_index)
if cell_type==CellType.Regular:
return self._cell_templates[0]
if cell_type==CellType.Reduction:
return self._cell_templates[1]
        raise RuntimeError(f'Cannot get cell template because cell_type "{cell_type}" is not recognized')
def get_cell_type(self, cell_index:int)->CellType:
# For darts, n_cells=8 so we build [N N R N N R N N] structure
# Notice that this will result in only 2 reduction cells no matter
# total number of cells. Original resnet actually have 3 reduction cells.
# Between two reduction cells we have regular cells.
return CellType.Reduction if cell_index in self._reduction_indices \
else CellType.Regular
def _post_op_ch(self, post_op_name:str, node_shapes:TensorShapes) \
->Tuple[int, int, int]:
node_count = len(node_shapes)
node_ch_out = self.get_ch(node_shapes[-1])
# we take all available node outputs as input to post op
# if no nodes exist then we will use cell stem outputs
# Note that for reduction cell stems wxh is larger than node wxh which
# means we cannot use cell stem outputs with node outputs because
        # the concatenation would fail
# TODO: remove hard coding of 2
out_states = node_count if node_count else 2
# number of input channels to the cell post op
op_ch_in = out_states * node_ch_out
# number of output channels for the cell post op
if post_op_name == 'concate_channels':
cell_ch_out = op_ch_in
elif post_op_name == 'proj_channels':
cell_ch_out = node_ch_out
else:
raise RuntimeError(f'Unsupported cell_post_op: {post_op_name}')
return op_ch_in, cell_ch_out, out_states
def build_cell_post_op(self, stem_shapes:TensorShapes,
node_shapes:TensorShapes, conf_cell:Config, cell_index:int)\
-> Tuple[TensorShape, OpDesc]:
post_op_name = conf_cell['cell_post_op']
op_ch_in, cell_ch_out, out_states = self._post_op_ch(post_op_name,
node_shapes)
post_op_desc = OpDesc(post_op_name,
{
'conv': ConvMacroParams(op_ch_in, cell_ch_out),
'out_states': out_states
},
in_len=1, trainables=None, children=None)
out_shape = copy.deepcopy(node_shapes[-1])
out_shape[0] = cell_ch_out
return out_shape, post_op_desc
def build_aux_tower(self, out_shape:TensorShape, conf_model_desc:Config,
cell_index:int)->Optional[AuxTowerDesc]:
n_classes = self.get_conf_dataset()['n_classes']
n_cells = conf_model_desc['n_cells']
n_reductions = conf_model_desc['n_reductions']
aux_tower_stride = conf_model_desc['aux_tower_stride']
aux_weight = conf_model_desc['aux_weight']
# TODO: shouldn't we be adding aux tower at *every* 1/3rd?
if aux_weight and n_reductions > 1 and cell_index == 2*n_cells//3:
return AuxTowerDesc(self.get_ch(out_shape), n_classes, aux_tower_stride)
return None
def build_model_stems(self, in_shapes:TensorShapesList,
conf_model_desc:Config)->List[OpDesc]:
# TODO: why do we need stem_multiplier?
# TODO: in original paper stems are always affine
conf_model_stems = self.get_conf_model_stems()
init_node_ch:int = conf_model_stems['init_node_ch']
stem_multiplier:int = conf_model_stems['stem_multiplier']
ops:List[str] = conf_model_stems['ops']
out_channels = init_node_ch*stem_multiplier
conv_params = ConvMacroParams(self.get_ch(in_shapes[-1][0]), # channels of first input tensor
init_node_ch*stem_multiplier)
stems = [OpDesc(name=op_name, params={'conv': conv_params},
in_len=1, trainables=None) \
for op_name in ops]
# get reduction factors done by each stem, typically they should be same but for
# imagenet they can differ
stem_reductions = ModelDescBuilder._stem_reductions(stems)
# Each cell takes input from previous and 2nd previous cells.
        # To be consistent we create two outputs for model stems: [[s1, s0], [s0, s1]]
        # This way when we access first element of each output we get s1, s0.
        # Normally s0==s1 but for networks like imagenet, s0 will have twice the channels
        # of s1.
for stem_reduction in stem_reductions:
in_shapes.append([[out_channels, -1, -1.0/stem_reduction, -1.0/stem_reduction]])
return stems
@staticmethod
def _stem_reductions(stems:List[OpDesc])->List[int]:
# create stem ops to find out reduction factors
ops = [Op.create(stem, affine=False) for stem in stems]
assert all(isinstance(op, StemBase) for op in ops)
return list(op.reduction for op in ops)
def pre_build(self, conf_model_desc:Config)->None:
"""hook for accomplishing any setup before build starts"""
pass
def seed_cell(self, model_desc:ModelDesc)->None:
# prepare model as seed model before search iterations starts
pass
|
archai/archai/supergraph/nas/model_desc_builder.py/0
|
{
"file_path": "archai/archai/supergraph/nas/model_desc_builder.py",
"repo_id": "archai",
"token_count": 8352
}
| 332 |
__include__: "../datasets/cifar10.yaml" # default dataset settings are for cifar
common:
experiment_name: 'throwaway' # you should supply from command line
experiment_desc: 'throwaway'
logdir: '~/logdir'
  log_prefix: 'log' # prefix for log files that will be created (log.log and log.yaml), no log files if ''
log_level: 20 # logging.INFO
backup_existing_log_file: False # should we overwrite existing log file without making a copy?
yaml_log: True # if True, structured logs as yaml are also generated
seed: 2.0
tb_enable: False # if True then TensorBoard logging is enabled (may impact perf)
tb_dir: '$expdir/tb' # path where tensorboard logs would be stored
checkpoint:
filename: '$expdir/checkpoint.pth'
freq: 10
  # redis address of Ray cluster. Use None for single node run
  # otherwise it should be something like host:6379. Make sure to run on head node:
  # "ray start --head --redis-port=6379"
redis: null
  apex: # this is overridden in search and eval individually
enabled: False # global switch to disable everything apex
distributed_enabled: True # enable/disable distributed mode
    mixed_prec_enabled: True # switch to enable/disable amp mixed precision
    gpus: '' # use GPU IDs specified here (comma separated), if '' then use all GPUs
    sync_bn: False # should we replace BNs with sync BNs for distributed model
    scale_lr: True # scale learning rate in distributed mode
min_world_size: 0 # allows to confirm we are indeed in distributed setting
detect_anomaly: False # if True, PyTorch code will run 6X slower
seed: '_copy: /common/seed'
ray:
enabled: False # initialize ray. Note: ray cannot be used if apex distributed is enabled
local_mode: False # if True then ray runs in serial mode
smoke_test: False
only_eval: False
resume: True
dataset: {} # default dataset settings comes from __include__ on the top
nas:
eval:
full_desc_filename: '$expdir/full_model_desc.yaml' # model desc used for building model for evaluation
final_desc_filename: '$expdir/final_model_desc.yaml' # model desc used as template to construct cells
# If below is specified then final_desc_filename is ignored and model is created through factory function instead.
# This is useful for running eval for manually designed models such as resnet-50.
    # The value is a string of the form 'some.namespace.module.function'. The function takes no required args and returns an nn.Module.
model_factory_spec: ''
metric_filename: '$expdir/eval_train_metrics.yaml'
model_filename: '$expdir/model.pt' # file to which trained model will be saved
data_parallel: False
checkpoint:
_copy: '/common/checkpoint'
resume: '_copy: /common/resume'
model_desc:
n_reductions: 2 # number of reductions to be applied
n_cells: 20 # number of cells
dataset:
_copy: '/dataset'
max_final_edges: 2 # max edge that can be in final arch per node
model_post_op: 'pool_adaptive_avg2d'
params: {} # additional custom params for model desc
aux_weight: 0.4 # weight for loss from auxiliary towers in test time arch
aux_tower_stride: 3 # stride that aux tower should use, 3 is good for 32x32 images, 2 for imagenet
model_stems:
ops: ['stem_conv3x3', 'stem_conv3x3']
init_node_ch: 36 # num of input/output channels for nodes in 1st cell
stem_multiplier: 3 # output channels multiplier for the stem
cell:
n_nodes: 4 # number of nodes in a cell
cell_post_op: 'concate_channels'
loader:
apex:
_copy: '../../trainer/apex'
aug: '' # additional augmentations to use, for ex, fa_reduced_cifar10, arsaug, autoaug_cifar10, autoaug_extend
cutout: 16 # cutout length, use cutout augmentation when > 0
load_train: True # load train split of dataset
train_batch: 96 # 96 is too aggressive for 1080Ti, better set it to 68
train_workers: 4
test_workers: '_copy: ../train_workers' # if null then 4
load_test: True # load test split of dataset
test_batch: 1024
      val_ratio: 0.0 #split portion for validation set, 0 to 1
val_fold: 0 #Fold number to use (0 to 4)
cv_num: 5 # total number of folds available
dataset:
_copy: '/dataset'
trainer:
apex:
_copy: '/common/apex'
aux_weight: '_copy: /nas/eval/model_desc/aux_weight'
drop_path_prob: 0.2 # probability that given edge will be dropped
grad_clip: 5.0 # grads above this value is clipped
l1_alphas: 0.0 # weight to be applied to sum(abs(alphas)) to loss term
logger_freq: 1000 # after every N updates dump loss and other metrics in logger
title: 'eval_train'
epochs: 600
batch_chunks: 1 # split batch into these many chunks and accumulate gradients so we can support GPUs with lower RAM
lossfn:
type: 'CrossEntropyLoss'
optimizer:
type: 'sgd'
lr: 0.025 # init learning rate
decay: 3.0e-4 # pytorch default is 0.0
momentum: 0.9 # pytorch default is 0.0
nesterov: False # pytorch default is False
decay_bn: .NaN # if NaN then same as decay otherwise apply different decay to BN layers
lr_schedule:
type: 'cosine'
        min_lr: 0.001 # min learning rate to be set in eta_min param of scheduler
warmup: # increases LR for 0 to current in specified epochs and then hands over to main scheduler
multiplier: 1
epochs: 0 # 0 disables warmup
validation:
title: 'eval_test'
batch_chunks: '_copy: ../../batch_chunks' # split batch into these many chunks and accumulate gradients so we can support GPUs with lower RAM
logger_freq: 0
freq: 1 # perform validation only every N epochs
lossfn:
type: 'CrossEntropyLoss'
search:
finalizer: 'default' # options are 'random' or 'default'
data_parallel: False
checkpoint:
_copy: '/common/checkpoint'
resume: '_copy: /common/resume'
search_iters: 1
full_desc_filename: '$expdir/full_model_desc.yaml' # arch before it was finalized
final_desc_filename: '$expdir/final_model_desc.yaml' # final arch is saved in this file
metrics_dir: '$expdir/models/{reductions}/{cells}/{nodes}/{search_iter}' # where metrics and model stats would be saved from each pareto iteration
seed_train:
trainer:
_copy: '/nas/eval/trainer'
title: 'seed_train'
epochs: 0 # number of epochs model will be trained before search
aux_weight: 0.0
drop_path_prob: 0.0
loader:
_copy: '/nas/eval/loader'
train_batch: 128
val_ratio: 0.1 #split portion for test set, 0 to 1
post_train:
trainer:
_copy: '/nas/eval/trainer'
title: 'post_train'
epochs: 0 # number of epochs model will be trained after search
aux_weight: 0.0
drop_path_prob: 0.0
loader:
_copy: '/nas/eval/loader'
train_batch: 128
val_ratio: 0.1 #split portion for test set, 0 to 1
pareto:
# default parameters are set so there is exactly one search iteration
max_cells: 8
max_reductions: 2
max_nodes: 4
enabled: False
  summary_filename: '$expdir/perito.tsv' # for each macro iteration, we save the model and perf summary
model_desc:
n_reductions: 2 # number of reductions to be applied
n_cells: 8 # number of cells
# we avoid copying from eval node because dataset settings
# may override eval.model_desc with different stems, pool etc
dataset:
_copy: '/dataset'
max_final_edges: 2 # max edge that can be in final arch per node
model_post_op: 'pool_adaptive_avg2d'
params: {}
aux_weight: 0.0 # weight for loss from auxiliary towers in test time arch
aux_tower_stride: 3 # stride that aux tower should use, 3 is good for 32x32 images, 2 for imagenet
model_stems:
ops: ['stem_conv3x3', 'stem_conv3x3']
stem_multiplier: 3 # output channels multiplier for the stem
init_node_ch: 16 # num of input/output channels for nodes in 1st cell
cell:
n_nodes: 4 # number of nodes in a cell
cell_post_op: 'concate_channels'
loader:
apex:
_copy: '../../trainer/apex'
aug: '' # additional augmentations to use
cutout: 0 # cutout length, use cutout augmentation when > 0
load_train: True # load train split of dataset
train_batch: 64
train_workers: 4 # if null then gpu_count*4
test_workers: '_copy: ../train_workers' # if null then 4
load_test: False # load test split of dataset
test_batch: 1024
val_ratio: 0.5 #split portion for test set, 0 to 1
val_fold: 0 #Fold number to use (0 to 4)
cv_num: 5 # total number of folds available
dataset:
_copy: '/dataset'
trainer:
apex:
_copy: '/common/apex'
aux_weight: '_copy: /nas/search/model_desc/aux_weight'
drop_path_prob: 0.0 # probability that given edge will be dropped
    grad_clip: 5.0 # grads above this value are clipped
logger_freq: 1000 # after every N updates dump loss and other metrics in logger
title: 'arch_train'
epochs: 50
batch_chunks: 1 # split batch into these many chunks and accumulate gradients so we can support GPUs with lower RAM
# additional vals for the derived class
    plotsdir: '' # empty string means no plots, otherwise plots are generated for each epoch in this dir
l1_alphas: 0.0 # weight to be applied to sum(abs(alphas)) to loss term
lossfn:
type: 'CrossEntropyLoss'
optimizer:
type: 'sgd'
lr: 0.025 # init learning rate
decay: 3.0e-4
momentum: 0.9 # pytorch default is 0
nesterov: False
decay_bn: .NaN # if NaN then same as decay otherwise apply different decay to BN layers
alpha_optimizer:
type: 'adam'
lr: 3.0e-4
decay: 1.0e-3
betas: [0.5, 0.999]
decay_bn: .NaN # if NaN then same as decay otherwise apply different decay to BN layers
alpha_lr_schedule:
type: ''
lr_schedule:
type: 'cosine'
min_lr: 0.001 # min learning rate, this will be used in eta_min param of scheduler
warmup: null
validation:
title: 'search_val'
logger_freq: 0
batch_chunks: '_copy: ../../batch_chunks' # split batch into these many chunks and accumulate gradients so we can support GPUs with lower RAM
freq: 1 # perform validation only every N epochs
lossfn:
type: 'CrossEntropyLoss'
autoaug:
num_op: 2
num_policy: 5
num_search: 200
num_result_per_cv: 10 # after conducting N trials, we will chose the results of top num_result_per_cv
loader:
apex:
_copy: '/common/apex'
aug: '' # additional augmentations to use
cutout: 16 # cutout length, use cutout augmentation when > 0
epochs: 50
load_train: True # load train split of dataset
train_batch: 64
train_workers: 4 # if null then gpu_count*4
test_workers: '_copy: ../train_workers' # if null then 4
load_test: True # load test split of dataset
test_batch: 1024
val_ratio: 0.4 #split portion for test set, 0 to 1
val_fold: 0 #Fold number to use (0 to 4)
cv_num: 5 # total number of folds available
dataset:
_copy: '/dataset'
optimizer:
type: 'sgd'
lr: 0.025 # init learning rate
decay: 3.0e-4 # pytorch default is 0.0
momentum: 0.9 # pytorch default is 0.0
nesterov: False # pytorch default is False
    clip: 5.0 # grads above this value are clipped # TODO: Why is this also in trainer?
decay_bn: .NaN # if NaN then same as decay otherwise apply different decay to BN layers
#betas: [0.9, 0.999] # PyTorch default betas for Adam
lr_schedule:
type: 'cosine'
min_lr: 0.0 # min learning rate, this will be used in eta_min param of scheduler
warmup: null
|
archai/confs/algos/darts.yaml/0
|
{
"file_path": "archai/confs/algos/darts.yaml",
"repo_id": "archai",
"token_count": 4599
}
| 333 |
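The darts.yaml config above leans heavily on "_copy" references (for example "_copy: '/common/checkpoint'" or "resume: '_copy: /common/resume'"), which point at other nodes of the config tree and are resolved when the config is loaded. The snippet below is only a minimal sketch of how absolute "_copy" references could be resolved over a plain nested dict loaded with yaml.safe_load; the file name, and the assumption that referenced sections such as '/common' and '/dataset' come from a base config merged in beforehand, are mine, and the actual resolution logic in archai.common.config.Config may differ.

import yaml

def lookup(root, path):
    # Follow an absolute path such as '/common/checkpoint' from the config root.
    node = root
    for part in path.strip('/').split('/'):
        if not isinstance(node, dict) or part not in node:
            return None  # section lives in a base config that is merged in separately
        node = node[part]
    return node

def resolve(node, root):
    # Dict form: a '_copy' key pulls in another node; sibling keys override it.
    if isinstance(node, dict):
        target = node.get('_copy')
        if isinstance(target, str) and target.startswith('/'):
            base = lookup(root, target)
            if isinstance(base, dict):
                merged = dict(resolve(base, root))
                merged.update({k: resolve(v, root) for k, v in node.items() if k != '_copy'})
                return merged
        return {k: resolve(v, root) for k, v in node.items()}
    # String form: a value written as '_copy: /some/path'.
    if isinstance(node, str) and node.startswith('_copy:'):
        target = node.split(':', 1)[1].strip()
        replacement = lookup(root, target) if target.startswith('/') else None
        return resolve(replacement, root) if replacement is not None else node
    if isinstance(node, list):
        return [resolve(v, root) for v in node]
    return node

with open('darts.yaml') as f:
    raw = yaml.safe_load(f)
conf = resolve(raw, raw)

Relative references such as '../../trainer/apex' are resolved against the node's own position in the tree, which this sketch deliberately does not attempt.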
# use this as overriding config for quick compile testing
common:
detect_anomaly: False # if True, PyTorch code will run 6X slower
resume: False
nas:
search:
data_parallel: False
resume: False # ignore checkpoint file if it exist
search_iters: 1
pareto:
max_reductions: 2
max_cells: 5
max_nodes: 2
enabled: False
seed_train:
trainer:
epochs: 1 # number of epochs model will be trained before search
loader:
train_batch: 32
test_batch: 32
val_ratio: 0.5 #for toy mode batch is small so use higher val ratio otherwise batch size will be lower than num classes
dataset:
max_batches: 2
post_train:
trainer:
epochs: 1
loader:
train_batch: 32
test_batch: 32
val_ratio: 0.5 #for toy mode batch is small so use higher val ratio otherwise batch size will be lower than num classes
dataset:
max_batches: 2
model_desc:
n_reductions: 1 # number of reductions to be applied
n_cells: 3 # number of cells
cell:
n_nodes: 2 # number of nodes in a cell
loader:
train_batch: 32
test_batch: 32
val_ratio: 0.5 #for toy mode batch is small so use higher val ratio otherwise batch size will be lower than num classes
dataset:
max_batches: 2
trainer:
epochs: 1
logger_freq: 1
validation:
logger_freq: 1
eval:
data_parallel: False
checkpoint: null
model_desc:
n_cells: 5 # number of cells
n_reductions: 2 # number of reductions to be applied
cell:
n_nodes: 4 # number of nodes in a cell
loader:
train_batch: 32
test_batch: 32
val_ratio: 0.5 #for toy mode batch is small so use higher val ratio otherwise batch size will be lower than num classes
dataset:
max_batches: 2
trainer:
epochs: 1
logger_freq: 1
validation:
logger_freq: 1
|
archai/confs/algos/toy_common.yaml/0
|
{
"file_path": "archai/confs/algos/toy_common.yaml",
"repo_id": "archai",
"token_count": 833
}
| 334 |
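toy_common.yaml is meant to be layered on top of a full algorithm config such as darts.yaml to shrink everything down for a quick compile test. Conceptually this is a recursive dictionary merge in which the override's values win; the sketch below illustrates that idea with plain dicts and yaml.safe_load. The file paths are assumptions about the working directory, and archai's own Config class performs the real merge.

import yaml

def deep_merge(base: dict, override: dict) -> dict:
    # Returns a new dict; values from 'override' win, nested dicts merge recursively.
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)
        else:
            merged[key] = value
    return merged

with open('darts.yaml') as f:
    base_conf = yaml.safe_load(f)
with open('toy_common.yaml') as f:
    toy_conf = yaml.safe_load(f)

conf = deep_merge(base_conf, toy_conf)
print(conf['nas']['search']['trainer']['epochs'])  # 1 once the toy override is applied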
!!python/object:archai.nas.model_desc.ModelDesc
_cell_descs:
- !!python/object:archai.nas.model_desc.CellDesc
_nodes:
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: &id001 !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id001
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id001
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id001
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 3
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
cell_type: &id003 !!python/object/apply:archai.nas.model_desc.CellType
- regular
conf_cell: &id004 !!python/object:archai.common.config.Config
args: null
config_filepath: null
data:
cell_post_op: concate_channels
n_nodes: 4
extra_args: []
id: 0
node_shapes:
- - 16
- -1
- -1
- -1
- - 16
- -1
- -1
- -1
- - 16
- -1
- -1
- -1
- - 16
- -1
- -1
- -1
out_shape:
- 64
- -1
- -1
- -1
post_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: concate_channels
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
out_states: 4
trainables: null
stem_shapes:
- - 16
- -1
- -1
- -1
- - 16
- -1
- -1
- -1
stems:
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 48
ch_out: 16
trainables: null
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 48
ch_out: 16
trainables: null
trainables_from: 0
- !!python/object:archai.nas.model_desc.CellDesc
_nodes:
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: &id002 !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id002
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id002
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id002
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 3
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 16
ch_out: 16
stride: 1
trainables: null
cell_type: *id003
conf_cell: *id004
id: 1
node_shapes:
- - 16
- -1
- -1
- -1
- - 16
- -1
- -1
- -1
- - 16
- -1
- -1
- -1
- - 16
- -1
- -1
- -1
out_shape:
- 64
- -1
- -1
- -1
post_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: concate_channels
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
out_states: 4
trainables: null
stem_shapes:
- - 16
- -1
- -1
- -1
- - 16
- -1
- -1
- -1
stems:
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 48
ch_out: 16
trainables: null
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 16
trainables: null
trainables_from: 0
- !!python/object:archai.nas.model_desc.CellDesc
_nodes:
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: &id005 !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_5x5
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id005
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: avg_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: dil_conv_5x5
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id005
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id005
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 4
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: dil_conv_5x5
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
cell_type: &id009 !!python/object/apply:archai.nas.model_desc.CellType
- reduction
conf_cell: *id004
id: 2
node_shapes:
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
out_shape:
- 128
- -1
- -1
- -1
post_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: concate_channels
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 128
ch_out: 128
out_states: 4
trainables: null
stem_shapes:
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
stems:
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 32
trainables: null
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 32
trainables: null
trainables_from: 2
- !!python/object:archai.nas.model_desc.CellDesc
_nodes:
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: &id006 !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id006
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id006
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id006
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 3
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
cell_type: *id003
conf_cell: *id004
id: 3
node_shapes:
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
out_shape:
- 128
- -1
- -1
- -1
post_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: concate_channels
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 128
ch_out: 128
out_states: 4
trainables: null
stem_shapes:
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
stems:
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_reduce
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 32
trainables: null
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 128
ch_out: 32
trainables: null
trainables_from: 0
- !!python/object:archai.nas.model_desc.CellDesc
_nodes:
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: &id007 !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id007
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id007
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id007
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 3
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 32
ch_out: 32
stride: 1
trainables: null
cell_type: *id003
conf_cell: *id004
id: 4
node_shapes:
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
out_shape:
- 128
- -1
- -1
- -1
post_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: concate_channels
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 128
ch_out: 128
out_states: 4
trainables: null
stem_shapes:
- - 32
- -1
- -1
- -1
- - 32
- -1
- -1
- -1
stems:
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 128
ch_out: 32
trainables: null
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 128
ch_out: 32
trainables: null
trainables_from: 0
- !!python/object:archai.nas.model_desc.CellDesc
_nodes:
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: &id008 !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_5x5
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id008
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: avg_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: dil_conv_5x5
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id008
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id008
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 2
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 4
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: dil_conv_5x5
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
cell_type: *id009
conf_cell: *id004
id: 5
node_shapes:
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
out_shape:
- 256
- -1
- -1
- -1
post_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: concate_channels
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 256
ch_out: 256
out_states: 4
trainables: null
stem_shapes:
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
stems:
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 128
ch_out: 64
trainables: null
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 128
ch_out: 64
trainables: null
trainables_from: 2
- !!python/object:archai.nas.model_desc.CellDesc
_nodes:
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: &id010 !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id010
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id010
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id010
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 3
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
cell_type: *id003
conf_cell: *id004
id: 6
node_shapes:
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
out_shape:
- 256
- -1
- -1
- -1
post_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: concate_channels
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 256
ch_out: 256
out_states: 4
trainables: null
stem_shapes:
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
stems:
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_reduce
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 128
ch_out: 64
trainables: null
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 256
ch_out: 64
trainables: null
trainables_from: 0
- !!python/object:archai.nas.model_desc.CellDesc
_nodes:
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: &id011 !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 1
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id011
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: sep_conv_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id011
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 0
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.NodeDesc
conv_params: *id011
edges:
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 3
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: skip_connect
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
- !!python/object:archai.nas.model_desc.EdgeDesc
input_ids:
- 2
op_desc: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: max_pool_3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 64
ch_out: 64
stride: 1
trainables: null
cell_type: *id003
conf_cell: *id004
id: 7
node_shapes:
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
out_shape:
- 256
- -1
- -1
- -1
post_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: concate_channels
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 256
ch_out: 256
out_states: 4
trainables: null
stem_shapes:
- - 64
- -1
- -1
- -1
- - 64
- -1
- -1
- -1
stems:
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 256
ch_out: 64
trainables: null
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: prepr_normal
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 256
ch_out: 64
trainables: null
trainables_from: 0
aux_tower_descs:
- null
- null
- null
- null
- null
- null
- null
- null
conf_model_desc: !!python/object:archai.common.config.Config
args: null
config_filepath: null
data:
aux_tower_stride: 3
aux_weight: 0.0
cell: *id004
dataset: !!python/object:archai.common.config.Config
args: null
config_filepath: null
data:
channels: 3
dataroot: ~/dataroot
max_batches: -1
n_classes: 10
name: cifar10
storage_name: cifar-10-batches-py
extra_args: []
max_final_edges: 2
model_post_op: pool_adaptive_avg2d
model_stems: !!python/object:archai.common.config.Config
args: null
config_filepath: null
data:
init_node_ch: 16
ops:
- stem_conv3x3
- stem_conv3x3
stem_multiplier: 3
extra_args: []
n_cells: 8
n_reductions: 2
params: !!python/object:archai.common.config.Config
args: null
config_filepath: null
data: {}
extra_args: []
extra_args: []
ds_ch: 3
logits_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: linear
params:
n_ch: 256
n_classes: 10
trainables: null
max_final_edges: 2
model_stems:
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: stem_conv3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 3
ch_out: 48
trainables: null
- !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: stem_conv3x3
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 3
ch_out: 48
trainables: null
n_classes: 10
params: {}
pool_op: !!python/object:archai.nas.model_desc.OpDesc
children: null
children_ins: null
in_len: 1
name: pool_adaptive_avg2d
params:
conv: !!python/object:archai.nas.model_desc.ConvMacroParams
ch_in: 256
ch_out: 256
trainables: null
|
archai/confs/darts_models/final_model_desc1.yaml/0
|
{
"file_path": "archai/confs/darts_models/final_model_desc1.yaml",
"repo_id": "archai",
"token_count": 21621
}
| 335 |
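Because final_model_desc1.yaml serializes Python objects directly (note the !!python/object tags), PyYAML's safe loader will refuse it; it needs the unrestricted loader and an environment where archai is importable so the referenced classes can be constructed. A minimal sketch, with the file path being an assumption:

import yaml

# The !!python/object tags require the unrestricted loader; only use it on trusted
# files, since it can instantiate arbitrary Python objects.
with open('confs/darts_models/final_model_desc1.yaml') as f:
    model_desc = yaml.unsafe_load(f)

print(type(model_desc).__name__)     # ModelDesc
print(len(model_desc._cell_descs))   # 8 cells in this particular descriptor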
Cloud-Based Search
==================
This section contains information about using Archai in the cloud.
.. toctree::
:maxdepth: 2
Azure <cloud/azure>
|
archai/docs/advanced_guide/cloud.rst/0
|
{
"file_path": "archai/docs/advanced_guide/cloud.rst",
"repo_id": "archai",
"token_count": 47
}
| 336 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
from azure.ai.ml import command
from azure.ai.ml import Input, Output
from azure.ai.ml.entities import UserIdentityConfiguration
def make_train_model_command(output_path, code_dir, environment_name, id,
storage_account_name, storage_account_key,
archid, training_epochs, save_models):
""" This is a parametrized command for training a given model architecture.
We will stamp these out to create a distributed training pipeline, with one
command per model architecture that we need to train. """
fixed_args = f'--name "{id}" ' + \
f'--storage_account_name "{storage_account_name}" ' + \
f'--storage_account_key "{storage_account_key}" ' + \
f'--model_params "{archid}" ' + \
f'--epochs "{training_epochs}" '
if save_models:
fixed_args += '--save_models '
# training is not deterministic, so we set is_deterministic=False which disables pipeline caching.
# https://azureml-pipelines-doc-1p.azurewebsites.net/how_tos/ml_professionals/debugging/reuse_issue_debugging.html
return command(
name=f'train_{id}',
display_name=f'train {id}',
is_deterministic=False,
inputs={
"data": Input(type="uri_folder")
},
outputs={
"results": Output(type="uri_folder", path=output_path, mode="rw_mount")
},
# The source folder of the component
code=code_dir,
environment=environment_name,
identity=UserIdentityConfiguration(),
command="""python3 train.py \
--data_dir "${{inputs.data}}" \
--output "${{outputs.results}}" """ + fixed_args
)
def make_monitor_command(hex_config, code_dir, results_uri, environment_name, metrics=[], timeout=3600):
""" This command waits up to some timeout for all the given training jobs to complete
and returns the validation accuracy results """
fixed_args = f'--config "{hex_config}" ' + \
f'--timeout {timeout} ' + \
f'--metrics {",".join(metrics)}'
# this node depends on training and is therefore also not deterministic, so we set is_deterministic=False
# which disables pipeline caching.
# https://azureml-pipelines-doc-1p.azurewebsites.net/how_tos/ml_professionals/debugging/reuse_issue_debugging.html
# the --model_path input path here points to the folder that contains the "models.json" file listing
# all the training jobs we are waiting on, it uses this to look up the status of those training jobs
# in the ArchaiStore (specified by the --config option). It also writes an updated "models.json"
# with the final validation accuracies reported by the training to the output results folder.
return command(
name="wait",
display_name="Wait for training to complete",
description="Waits for all distributed training jobs to complete.",
is_deterministic=False,
inputs={
"models": Input(type="uri_folder"),
"training_results": Input(type="uri_folder")
},
outputs={
"results": Output(type="uri_file", path=results_uri, mode="rw_mount")
},
code=code_dir,
identity=UserIdentityConfiguration(),
command="""python3 monitor.py \
--model_path "${{inputs.models}}" \
--output "${{outputs.results}}" """ + fixed_args,
environment=environment_name,
)
def make_training_pipeline_command(description, hex_config, code_dir, compute_cluster_name,
datastore_uri, results_uri, experiment_name,
environment_name, training_epochs, save_models):
""" This command create an entirely new Azure ML pipeline for each parallel training run
where it trains each of the models found in the "models.json" file in the given input
models folder. The training_pipeline.py script uses the above make_train_model_command
for each model found. """
fixed_args = f'--config "{hex_config}" ' + \
f'--description "{description}" ' + \
f'--compute_cluster_name "{compute_cluster_name}" ' + \
f'--experiment_name "{experiment_name}" ' + \
f'--environment_name "{environment_name}" ' + \
f'--datastore_uri "{datastore_uri}" ' + \
f'--results_uri "{results_uri}" ' + \
f'--epochs "{training_epochs}" '
if save_models:
fixed_args += '--save_models '
# training is not deterministic, so we set is_deterministic=False which disables pipeline caching.
# https://azureml-pipelines-doc-1p.azurewebsites.net/how_tos/ml_professionals/debugging/reuse_issue_debugging.html
return command(
name="training",
display_name=f'Start {description}',
description="Starts a separate pipeline to do parallel partial training of a given set of models.",
is_deterministic=False,
inputs={
"models": Input(type="uri_folder", path=results_uri),
"data": Input(type="uri_folder")
},
outputs={
"results": Output(type="uri_folder", path=results_uri, mode="rw_mount")
},
code=code_dir,
identity=UserIdentityConfiguration(),
command="""python3 training_pipeline.py \
--output_path "${{outputs.results}}" \
--models_path "${{inputs.models}}" """ + fixed_args,
environment=environment_name,
)
|
archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/commands.py/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/commands.py",
"repo_id": "archai",
"token_count": 2271
}
| 337 |
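The factory functions in commands.py return azure.ai.ml command components that are meant to be wired together inside a pipeline. The sketch below shows one way they might be composed; it assumes it is run from the scripts folder, with an authenticated workspace config, and every resource name, datastore path, storage value, and architecture id shown here is a placeholder. The surrounding multi_node_search notebooks are the authoritative usage.

from azure.ai.ml import MLClient, Input, dsl
from azure.identity import DefaultAzureCredential

from commands import make_train_model_command

ml_client = MLClient.from_config(credential=DefaultAzureCredential())

# Placeholder values; substitute your own datastore paths, storage account and cluster.
data_input = Input(type="uri_folder", path="azureml://datastores/datasets/paths/cifar10")
train_cmd = make_train_model_command(
    output_path="azureml://datastores/results/paths/run1",
    code_dir="scripts",
    environment_name="azureml:aml-archai:0.0.1",
    id="model_001",
    storage_account_name="mystorageaccount",
    storage_account_key="<storage-key>",
    archid="(5, 3, 32)",
    training_epochs=1,
    save_models=True,
)

@dsl.pipeline(description="Partial training of a single candidate architecture")
def single_model_pipeline(data):
    # Instantiating the command component inside the pipeline creates a job node.
    train_job = train_cmd(data=data)
    return {"results": train_job.outputs.results}

pipeline_job = single_model_pipeline(data=data_input)
pipeline_job.settings.default_compute = "nas-gpu-cluster-NC6"
ml_client.jobs.create_or_update(pipeline_job, experiment_name="archai_multi_node_search")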
display_name: Train a Pareto architecture from Transformer-Flex
type: command
compute: nas-gpu-cluster-NC6
inputs:
arch_config_path:
type: uri_file
path: azureml://full/path/to/architecture/configuration/file
outputs:
output_dir:
type: uri_folder
code: .
environment:
azureml:aml-archai:0.0.1
command: >-
python train.py
${{inputs.arch_config_path}}
--output_dir ${{outputs.output_dir}}
|
archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/src/train.yaml/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/text_generation/src/train.yaml",
"repo_id": "archai",
"token_count": 161
}
| 338 |
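The same job specification can be submitted from Python instead of the CLI. A minimal sketch, assuming an authenticated workspace config and that the compute cluster, environment, and architecture configuration file referenced by train.yaml already exist:

from azure.ai.ml import MLClient, load_job
from azure.identity import DefaultAzureCredential

ml_client = MLClient.from_config(credential=DefaultAzureCredential())

# Load the command job defined above and submit it as-is.
job = load_job("src/train.yaml")
returned_job = ml_client.jobs.create_or_update(job, experiment_name="transformer_flex_train")
print(returned_job.studio_url)  # link to monitor the run in Azure ML studio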
Installation
============
There are various methods to install Archai, but it is recommended to use it within a virtual environment, such as ``conda`` or ``pyenv``. This ensures that the software runs in a consistent and isolated environment, and allows for easy management of installed packages and dependencies.
.. attention::
Archai requires `Python <http://python.org>`_ 3.7+ and `PyTorch <https://pytorch.org>`_ 1.7.0+.
PyPI
----
PyPI provides a convenient way to install Python packages, as it allows users to easily search for and download packages, as well as automatically handle dependencies and other installation requirements. This is especially useful for larger Python projects that require multiple packages to be installed and managed.
.. code-block:: sh
pip install archai
The default installation only includes the core functionality of Archai, e.g., NAS-related packages. To install additional functionalities, use the following commands:
* Computer Vision: ``pip install archai[cv]``.
* Natural Language Processing: ``pip install archai[nlp]``.
* DeepSpeed: ``pip install archai[deepspeed]``.
* Flash-Attention: ``pip install archai[flash-attn]``.
* Documentation and Notebooks: ``pip install archai[docs]``.
* Unit Tests: ``pip install archai[tests]``.
* Development: ``pip install archai[dev]``.
Docker
------
Docker is a useful tool for running experiments because it provides a consistent, isolated environment for the experiment to run in. This ensures that the results of the experiment are not affected by external factors, such as the specific operating system or installed packages on the host machine.
The :github:`docker/Dockerfile` provides a development environment to run experiments. Additionally, :github:`docker/build_image.sh` and :github:`docker/run_container.sh` provide scripts to build the image and run the container, respectively:
.. code-block:: sh
cd docker
bash build_image.sh
bash run_container.sh
|
archai/docs/getting_started/installation.rst/0
|
{
"file_path": "archai/docs/getting_started/installation.rst",
"repo_id": "archai",
"token_count": 499
}
| 339 |
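After installing, a quick sanity check is to query the package metadata rather than importing heavy dependencies; this is a minimal sketch that assumes Python 3.8+, and the exact requirements it prints depend on the installed Archai version.

from importlib.metadata import version, requires

print(version("archai"))  # installed Archai version

# Requirements gated behind optional extras show up with an 'extra == "..."' marker.
for req in requires("archai") or []:
    if 'extra ==' in req:
        print(req)

For the extras themselves, the pip commands listed above (for example pip install archai[cv]) remain the supported installation path.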
from torch import nn
class MyModel(nn.Module):
def __init__(self, nb_layers: int = 5, kernel_size: int = 3, hidden_dim: int = 32):
super().__init__()
self.nb_layers = nb_layers
self.kernel_size = kernel_size
self.hidden_dim = hidden_dim
        # Stack nb_layers conv -> batch-norm -> ReLU blocks; the first block takes a
        # single input channel, the rest operate on hidden_dim channels.
        layer_list = []
        for i in range(nb_layers):
            in_ch = (1 if i == 0 else hidden_dim)
            layer_list += [
                nn.Conv2d(in_ch, hidden_dim, kernel_size=kernel_size, padding=(kernel_size-1)//2),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(),
            ]
        # Global average pooling followed by a 1x1 conv acting as the 10-class classifier head.
        layer_list += [
            nn.AdaptiveAvgPool2d(output_size=(1, 1)),
            nn.Conv2d(hidden_dim, 10, kernel_size=1)
        ]
        self.model = nn.Sequential(*layer_list)
def forward(self, x):
return self.model(x).squeeze()
def get_archid(self):
return f'({self.nb_layers}, {self.kernel_size}, {self.hidden_dim})'
|
archai/docs/getting_started/notebooks/discrete_search/model.py/0
|
{
"file_path": "archai/docs/getting_started/notebooks/discrete_search/model.py",
"repo_id": "archai",
"token_count": 492
}
| 340 |
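A minimal usage sketch for MyModel; the 28x28 single-channel input is just an example (say, MNIST-style images), since the adaptive average pooling makes the spatial size flexible.

import torch

model = MyModel(nb_layers=3, kernel_size=3, hidden_dim=16)
x = torch.randn(8, 1, 28, 28)   # batch of 8 single-channel images
logits = model(x)               # shape (8, 10) after the trailing squeeze
print(logits.shape, model.get_archid())  # torch.Size([8, 10]) (3, 3, 16)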