text (stringlengths 5 to 22M) | id (stringlengths 12 to 177) | metadata (dict) | __index_level_0__ (int64, 0 to 1.37k) |
---|---|---|---|
# def propara_executor(state, action):
import jsonlines
from tqdm import tqdm
from random import choices
import argparse
import multiprocessing
from multiprocessing import Pool
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_prefix", type=str, default='propara', help="dataset prefix")
# parser.add_argument("--max_number", type=int, default=10000, help="max number each dataset.")
parser.add_argument("--corpus_file", type=str, default='../corpus/pretraining_corpus_propara.txt', help="corpus file")
args = parser.parse_args()
fw = open(args.corpus_file, 'w')
def random_sampling(candidate_list, n, weights=None):
result_list = []
for _ in range(n):
result = choices(candidate_list, k=1, weights=weights)[0]
result_list.append(result)
return result_list
def propara_state_generator(candidate_list):
# randomly sample a state
item = random_sampling(candidate_list, 1)[0]
participants = item['participants']
states = random_sampling(item['states_token'], len(participants))
states = {'participants':participants, 'states':states}
return states
def propara_action_generator(states, states_tokens):
# randomly sample an action
action = ''
func_list = ['Move', 'Create', 'Destroy']
while True:
try:
func = random_sampling(func_list,1)[0]
if func == 'Create':
available_participants = [p for p, s in zip(states['participants'],states['states']) if s == '-']
p = random_sampling(available_participants, 1)[0]
x = random_sampling(states_tokens, 1)[0]
if x not in ['-', '?']:
action = {'func': func, 'participant':p, 'para1':x}
else:
action = {'func': func, 'participant':p}
elif func == 'Destroy':
available_participants = [p for p, s in zip(states['participants'],states['states']) if s != '-']
p = random_sampling(available_participants, 1)[0]
action = {'func': func, 'participant':p}
elif func == 'Move':
available_participants_states = [(p,s) for p, s in zip(states['participants'],states['states']) if s != '-']
p, x1 = random_sampling(available_participants_states, 1)[0]
x2 = random_sampling([item for item in states_tokens if item != x1 and item != '-'], 1)[0]
action = {'func': func, 'participant':p, 'para1':x1, 'para2':x2}
break
except Exception:  # sampling can fail (e.g., empty candidate pool); retry with a new action
continue
return action
def propara_executor(states, action):
result = dict()
if action['func'] == 'Create':
if 'para1' not in action.keys():
result['participants'] = states['participants']
result['states'] = ['?' if p==action['participant'] else s for p, s in zip(states['participants'], states['states'])]
else:
result['participants'] = states['participants']
result['states'] = [action['para1'] if p==action['participant'] else s for p, s in zip(states['participants'], states['states'])]
elif action['func'] == 'Destroy':
result['participants'] = states['participants']
result['states'] = ['-' if p==action['participant'] else s for p, s in zip(states['participants'], states['states'])]
elif action['func'] == 'Move':
result['participants'] = states['participants']
result['states'] = [action['para2'] if p==action['participant'] and s==action['para1'] else s for p, s in zip(states['participants'], states['states'])]
return result
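# Worked example of the executor (hypothetical state and action; dict layouts follow the code above):
# >>> states = {'participants': ['water', 'ice'], 'states': ['glacier', 'glacier']}
# >>> action = {'func': 'Move', 'participant': 'water', 'para1': 'glacier', 'para2': 'ocean'}
# >>> propara_executor(states, action)
# {'participants': ['water', 'ice'], 'states': ['ocean', 'glacier']}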
def states_linearize_ori(states):
return 'col : ' + ' | '.join(states['participants']) + ' ' + 'state : ' + ' | '.join(states['states'])
def states_linearize_tgt(states):
return 'state : ' + ' | '.join(states['states'])
def action_linearize(action):
if action['func'] == 'Create':
if 'para1' in action.keys():
result = ' '.join([action['func'], action['participant'], action['para1']])
else:
result = ' '.join([action['func'], action['participant']])
elif action['func'] == 'Destroy':
result = ' '.join([action['func'], action['participant']])
elif action['func'] == 'Move':
result = ' '.join([action['func'], action['participant'], 'from', action['para1'], 'to', action['para2']])
return result
def corpus_generation(inputs):
candidate_list, max_step, total_number = inputs
count = 0
while True:
states = propara_state_generator(candidate_list)
action_list = []
for _ in range(20):
action = propara_action_generator(states, states_tokens)
if action['participant'] not in [item['participant'] for item in action_list]:
action_list.append(action)
if len(action_list) >= max_step:
break
if len(action_list) == max_step:
states_temp = states
for action in action_list:
states_temp = propara_executor(states_temp, action)
final_states = states_temp
initial_states = states_linearize_ori(states)
final_states = states_linearize_tgt(final_states)
final_action = ' , '.join([action_linearize(item) for item in action_list]).lower()
item_row = '\t'.join([final_action.strip(), initial_states, final_states]).lower()
fw.write(item_row)
fw.write('\n')
count += 1
if count % 10000 == 0:
print('Finish generating {} cases'.format(count))
if count >= total_number:
break
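# Each written corpus line is tab-separated: "<actions>\t<initial states>\t<final states>".
# Hypothetical example (lowercased by the code above, tabs shown as <TAB>):
#   move water from glacier to ocean , destroy ice<TAB>col : water | ice state : glacier | glacier<TAB>state : ocean | -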
if __name__ == '__main__':
if args.dataset_prefix == 'propara':
data_lines = list(jsonlines.open('./grids.v1.train.json', 'r'))
candidate_list = []
for index in tqdm(range(len(data_lines))):
line = data_lines[index]
id = line['para_id']
participants = line['participants']
states = line['states']
states_tokens = [states[i][j] for i in range(len(states)) for j in range(len(states[0]))]
candidate = {'participants':participants, 'states_token':states_tokens}
candidate_list.append(candidate)
cores = multiprocessing.cpu_count()
print("Using {} cores".format(cores))
pool = Pool(cores)
max_number_list = [200000,300000,300000,300000,150000,75000,25000,10000] # "100W" = roughly 1,000,000 examples in total
for i in range(1,9):
res = pool.map(corpus_generation, zip([candidate_list]*cores, [i]*cores, [int(max_number_list[i-1] // cores)]*cores))
pool.close()
pool.join()
|
ContextualSP/lemon/corpus_generation/propara_corpus_generation.py/0
|
{
"file_path": "ContextualSP/lemon/corpus_generation/propara_corpus_generation.py",
"repo_id": "ContextualSP",
"token_count": 2952
}
| 234 |
from abc import abstractproperty, ABCMeta, abstractmethod
import tensorflow as tf
from keras.layers import Dense, LSTM
from gtd.ml.framework import Feedable, Model
from gtd.ml.seq_batch import FeedSequenceBatch, embed, reduce_mean, SequenceBatch, reduce_sum, weighted_sum, reduce_max
from gtd.ml.vocab import Vocab
class Embedder(Feedable, metaclass=ABCMeta):
"""A map from objects to embeddings."""
@abstractproperty
def embeds(self):
"""A Tensor of shape [vocab_size, :, ..., :]."""
pass
@property
def embed_dim(self):
return self.embeds.get_shape().as_list()[1]
class TokenEmbedder(Embedder):
"""An embedding model for simple token-like objects (such as words).
The embedding matrix is a TensorFlow Variable, with one row for each token.
"""
def __init__(self, simple_embeddings, var_name, trainable=True):
"""Create VariableEmbeddings.
Args:
simple_embeddings (SimpleEmbeddings): a gtd.vocab.SimpleEmbeddings object
var_name (str): name for the Variable
trainable (bool): whether the embedding matrix is trainable or not
"""
vocab = simple_embeddings.vocab
vocab_size = len(vocab)
embed_dim = simple_embeddings.embed_dim
embeds = tf.get_variable(var_name, shape=[vocab_size, embed_dim],
initializer=tf.constant_initializer(simple_embeddings.array), trainable=trainable)
self._embeds = embeds
self._embed_dim = embed_dim
self._vocab = vocab
@property
def vocab(self):
return self._vocab
@property
def embeds(self):
return self._embeds
@property
def embed_dim(self):
return self._embed_dim
@property
def vocab_size(self):
return len(self.vocab)
def inputs_to_feed_dict(self, *args, **kwargs):
return {}
class SequenceEmbedder(Embedder, metaclass=ABCMeta):
"""An embedding matrix for objects that can be represented as sequences (such as sentences)."""
def __init__(self, token_embeds, align='left', seq_length=None, name='SequenceEmbedder'):
"""Create a SequenceEmbeddings object.
Args:
token_embeds (Tensor): a Tensor of shape (token_vocab_size, token_dim)
align (str): see FeedSequenceBatch
seq_length (int): see FeedSequenceBatch
"""
with tf.name_scope(name):
sequence_batch = FeedSequenceBatch(align=align, seq_length=seq_length) # (sequence_vocab_size, seq_length)
embedded_sequence_batch = embed(sequence_batch, token_embeds)
embeds = self.embed_sequences(embedded_sequence_batch)
self._sequence_batch = sequence_batch
self._embedded_sequence_batch = embedded_sequence_batch
self._embeds = embeds
@abstractmethod
def embed_sequences(self, embedded_sequence_batch):
"""Convert an embedded SequenceBatch into a Tensor of sequence embeddings.
Args:
embedded_sequence_batch (gtd.ml.seq_batch.SequenceBatch): a SequenceBatch of shape
[seq_vocab_size, seq_length, token_dim]
Returns:
sequence_embeds (Tensor): of shape [seq_vocab_size, seq_dim]
"""
pass
def inputs_to_feed_dict(self, sequences, token_vocab):
"""Feed sequences.
Args:
sequences (list[list[unicode]]): a list of sequences
token_vocab (SimpleVocab): a map from token names to integers
Returns:
feed_dict
"""
return self._sequence_batch.inputs_to_feed_dict(sequences, token_vocab)
@property
def embeds(self):
return self._embeds
class MeanSequenceEmbedder(SequenceEmbedder):
def __init__(self, token_embeds, align='left', seq_length=None, allow_empty=False, name='MeanSequenceEmbedder'):
"""MeanSequenceEmbedder.
Args:
allow_empty (bool): allow computing the average of an empty sequence. In this case, we assume 0/0 == 0,
rather than NaN. Default is False, causing an error to be thrown.
(see SequenceEmbedder for other args)
"""
self._allow_empty = allow_empty
super(MeanSequenceEmbedder, self).__init__(token_embeds, align=align, seq_length=seq_length, name=name)
def embed_sequences(self, embedded_sequence_batch):
return reduce_mean(embedded_sequence_batch, allow_empty=self._allow_empty)
class MaxSequenceEmbedder(SequenceEmbedder):
def embed_sequences(self, embedded_sequence_batch):
return reduce_max(embedded_sequence_batch)
class ConcatSequenceEmbedder(SequenceEmbedder):
def embed_sequences(self, embedded_sequence_batch):
values = embedded_sequence_batch.values
shape = tf.shape(values)
nrows, ncols = shape[0], shape[1] * shape[2]
new_shape = tf.pack([nrows, ncols])
result = tf.reshape(values, new_shape) # (batch_size, seq_length * embed_dim)
# add static shape info
batch_dim, seq_length_dim, token_dim = values.get_shape()
concat_dim = token_dim * seq_length_dim
result.set_shape(tf.TensorShape([batch_dim, concat_dim]))
return result
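# Shape note (hypothetical sizes): with seq_length=5 and token_dim=10, each (5, 10) embedded
# sequence is flattened into a single 50-dimensional vector, so the result is (batch_size, 50).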
class Attention(Model):
"""Implements standard attention.
Given some memory, a memory mask and a query, outputs the weighted memory cells.
"""
def __init__(self, memory_cells, query, project_query=False):
"""Define Attention.
Args:
memory_cells (SequenceBatch): a SequenceBatch containing a Tensor of shape (batch_size, num_cells, cell_dim)
query (Tensor): a tensor of shape (batch_size, query_dim).
project_query (bool): defaults to False. If True, the query goes through an extra projection layer to
coerce it to cell_dim.
"""
cell_dim = memory_cells.values.get_shape().as_list()[2]
if project_query:
# project the query up/down to cell_dim
self._projection_layer = Dense(cell_dim, activation='linear')
query = self._projection_layer(query) # (batch_size, cell_dim)
memory_values, memory_mask = memory_cells.values, memory_cells.mask
# batch matrix multiply to compute logit scores for all choices in all batches
query = tf.expand_dims(query, 2) # (batch_size, cell_dim, 1)
logit_values = tf.batch_matmul(memory_values, query) # (batch_size, num_cells, 1)
logit_values = tf.squeeze(logit_values, [2]) # (batch_size, num_cells)
# set all pad logits to negative infinity
logits = SequenceBatch(logit_values, memory_mask)
logits = logits.with_pad_value(-float('inf'))
# normalize to get probs
probs = tf.nn.softmax(logits.values) # (batch_size, num_cells)
retrieved = tf.batch_matmul(tf.expand_dims(probs, 1), memory_values) # (batch_size, 1, cell_dim)
retrieved = tf.squeeze(retrieved, [1]) # (batch_size, cell_dim)
self._logits = logits.values
self._probs = probs
self._retrieved = retrieved
@property
def logits(self):
return self._logits # (batch_size, num_cells)
@property
def probs(self):
return self._probs # (batch_size, num_cells)
@property
def retrieved(self):
return self._retrieved # (batch_size, cell_dim)
@property
def projection_weights(self):
"""Get projection weights.
Returns:
(np.array, np.array): a pair of numpy arrays, (W, b) used to project the query tensor to
match the predicate embedding dimension.
"""
return self._projection_layer.get_weights()
@projection_weights.setter
def projection_weights(self, value):
W, b = value
self._projection_layer.set_weights([W, b])
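# Shape walk-through (hypothetical sizes): with batch_size=2, num_cells=3, cell_dim=4,
# memory_values is (2, 3, 4) and the expanded query is (2, 4, 1), so batch_matmul yields
# logits of shape (2, 3). Padded cells get logit -inf, so softmax assigns them probability 0
# and `retrieved` is a weighted average over the real memory cells only.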
class Scorer(Model, metaclass=ABCMeta):
@abstractproperty
def scores(self):
"""Return a SequenceBatch."""
pass
class CandidateScorer(Feedable, Scorer):
def __init__(self, query, cand_embeds, project_query=False):
"""Create a CandidateScorer.
Args:
query (Tensor): of shape (batch_size, query_dim)
cand_embeds (Tensor): of shape (cand_vocab_size, cand_dim)
project_query (bool): whether to project the query tensor to match the dimension of the cand_embeds
"""
with tf.name_scope("CandidateScorer"):
cand_batch = FeedSequenceBatch()
embedded_cand_batch = embed(cand_batch, cand_embeds) # (batch_size, num_candidates, cand_dim)
attention = Attention(embedded_cand_batch, query, project_query=project_query)
self._attention = attention
self._cand_batch = cand_batch
self._scores = SequenceBatch(attention.logits, cand_batch.mask)
self._probs = SequenceBatch(attention.probs, cand_batch.mask)
@property
def probs(self):
return self._probs
@property
def scores(self):
return self._scores
@property
def projection_weights(self):
return self._attention.projection_weights
@projection_weights.setter
def projection_weights(self, value):
self._attention.projection_weights = value
def inputs_to_feed_dict(self, candidates, cand_vocab):
"""Feed inputs.
Args:
candidates (list[list[unicode]]): a batch of sequences, where each sequence is a unique set of candidates.
cand_vocab (Vocab): a map from a candidate string to an int
Returns:
feed_dict
"""
return self._cand_batch.inputs_to_feed_dict(candidates, cand_vocab)
class SoftCopyScorer(Feedable, Scorer):
def __init__(self, input_scores):
"""Align a candidate with elements of the input, and define its score to be the summed score of aligned inputs.
Args:
input_scores (Tensor): of shape (batch_size, input_length)
"""
input_scores_flat = tf.reshape(input_scores, shape=[-1]) # (batch_size * input_length,)
self._input_length = input_scores.get_shape().as_list()[1]
alignments_flat = FeedSequenceBatch() # (total_candidates, max_alignments)
alignment_weights_flat = FeedSequenceBatch(dtype=tf.float32) # (total_candidates, max_alignments)
aligned_attention_weights = embed(alignments_flat, input_scores_flat) # (total_candidates, max_alignments)
scores_flat = weighted_sum(aligned_attention_weights, alignment_weights_flat.with_pad_value(0).values) # (total_candidates,)
unflatten = FeedSequenceBatch() # (batch_size, num_candidates)
scores = embed(unflatten, scores_flat).with_pad_value(0) # (batch_size, num_candidates)
self._alignments_flat = alignments_flat
self._alignment_weights_flat = alignment_weights_flat
self._unflatten = unflatten
self._scores = scores
@property
def input_length(self):
return self._input_length
@property
def scores(self):
"""A SequenceBatch."""
return self._scores
def inputs_to_feed_dict(self, alignments):
"""Feed inputs.
Args:
alignments (list[list[list[(int, float)]]]): alignments[i][j] is a list of alignments for candidate j
of example i. Each alignment is an (idx, strength) pair. `idx` corresponds to a position in the input
sequence. `strength` is a float.
Returns:
a feed_dict
"""
alignments_flat = []
alignment_weights_flat = []
unflatten = []
flat_idx = 0
for ex_idx, ex_alignments in enumerate(alignments): # loop over examples
uf = []
for aligns in ex_alignments: # loop over candidates
if len(aligns) > 0:
positions, strengths = [list(l) for l in zip(*aligns)]
if max(positions) > (self._input_length - 1):
raise ValueError("alignment positions must not exceed input length")
else:
positions, strengths = [], []
offset = ex_idx * self.input_length
positions_flat = [offset + i for i in positions]
alignments_flat.append(positions_flat)
alignment_weights_flat.append(strengths)
uf.append(flat_idx)
flat_idx += 1
unflatten.append(uf)
feed = {}
feed.update(self._alignments_flat.inputs_to_feed_dict(alignments_flat))
feed.update(self._alignment_weights_flat.inputs_to_feed_dict(alignment_weights_flat))
feed.update(self._unflatten.inputs_to_feed_dict(unflatten))
return feed
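# Worked example of the flattening above (hypothetical values, input_length = 4):
#   alignments = [[[(0, 0.7), (2, 0.3)], []],   # example 0 has two candidates
#                 [[(1, 1.0)]]]                 # example 1 has one candidate
# produces
#   alignments_flat        = [[0, 2], [], [5]]  # positions offset by ex_idx * input_length
#   alignment_weights_flat = [[0.7, 0.3], [], [1.0]]
#   unflatten              = [[0, 1], [2]]      # maps each example to the flat indices of its candidates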
class LSTMSequenceEmbedder(SequenceEmbedder):
"""Forward LSTM Sequence Embedder
Also provides attention states.
"""
def __init__(self, token_embeds, seq_length, align='left', name='LSTMSequenceEmbedder', hidden_size=50):
self.hidden_size = hidden_size
super(LSTMSequenceEmbedder, self).__init__(token_embeds, align=align, seq_length=seq_length, name=name)
def embed_sequences(self, embed_sequence_batch):
self._forward_lstm = LSTM(self.hidden_size, return_sequences=True)
# Pass input through the LSTMs
# Shape: (batch_size, seq_length, hidden_size)
hidden_state_values = self._forward_lstm(embed_sequence_batch.values, embed_sequence_batch.mask)
self._hidden_states = SequenceBatch(hidden_state_values, embed_sequence_batch.mask)
# Embedding dimension: (batch_size, hidden_size)
shape = tf.shape(embed_sequence_batch.values)
forward_final = tf.slice(hidden_state_values, [0, shape[1] - 1, 0], [-1, 1, self.hidden_size])
return tf.squeeze(forward_final, [1])
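# Shape note (hypothetical sizes): with hidden_size=50, hidden_state_values is
# (batch_size, seq_length, 50); the tf.slice above keeps only the final time step,
# so the returned sequence embedding has shape (batch_size, 50).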
@property
def weights(self):
return self._forward_lstm.get_weights()
@weights.setter
def weights(self, w):
self._forward_lstm.set_weights(w)
@property
def hidden_states(self):
return self._hidden_states
class BidiLSTMSequenceEmbedder(SequenceEmbedder):
"""Bidirectional LSTM Sequence Embedder
Also provides attention states.
"""
def __init__(self, token_embeds, seq_length, align='left', name='BidiLSTMSequenceEmbedder', hidden_size=50):
self.seq_length = seq_length
self.hidden_size = hidden_size
super(BidiLSTMSequenceEmbedder, self).__init__(token_embeds, align=align, seq_length=seq_length, name=name)
def embed_sequences(self, embed_sequence_batch):
"""Return sentence embeddings as a tensor with with shape
[batch_size, hidden_size * 2]
"""
forward_values = embed_sequence_batch.values
forward_mask = embed_sequence_batch.mask
backward_values = tf.reverse(forward_values, [False, True, False])
backward_mask = tf.reverse(forward_mask, [False, True])
# Initialize LSTMs
self._forward_lstm = LSTM(self.hidden_size, return_sequences=True)
self._backward_lstm = LSTM(self.hidden_size, return_sequences=True)
# Pass input through the LSTMs
# Shape: (batch_size, seq_length, hidden_size)
forward_seq = self._forward_lstm(forward_values, forward_mask)
forward_seq.set_shape((None, self.seq_length, self.hidden_size))
backward_seq = self._backward_lstm(backward_values, backward_mask)
backward_seq.set_shape((None, self.seq_length, self.hidden_size))
# Stitch the outputs together --> hidden states (for computing attention)
# Final dimension: (batch_size, seq_length, hidden_size * 2)
lstm_states = tf.concat(2, [forward_seq, tf.reverse(backward_seq, [False, True, False])])
self._hidden_states = SequenceBatch(lstm_states, forward_mask)
# Stitch the final outputs together --> sequence embedding
# Final dimension: (batch_size, hidden_size * 2)
seq_length = tf.shape(forward_values)[1]
forward_final = tf.slice(forward_seq, [0, seq_length - 1, 0], [-1, 1, self.hidden_size])
backward_final = tf.slice(backward_seq, [0, seq_length - 1, 0], [-1, 1, self.hidden_size])
return tf.squeeze(tf.concat(2, [forward_final, backward_final]), [1])
@property
def weights(self):
return (self._forward_lstm.get_weights(), self._backward_lstm.get_weights())
@weights.setter
def weights(self, w):
forward_weights, backward_weights = w
self._forward_lstm.set_weights(forward_weights)
self._backward_lstm.set_weights(backward_weights)
@property
def hidden_states(self):
"""Return a SequenceBatch whose value has shape
[batch_size, max_seq_length, hidden_size * 2]
"""
return self._hidden_states
|
ContextualSP/lemon/executor/gtd/ml/model.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/ml/model.py",
"repo_id": "ContextualSP",
"token_count": 6965
}
| 235 |
from unittest import TestCase
from gtd.graph import Graph
class TestGraph(TestCase):
def test_shortest_path(self):
triples = [
('1', '2', '3'),
('3', '4', '5'),
('1', '0', '5'),
]
self.assertEqual(
Graph(triples).shortest_path('1', '5'),
['1', '0', '5']
)
self.assertEqual(
Graph(triples[:2]).shortest_path('1', '5'),
['1', '2', '3', '4', '5']
)
|
ContextualSP/lemon/executor/gtd/tests/test_graph.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/tests/test_graph.py",
"repo_id": "ContextualSP",
"token_count": 255
}
| 236 |
from abc import ABCMeta, abstractproperty
class ExampleFactory(object, metaclass=ABCMeta):
@abstractproperty
def examples(self):
"""Return an iterable of Examples."""
raise NotImplementedError
|
ContextualSP/lemon/executor/strongsup/example_factory.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/example_factory.py",
"repo_id": "ContextualSP",
"token_count": 73
}
| 237 |
"""Knowledge graph constructed from a table.
The graph is stored as a list of triples.
"""
import os
import re
import sys
from collections import Counter
from itertools import chain
from strongsup.tables.structure import parse_number, parse_date, InfiniteSet
from strongsup.tables.utils import tsv_unescape, tsv_unescape_list
from dependency.data_directory import DataDirectory
################################
# Constants
REL_NEXT = 'fb:row.row.next'
REL_INDEX = 'fb:row.row.index'
REL_NUMBER = 'fb:cell.cell.number'
REL_DATE = 'fb:cell.cell.date'
REL_NUM2 = 'fb:cell.cell.num2'
REL_PART = 'fb:cell.cell.part'
ALL_GRAPH_BUILT_INS = (REL_NEXT, REL_INDEX, REL_NUMBER, REL_DATE, REL_NUM2, REL_PART)
ALL_GRAPH_BUILT_INS += tuple('!' + x for x in ALL_GRAPH_BUILT_INS)
NULL_CELL = 'fb:cell.null'
################################
# TablesKnowledgeGraph
class TablesKnowledgeGraph(object):
"""A knowledge graph constructed from a table."""
# Whether to start the row indices from 1 (default) or 0 (legacy).
FIRST_ROW_INDEX = 1
def __init__(self, fin, name=None):
"""Construct a TablesKnowledgeGraph from a CoreNLP-tagged table TSV file.
Each line in the TSV file describes a cell in the context table.
The following fields must be present:
- row: Row index (-1 = header row, body row index starts from 0)
- col: Column index (starts from 0)
- id: ID of the cell
- content: Original string content of the cell
The following fields are optional:
- number: Possible number normalization ("40 cakes" --> 40)
- date: Possible date normalization ("Jan 5" --> xx-01-05)
- num2: Possible second-number normalization ("3-1" --> 1)
- list and listId: Possible list normalization
("Apple, Banana, Orange" --> Apple|Banana|Orange)
listId contains the ID of the list items,
while list contains the original strings
Args:
fin: A filename string or a file object.
name: Unique identifier
"""
if isinstance(fin, str):
with open(fin) as fin_file:
self.__init__(fin_file, name=(name or fin))
return
self._name = name or (fin.name if hasattr(fin, 'name') else str(fin))
# Map from relation -> [{first -> seconds}, {second -> firsts}]
self._relations = {}
# Map from id -> original string
self._original_strings = {}
# Set of all row IDs
self._rows = set()
# List of column IDs
self._columns = []
# _grid[i][j] = cell id at row i column j
self._grid = []
# Now fin is a file object
current_row, current_row_id = None, None
header = fin.readline().rstrip('\n').split('\t')
for line in fin:
line = line.rstrip('\n').split('\t')
if len(line) < len(header):
line.extend([''] * (len(header) - len(line)))
record = dict(list(zip(header, line)))
if record['row'] == '-1':
# column headers
self._columns.append(record['id'])
self._original_strings[record['id']] = tsv_unescape(record['content'])
self._original_strings['!' + record['id']] = tsv_unescape(record['content'])
else:
# normal cell
# Define a bunch of knowledge graph edges.
row, col = int(record['row']), int(record['col'])
if current_row != row:
current_row = row
actual_row_index = current_row + self.FIRST_ROW_INDEX
previous_row_id = current_row_id
current_row_id = 'fb:row.r{}'.format(actual_row_index)
if previous_row_id is not None:
# Row --> Next Row relation
self._add_relation(REL_NEXT, previous_row_id, current_row_id)
# Row --> Index relation
self._add_relation(REL_INDEX, current_row_id, float(actual_row_index))
self._rows.add(current_row_id)
self._grid.append([])
# Assume that the cells are listed in the correct order
assert len(self._grid[row]) == col
self._grid[row].append(record['id'])
self._original_strings[record['id']] = tsv_unescape(record['content'])
# Row --> Cell relation
self._add_relation(self._columns[col], current_row_id, record['id'])
# Normalization relations
if record.get('number'):
for second in (parse_number(x) for x in record['number'].split('|')):
self._add_relation(REL_NUMBER, record['id'], second)
if record.get('date'):
for second in (parse_date(x) for x in record['date'].split('|')):
self._add_relation(REL_DATE, record['id'], second)
if record.get('num2'):
for second in (parse_number(x) for x in record['num2'].split('|')):
self._add_relation(REL_NUM2, record['id'], second)
if record.get('listId'):
list_ids = record['listId'].split('|')
for second in list_ids:
self._add_relation(REL_PART, record['id'], second)
# Original strings for listIds
list_strings = tsv_unescape_list(record['list'])
for list_id, list_string in zip(list_ids, list_strings):
self._original_strings[list_id] = list_string
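# Illustrative TSV layout (hypothetical rows, tab-separated, matching the docstring above):
#   row   col   id                 content
#   -1    0     fb:row.row.name    Name
#   0     0     fb:cell.obama      Obama
# The header line (row == -1) supplies the column IDs used as relations; body lines define
# cells plus any normalization fields (number, date, num2, listId, list) that are present.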
def _add_relation(self, relation, first, second):
"""Internal function for adding a knowledge graph edge (x, r, y).
Args:
relation: Relation r (string)
first: Entity x (string, number, or date)
second: Entity y (string, number, or date)
"""
mapping = self._relations.setdefault(relation, [{}, {}])
mapping[0].setdefault(first, set()).add(second)
mapping[1].setdefault(second, set()).add(first)
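# Example (hypothetical triple): after _add_relation('fb:row.row.name', 'fb:row.r1', 'fb:cell.obama'),
#   self._relations['fb:row.row.name'][0] == {'fb:row.r1': {'fb:cell.obama'}}   # first -> seconds
#   self._relations['fb:row.row.name'][1] == {'fb:cell.obama': {'fb:row.r1'}}   # second -> firsts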
################################
# Queries
@property
def name(self):
return self._name
def __str__(self):
return '<TablesKnowledgeGraph {}>'.format(self._name.encode('utf8', 'ignore'))
__repr__ = __str__
@property
def executor(self):
try:
return self._executor
except AttributeError:
# Import here to prevent recursive import
from strongsup.tables.executor import TablesPostfixExecutor
self._executor = TablesPostfixExecutor(self)
return self._executor
def join(self, relation, seconds):
"""Return the set of all x such that for some y in seconds,
(x, relation, y) is in the graph.
Note that the shorthand reversed relations (e.g., !fb:row.row.name) does not work.
Args:
relation (basestring): relation r
seconds (set, InfiniteSet, or list): the set of y's
Returns:
the set of x's
"""
second_to_firsts = self._relations.get(relation, [{}, {}])[1]
if isinstance(seconds, (list, set)):
return set(chain.from_iterable(second_to_firsts.get(y, []) for y in seconds))
elif isinstance(seconds, InfiniteSet):
return set(chain.from_iterable(xs for (y, xs) in second_to_firsts.items() if y in seconds))
else:
raise NotImplementedError('? . {} . {}'.format(relation, seconds))
def reversed_join(self, relation, firsts):
"""Return the collection of all y such that for some x in firsts,
(x, relation, y) is in the graph.
Note that the shorthand reversed relations (e.g., !fb:row.row.name) does not work.
Args:
relation (basestring): Relation r (string)
firsts (set, InfiniteSet, or list): the set of x's
Returns:
the set of y's
"""
first_to_seconds = self._relations.get(relation, [{}, {}])[0]
if isinstance(firsts, (list, set)):
return set(chain.from_iterable(first_to_seconds.get(x, []) for x in firsts))
elif isinstance(firsts, InfiniteSet):
return set(chain.from_iterable(ys for (x, ys) in first_to_seconds.items() if x in firsts))
else:
raise NotImplementedError('{} . {} . ?'.format(firsts, relation))
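# Example (hypothetical graph): if the only edge is (fb:row.r1, fb:row.row.name, fb:cell.obama), then
#   join('fb:row.row.name', {'fb:cell.obama'})      -> {'fb:row.r1'}
#   reversed_join('fb:row.row.name', {'fb:row.r1'}) -> {'fb:cell.obama'}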
@property
def all_rows(self):
"""Return the set of all rows fb:row.r0, ..., fb:row.r(M-1)"""
return self._rows
@property
def all_columns(self):
"""Return the set of all column IDs"""
return set(self._columns)
def has_id(self, id_):
return id_ in self._original_strings
def original_string(self, id_):
"""Return the original string (e.g., fb:cell.obama --> "Obama")"""
return self._original_strings[id_]
################################
# Debug
if __name__ == '__main__':
table = TablesKnowledgeGraph(sys.argv[1])
print(table._rows)
print(table._columns)
print(table._grid)
print(table._relations)
print(table._original_strings)
|
ContextualSP/lemon/executor/strongsup/tables/graph.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tables/graph.py",
"repo_id": "ContextualSP",
"token_count": 4157
}
| 238 |
# -*- coding: utf-8 -*-
import pytest
from strongsup.tables.utils import (
tsv_unescape, tsv_unescape_list, normalize,
)
class TestStringMethods(object):
def test_tsv_unescape(self):
assert tsv_unescape(r'abn\ncd\p\\\pp') == 'abn\ncd|\\|p'
assert tsv_unescape_list(r'abn\ncd\p\\\pp|u\n\pac|r||d') == [
'abn\ncd|\\|p', 'u\n|ac', 'r', '', 'd']
def test_normalize(self):
assert normalize(' This is a BOOK†[a][1]') == 'this is a book'
assert normalize('Apollo 11 (1969) 「阿波罗」') == 'apollo 11 (1969) 「阿波罗」'
assert normalize('"Apollo 11 (1969)"') == 'apollo 11'
assert normalize('"Apollo 11" (1969)') == 'apollo 11'
assert normalize('“Erdős café – ε’š delight.”') == 'erdos cafe - ε\'s delight'
assert normalize('3.14') == '3.14'
|
ContextualSP/lemon/executor/strongsup/tests/tables/test_utils.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tests/tables/test_utils.py",
"repo_id": "ContextualSP",
"token_count": 422
}
| 239 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import re
from collections import defaultdict
from re import RegexFlag
from typing import List
def extract_structure_data(plain_text_content: str):
# extract lines that start with specific prefixes
# map id to its related information
data = []
predict_outputs = re.findall("^D.+", plain_text_content, RegexFlag.MULTILINE)
ground_outputs = re.findall("^T.+", plain_text_content, RegexFlag.MULTILINE)
source_inputs = re.findall("^S.+", plain_text_content, RegexFlag.MULTILINE)
for predict, ground, source in zip(predict_outputs, ground_outputs, source_inputs):
try:
predict_id, _, predict_clean = predict.split('\t')
ground_id, ground_clean = ground.split('\t')
source_id, source_clean = source.split('\t')
assert predict_id[2:] == ground_id[2:]
assert ground_id[2:] == source_id[2:]
except Exception:
print("An error occurred in source: {}".format(source))
continue
data.append((predict_clean, ground_clean, source_clean, predict_id[2:]))
return data
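# Hypothetical example of the line layout these regexes expect (tabs shown as <TAB>):
#   S-12<TAB>source sequence
#   T-12<TAB>gold target sequence
#   D-12<TAB>-0.35<TAB>predicted target sequence
# predict_id[2:] strips the "D-" prefix, so the numeric IDs of the S/T/D lines must agree.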
def evaluate(data: List, target_delimiter: str):
def evaluate_example(_predict_str: str, _ground_str: str):
_predict_spans = _predict_str.split(target_delimiter)
_ground_spans = _ground_str.split(target_delimiter)
_predict_values = defaultdict(lambda: 0)
_ground_values = defaultdict(lambda: 0)
for span in _predict_spans:
try:
_predict_values[float(span)] += 1
except ValueError:
_predict_values[span.strip()] += 1
for span in _ground_spans:
try:
_ground_values[float(span)] += 1
except ValueError:
_ground_values[span.strip()] += 1
_is_correct = _predict_values == _ground_values
return _is_correct
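# Example (hypothetical strings, target_delimiter '|'):
#   evaluate_example('2.0|london', 'london|2') -> True
# because spans are compared as multisets and numeric spans are normalized via float().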
correct_num = 0
correct_arr = []
total = len(data)
for example in data:
predict_str, ground_str, source_str, predict_id = example
is_correct = evaluate_example(predict_str, ground_str)
if is_correct:
correct_num += 1
correct_arr.append(is_correct)
print("Correct / Total : {} / {}, Denotation Accuracy : {:.3f}".format(correct_num, total, correct_num / total))
return correct_arr
def evaluate_generate_file(generate_file_path, target_delimiter):
with open(generate_file_path, "r", encoding="utf8") as generate_f:
file_content = generate_f.read()
data = extract_structure_data(file_content)
correct_arr = evaluate(data, target_delimiter)
# write into eval file
eval_file_path = generate_file_path + ".eval"
eval_file = open(eval_file_path, "w", encoding="utf8")
eval_file.write("Score\tPredict\tGolden\tSource\tID\n")
for example, correct in zip(data, correct_arr):
eval_file.write(str(correct) + "\t" + "\t".join(example) + "\n")
eval_file.close()
|
ContextualSP/lemon/lemon/model_eval.py/0
|
{
"file_path": "ContextualSP/lemon/lemon/model_eval.py",
"repo_id": "ContextualSP",
"token_count": 1290
}
| 240 |
# OpenBookQA
* [evaluator](evaluator/) is the program used by the AI2 Leaderboard to evaluate submitted predictions.
* `data` has the files (and scripts to generate them) used for evaluating Leaderboard predictions.
## Example usage
To evaluate dummy predictions (every question is predicted to be `A`) against the dataset, run this:
```
% python3 evaluator/evaluator.py -qa data/question-answers.jsonl -p data/dummy-predictions.csv -o metrics.json
% cat metrics.json
{"accuracy": 0.276}
```
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/openbookqa/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/openbookqa/README.md",
"repo_id": "ContextualSP",
"token_count": 149
}
| 241 |
#!/bin/bash
echo ----------------------------------
echo removing pycache detritus
echo ----------------------------------
echo
rm -vrf $(find . -type d -name __pycache__)
echo
echo ----------------------------------
echo removing mypy detritus
echo ----------------------------------
echo
rm -vrf .mypy_cache
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/clean.sh/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/clean.sh",
"repo_id": "ContextualSP",
"token_count": 74
}
| 242 |
from scoring.question import QuestionScores
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/scoring/__init__.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/scoring/__init__.py",
"repo_id": "ContextualSP",
"token_count": 9
}
| 243 |
## Test case: Prediction and answer are both empty
* answers.tsv is empty.
* predictions.tsv is empty.
An evaluation on this prediction should abort.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-4/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/testfiles-4/README.md",
"repo_id": "ContextualSP",
"token_count": 40
}
| 244 |
# QASC
* [evaluator](evaluator/) is the program used by the AI2 Leaderboard to evaluate submitted predictions.
* `data` has example prediction files.
## Example usage
To evaluate your predictions against the train or dev datasets, run either of these and look at the resulting metrics.json file:
```
% python3 evaluator/evaluator.py -qa data/train.jsonl -p /path/to/your/train/predictions.csv -o metrics.json
% python3 evaluator/evaluator.py -qa data/dev.jsonl -p /path/to/your/dev/predictions.csv -o metrics.json
```
For example, to evaluate dummy predictions (every question is predicted to be `A`) against the train dataset, run this:
```
% python3 evaluator/evaluator.py -qa data/train.jsonl -p data/train-predictions.csv -o metrics.json
% cat metrics.json
{"accuracy": 0.12417014998770592}
```
For usage of the evaluator, see the [evaluator README](evaluator/).
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/qasc/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/qasc/README.md",
"repo_id": "ContextualSP",
"token_count": 281
}
| 245 |
# SciTail
* [evaluator](evaluator/) is the program used by the AI2 Leaderboard to evaluate submitted predictions.
## Example usage
To evaluate dummy predictions (every pair of sentences is predicted to entail) against the SciTail dataset, run this:
```
% python3 evaluator/evaluator.py -a data/test/answers.jsonl -p data/test/dummy-predictions.csv
accuracy: 0.39604891815616183
```
Replace `data/test/dummy-predictions.csv` with your predictions to compute your test score.
You can also evaluate predictions against the Dev set by running:
```
% python3 evaluator/evaluator.py -a data/dev/answers.jsonl -p data/dev/dummy-predictions.csv
accuracy: 0.5038343558282209
```
Replace `data/dev/dummy-predictions.csv` with your predictions to compute your dev score.
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/scitail/README.md",
"repo_id": "ContextualSP",
"token_count": 247
}
| 246 |
{"id": "tracie-train-uniform-0000", "label": "entailment"}
{"id": "tracie-train-uniform-0001", "label": "entailment"}
{"id": "tracie-train-uniform-0002", "label": "entailment"}
{"id": "tracie-train-uniform-0003", "label": "entailment"}
{"id": "tracie-train-uniform-0004", "label": "entailment"}
{"id": "tracie-train-uniform-0005", "label": "entailment"}
{"id": "tracie-train-uniform-0006", "label": "entailment"}
{"id": "tracie-train-uniform-0007", "label": "entailment"}
{"id": "tracie-train-uniform-0008", "label": "entailment"}
{"id": "tracie-train-uniform-0009", "label": "entailment"}
{"id": "tracie-train-uniform-0010", "label": "entailment"}
{"id": "tracie-train-uniform-0011", "label": "entailment"}
{"id": "tracie-train-uniform-0012", "label": "entailment"}
{"id": "tracie-train-uniform-0013", "label": "entailment"}
{"id": "tracie-train-uniform-0014", "label": "entailment"}
{"id": "tracie-train-uniform-0015", "label": "entailment"}
{"id": "tracie-train-uniform-0016", "label": "entailment"}
{"id": "tracie-train-uniform-0017", "label": "entailment"}
{"id": "tracie-train-uniform-0018", "label": "entailment"}
{"id": "tracie-train-uniform-0019", "label": "entailment"}
{"id": "tracie-train-uniform-0020", "label": "entailment"}
{"id": "tracie-train-uniform-0021", "label": "entailment"}
{"id": "tracie-train-uniform-0022", "label": "entailment"}
{"id": "tracie-train-uniform-0023", "label": "entailment"}
{"id": "tracie-train-uniform-0024", "label": "entailment"}
{"id": "tracie-train-uniform-0025", "label": "entailment"}
{"id": "tracie-train-uniform-0026", "label": "entailment"}
{"id": "tracie-train-uniform-0027", "label": "entailment"}
{"id": "tracie-train-uniform-0028", "label": "entailment"}
{"id": "tracie-train-uniform-0029", "label": "entailment"}
{"id": "tracie-train-uniform-0030", "label": "entailment"}
{"id": "tracie-train-uniform-0031", "label": "entailment"}
{"id": "tracie-train-uniform-0032", "label": "entailment"}
{"id": "tracie-train-uniform-0033", "label": "entailment"}
{"id": "tracie-train-uniform-0034", "label": "entailment"}
{"id": "tracie-train-uniform-0035", "label": "entailment"}
{"id": "tracie-train-uniform-0036", "label": "entailment"}
{"id": "tracie-train-uniform-0037", "label": "entailment"}
{"id": "tracie-train-uniform-0038", "label": "entailment"}
{"id": "tracie-train-uniform-0039", "label": "entailment"}
{"id": "tracie-train-uniform-0040", "label": "entailment"}
{"id": "tracie-train-uniform-0041", "label": "entailment"}
{"id": "tracie-train-uniform-0042", "label": "entailment"}
{"id": "tracie-train-uniform-0043", "label": "entailment"}
{"id": "tracie-train-uniform-0044", "label": "entailment"}
{"id": "tracie-train-uniform-0045", "label": "entailment"}
{"id": "tracie-train-uniform-0046", "label": "entailment"}
{"id": "tracie-train-uniform-0047", "label": "entailment"}
{"id": "tracie-train-uniform-0048", "label": "entailment"}
{"id": "tracie-train-uniform-0049", "label": "entailment"}
{"id": "tracie-train-uniform-0050", "label": "entailment"}
{"id": "tracie-train-uniform-0051", "label": "entailment"}
{"id": "tracie-train-uniform-0052", "label": "entailment"}
{"id": "tracie-train-uniform-0053", "label": "entailment"}
{"id": "tracie-train-uniform-0054", "label": "entailment"}
{"id": "tracie-train-uniform-0055", "label": "entailment"}
{"id": "tracie-train-uniform-0056", "label": "entailment"}
{"id": "tracie-train-uniform-0057", "label": "entailment"}
{"id": "tracie-train-uniform-0058", "label": "entailment"}
{"id": "tracie-train-uniform-0059", "label": "entailment"}
{"id": "tracie-train-uniform-0060", "label": "entailment"}
{"id": "tracie-train-uniform-0061", "label": "entailment"}
{"id": "tracie-train-uniform-0062", "label": "entailment"}
{"id": "tracie-train-uniform-0063", "label": "entailment"}
{"id": "tracie-train-uniform-0064", "label": "entailment"}
{"id": "tracie-train-uniform-0065", "label": "entailment"}
{"id": "tracie-train-uniform-0066", "label": "entailment"}
{"id": "tracie-train-uniform-0067", "label": "entailment"}
{"id": "tracie-train-uniform-0068", "label": "entailment"}
{"id": "tracie-train-uniform-0069", "label": "entailment"}
{"id": "tracie-train-uniform-0070", "label": "entailment"}
{"id": "tracie-train-uniform-0071", "label": "entailment"}
{"id": "tracie-train-uniform-0072", "label": "entailment"}
{"id": "tracie-train-uniform-0073", "label": "entailment"}
{"id": "tracie-train-uniform-0074", "label": "entailment"}
{"id": "tracie-train-uniform-0075", "label": "entailment"}
{"id": "tracie-train-uniform-0076", "label": "entailment"}
{"id": "tracie-train-uniform-0077", "label": "entailment"}
{"id": "tracie-train-uniform-0078", "label": "entailment"}
{"id": "tracie-train-uniform-0079", "label": "entailment"}
{"id": "tracie-train-uniform-0080", "label": "entailment"}
{"id": "tracie-train-uniform-0081", "label": "entailment"}
{"id": "tracie-train-uniform-0082", "label": "entailment"}
{"id": "tracie-train-uniform-0083", "label": "entailment"}
{"id": "tracie-train-uniform-0084", "label": "entailment"}
{"id": "tracie-train-uniform-0085", "label": "entailment"}
{"id": "tracie-train-uniform-0086", "label": "entailment"}
{"id": "tracie-train-uniform-0087", "label": "entailment"}
{"id": "tracie-train-uniform-0088", "label": "entailment"}
{"id": "tracie-train-uniform-0089", "label": "entailment"}
{"id": "tracie-train-uniform-0090", "label": "entailment"}
{"id": "tracie-train-uniform-0091", "label": "entailment"}
{"id": "tracie-train-uniform-0092", "label": "entailment"}
{"id": "tracie-train-uniform-0093", "label": "entailment"}
{"id": "tracie-train-uniform-0094", "label": "entailment"}
{"id": "tracie-train-uniform-0095", "label": "entailment"}
{"id": "tracie-train-uniform-0096", "label": "entailment"}
{"id": "tracie-train-uniform-0097", "label": "entailment"}
{"id": "tracie-train-uniform-0098", "label": "entailment"}
{"id": "tracie-train-uniform-0099", "label": "entailment"}
{"id": "tracie-train-uniform-0100", "label": "entailment"}
{"id": "tracie-train-uniform-0101", "label": "entailment"}
{"id": "tracie-train-uniform-0102", "label": "entailment"}
{"id": "tracie-train-uniform-0103", "label": "entailment"}
{"id": "tracie-train-uniform-0104", "label": "entailment"}
{"id": "tracie-train-uniform-0105", "label": "entailment"}
{"id": "tracie-train-uniform-0106", "label": "entailment"}
{"id": "tracie-train-uniform-0107", "label": "entailment"}
{"id": "tracie-train-uniform-0108", "label": "entailment"}
{"id": "tracie-train-uniform-0109", "label": "entailment"}
{"id": "tracie-train-uniform-0110", "label": "entailment"}
{"id": "tracie-train-uniform-0111", "label": "entailment"}
{"id": "tracie-train-uniform-0112", "label": "entailment"}
{"id": "tracie-train-uniform-0113", "label": "entailment"}
{"id": "tracie-train-uniform-0114", "label": "entailment"}
{"id": "tracie-train-uniform-0115", "label": "entailment"}
{"id": "tracie-train-uniform-0116", "label": "entailment"}
{"id": "tracie-train-uniform-0117", "label": "entailment"}
{"id": "tracie-train-uniform-0118", "label": "entailment"}
{"id": "tracie-train-uniform-0119", "label": "entailment"}
{"id": "tracie-train-uniform-0120", "label": "entailment"}
{"id": "tracie-train-uniform-0121", "label": "entailment"}
{"id": "tracie-train-uniform-0122", "label": "entailment"}
{"id": "tracie-train-uniform-0123", "label": "entailment"}
{"id": "tracie-train-uniform-0124", "label": "entailment"}
{"id": "tracie-train-uniform-0125", "label": "entailment"}
{"id": "tracie-train-uniform-0126", "label": "entailment"}
{"id": "tracie-train-uniform-0127", "label": "entailment"}
{"id": "tracie-train-uniform-0128", "label": "entailment"}
{"id": "tracie-train-uniform-0129", "label": "entailment"}
{"id": "tracie-train-uniform-0130", "label": "entailment"}
{"id": "tracie-train-uniform-0131", "label": "entailment"}
{"id": "tracie-train-uniform-0132", "label": "entailment"}
{"id": "tracie-train-uniform-0133", "label": "entailment"}
{"id": "tracie-train-uniform-0134", "label": "entailment"}
{"id": "tracie-train-uniform-0135", "label": "entailment"}
{"id": "tracie-train-uniform-0136", "label": "entailment"}
{"id": "tracie-train-uniform-0137", "label": "entailment"}
{"id": "tracie-train-uniform-0138", "label": "entailment"}
{"id": "tracie-train-uniform-0139", "label": "entailment"}
{"id": "tracie-train-uniform-0140", "label": "entailment"}
{"id": "tracie-train-uniform-0141", "label": "entailment"}
{"id": "tracie-train-uniform-0142", "label": "entailment"}
{"id": "tracie-train-uniform-0143", "label": "entailment"}
{"id": "tracie-train-uniform-0144", "label": "entailment"}
{"id": "tracie-train-uniform-0145", "label": "entailment"}
{"id": "tracie-train-uniform-0146", "label": "entailment"}
{"id": "tracie-train-uniform-0147", "label": "entailment"}
{"id": "tracie-train-uniform-0148", "label": "entailment"}
{"id": "tracie-train-uniform-0149", "label": "entailment"}
{"id": "tracie-train-uniform-0150", "label": "entailment"}
{"id": "tracie-train-uniform-0151", "label": "entailment"}
{"id": "tracie-train-uniform-0152", "label": "entailment"}
{"id": "tracie-train-uniform-0153", "label": "entailment"}
{"id": "tracie-train-uniform-0154", "label": "entailment"}
{"id": "tracie-train-uniform-0155", "label": "entailment"}
{"id": "tracie-train-uniform-0156", "label": "entailment"}
{"id": "tracie-train-uniform-0157", "label": "entailment"}
{"id": "tracie-train-uniform-0158", "label": "entailment"}
{"id": "tracie-train-uniform-0159", "label": "entailment"}
{"id": "tracie-train-uniform-0160", "label": "entailment"}
{"id": "tracie-train-uniform-0161", "label": "entailment"}
{"id": "tracie-train-uniform-0162", "label": "entailment"}
{"id": "tracie-train-uniform-0163", "label": "entailment"}
{"id": "tracie-train-uniform-0164", "label": "entailment"}
{"id": "tracie-train-uniform-0165", "label": "entailment"}
{"id": "tracie-train-uniform-0166", "label": "entailment"}
{"id": "tracie-train-uniform-0167", "label": "entailment"}
{"id": "tracie-train-uniform-0168", "label": "entailment"}
{"id": "tracie-train-uniform-0169", "label": "entailment"}
{"id": "tracie-train-uniform-0170", "label": "entailment"}
{"id": "tracie-train-uniform-0171", "label": "entailment"}
{"id": "tracie-train-uniform-0172", "label": "entailment"}
{"id": "tracie-train-uniform-0173", "label": "entailment"}
{"id": "tracie-train-uniform-0174", "label": "entailment"}
{"id": "tracie-train-uniform-0175", "label": "entailment"}
{"id": "tracie-train-uniform-0176", "label": "entailment"}
{"id": "tracie-train-uniform-0177", "label": "entailment"}
{"id": "tracie-train-uniform-0178", "label": "entailment"}
{"id": "tracie-train-uniform-0179", "label": "entailment"}
{"id": "tracie-train-uniform-0180", "label": "entailment"}
{"id": "tracie-train-uniform-0181", "label": "entailment"}
{"id": "tracie-train-uniform-0182", "label": "entailment"}
{"id": "tracie-train-uniform-0183", "label": "entailment"}
{"id": "tracie-train-uniform-0184", "label": "entailment"}
{"id": "tracie-train-uniform-0185", "label": "entailment"}
{"id": "tracie-train-uniform-0186", "label": "entailment"}
{"id": "tracie-train-uniform-0187", "label": "entailment"}
{"id": "tracie-train-uniform-0188", "label": "entailment"}
{"id": "tracie-train-uniform-0189", "label": "entailment"}
{"id": "tracie-train-uniform-0190", "label": "entailment"}
{"id": "tracie-train-uniform-0191", "label": "entailment"}
{"id": "tracie-train-uniform-0192", "label": "entailment"}
{"id": "tracie-train-uniform-0193", "label": "entailment"}
{"id": "tracie-train-uniform-0194", "label": "entailment"}
{"id": "tracie-train-uniform-0195", "label": "entailment"}
{"id": "tracie-train-uniform-0196", "label": "entailment"}
{"id": "tracie-train-uniform-0197", "label": "entailment"}
{"id": "tracie-train-uniform-0198", "label": "entailment"}
{"id": "tracie-train-uniform-0199", "label": "entailment"}
{"id": "tracie-train-uniform-0200", "label": "entailment"}
{"id": "tracie-train-uniform-0201", "label": "entailment"}
{"id": "tracie-train-uniform-0202", "label": "entailment"}
{"id": "tracie-train-uniform-0203", "label": "entailment"}
{"id": "tracie-train-uniform-0204", "label": "entailment"}
{"id": "tracie-train-uniform-0205", "label": "entailment"}
{"id": "tracie-train-uniform-0206", "label": "entailment"}
{"id": "tracie-train-uniform-0207", "label": "entailment"}
{"id": "tracie-train-uniform-0208", "label": "entailment"}
{"id": "tracie-train-uniform-0209", "label": "entailment"}
{"id": "tracie-train-uniform-0210", "label": "entailment"}
{"id": "tracie-train-uniform-0211", "label": "entailment"}
{"id": "tracie-train-uniform-0212", "label": "entailment"}
{"id": "tracie-train-uniform-0213", "label": "entailment"}
{"id": "tracie-train-uniform-0214", "label": "entailment"}
{"id": "tracie-train-uniform-0215", "label": "entailment"}
{"id": "tracie-train-uniform-0216", "label": "entailment"}
{"id": "tracie-train-uniform-0217", "label": "entailment"}
{"id": "tracie-train-uniform-0218", "label": "entailment"}
{"id": "tracie-train-uniform-0219", "label": "entailment"}
{"id": "tracie-train-uniform-0220", "label": "entailment"}
{"id": "tracie-train-uniform-0221", "label": "entailment"}
{"id": "tracie-train-uniform-0222", "label": "entailment"}
{"id": "tracie-train-uniform-0223", "label": "entailment"}
{"id": "tracie-train-uniform-0224", "label": "entailment"}
{"id": "tracie-train-uniform-0225", "label": "entailment"}
{"id": "tracie-train-uniform-0226", "label": "entailment"}
{"id": "tracie-train-uniform-0227", "label": "entailment"}
{"id": "tracie-train-uniform-0228", "label": "entailment"}
{"id": "tracie-train-uniform-0229", "label": "entailment"}
{"id": "tracie-train-uniform-0230", "label": "entailment"}
{"id": "tracie-train-uniform-0231", "label": "entailment"}
{"id": "tracie-train-uniform-0232", "label": "entailment"}
{"id": "tracie-train-uniform-0233", "label": "entailment"}
{"id": "tracie-train-uniform-0234", "label": "entailment"}
{"id": "tracie-train-uniform-0235", "label": "entailment"}
{"id": "tracie-train-uniform-0236", "label": "entailment"}
{"id": "tracie-train-uniform-0237", "label": "entailment"}
{"id": "tracie-train-uniform-0238", "label": "entailment"}
{"id": "tracie-train-uniform-0239", "label": "entailment"}
{"id": "tracie-train-uniform-0240", "label": "entailment"}
{"id": "tracie-train-uniform-0241", "label": "entailment"}
{"id": "tracie-train-uniform-0242", "label": "entailment"}
{"id": "tracie-train-uniform-0243", "label": "entailment"}
{"id": "tracie-train-uniform-0244", "label": "entailment"}
{"id": "tracie-train-uniform-0245", "label": "entailment"}
{"id": "tracie-train-uniform-0246", "label": "entailment"}
{"id": "tracie-train-uniform-0247", "label": "entailment"}
{"id": "tracie-train-uniform-0248", "label": "entailment"}
{"id": "tracie-train-uniform-0249", "label": "entailment"}
{"id": "tracie-train-uniform-0250", "label": "entailment"}
{"id": "tracie-train-uniform-0251", "label": "entailment"}
{"id": "tracie-train-uniform-0252", "label": "entailment"}
{"id": "tracie-train-uniform-0253", "label": "entailment"}
{"id": "tracie-train-uniform-0254", "label": "entailment"}
{"id": "tracie-train-uniform-0255", "label": "entailment"}
{"id": "tracie-train-uniform-0256", "label": "entailment"}
{"id": "tracie-train-uniform-0257", "label": "entailment"}
{"id": "tracie-train-uniform-0258", "label": "entailment"}
{"id": "tracie-train-uniform-0259", "label": "entailment"}
{"id": "tracie-train-uniform-0260", "label": "entailment"}
{"id": "tracie-train-uniform-0261", "label": "entailment"}
{"id": "tracie-train-uniform-0262", "label": "entailment"}
{"id": "tracie-train-uniform-0263", "label": "entailment"}
{"id": "tracie-train-uniform-0264", "label": "entailment"}
{"id": "tracie-train-uniform-0265", "label": "entailment"}
{"id": "tracie-train-uniform-0266", "label": "entailment"}
{"id": "tracie-train-uniform-0267", "label": "entailment"}
{"id": "tracie-train-uniform-0268", "label": "entailment"}
{"id": "tracie-train-uniform-0269", "label": "entailment"}
{"id": "tracie-train-uniform-0270", "label": "entailment"}
{"id": "tracie-train-uniform-0271", "label": "entailment"}
{"id": "tracie-train-uniform-0272", "label": "entailment"}
{"id": "tracie-train-uniform-0273", "label": "entailment"}
{"id": "tracie-train-uniform-0274", "label": "entailment"}
{"id": "tracie-train-uniform-0275", "label": "entailment"}
{"id": "tracie-train-uniform-0276", "label": "entailment"}
{"id": "tracie-train-uniform-0277", "label": "entailment"}
{"id": "tracie-train-uniform-0278", "label": "entailment"}
{"id": "tracie-train-uniform-0279", "label": "entailment"}
{"id": "tracie-train-uniform-0280", "label": "entailment"}
{"id": "tracie-train-uniform-0281", "label": "entailment"}
{"id": "tracie-train-uniform-0282", "label": "entailment"}
{"id": "tracie-train-uniform-0283", "label": "entailment"}
{"id": "tracie-train-uniform-0284", "label": "entailment"}
{"id": "tracie-train-uniform-0285", "label": "entailment"}
{"id": "tracie-train-uniform-0286", "label": "entailment"}
{"id": "tracie-train-uniform-0287", "label": "entailment"}
{"id": "tracie-train-uniform-0288", "label": "entailment"}
{"id": "tracie-train-uniform-0289", "label": "entailment"}
{"id": "tracie-train-uniform-0290", "label": "entailment"}
{"id": "tracie-train-uniform-0291", "label": "entailment"}
{"id": "tracie-train-uniform-0292", "label": "entailment"}
{"id": "tracie-train-uniform-0293", "label": "entailment"}
{"id": "tracie-train-uniform-0294", "label": "entailment"}
{"id": "tracie-train-uniform-0295", "label": "entailment"}
{"id": "tracie-train-uniform-0296", "label": "entailment"}
{"id": "tracie-train-uniform-0297", "label": "entailment"}
{"id": "tracie-train-uniform-0298", "label": "entailment"}
{"id": "tracie-train-uniform-0299", "label": "entailment"}
{"id": "tracie-train-uniform-0300", "label": "entailment"}
{"id": "tracie-train-uniform-0301", "label": "entailment"}
{"id": "tracie-train-uniform-0302", "label": "entailment"}
{"id": "tracie-train-uniform-0303", "label": "entailment"}
{"id": "tracie-train-uniform-0304", "label": "entailment"}
{"id": "tracie-train-uniform-0305", "label": "entailment"}
{"id": "tracie-train-uniform-0306", "label": "entailment"}
{"id": "tracie-train-uniform-0307", "label": "entailment"}
{"id": "tracie-train-uniform-0308", "label": "entailment"}
{"id": "tracie-train-uniform-0309", "label": "entailment"}
{"id": "tracie-train-uniform-0310", "label": "entailment"}
{"id": "tracie-train-uniform-0311", "label": "entailment"}
{"id": "tracie-train-uniform-0312", "label": "entailment"}
{"id": "tracie-train-uniform-0313", "label": "entailment"}
{"id": "tracie-train-uniform-0314", "label": "entailment"}
{"id": "tracie-train-uniform-0315", "label": "entailment"}
{"id": "tracie-train-uniform-0316", "label": "entailment"}
{"id": "tracie-train-uniform-0317", "label": "entailment"}
{"id": "tracie-train-uniform-0318", "label": "entailment"}
{"id": "tracie-train-uniform-0319", "label": "entailment"}
{"id": "tracie-train-uniform-0320", "label": "entailment"}
{"id": "tracie-train-uniform-0321", "label": "entailment"}
{"id": "tracie-train-uniform-0322", "label": "entailment"}
{"id": "tracie-train-uniform-0323", "label": "entailment"}
{"id": "tracie-train-uniform-0324", "label": "entailment"}
{"id": "tracie-train-uniform-0325", "label": "entailment"}
{"id": "tracie-train-uniform-0326", "label": "entailment"}
{"id": "tracie-train-uniform-0327", "label": "entailment"}
{"id": "tracie-train-uniform-0328", "label": "entailment"}
{"id": "tracie-train-uniform-0329", "label": "entailment"}
{"id": "tracie-train-uniform-0330", "label": "entailment"}
{"id": "tracie-train-uniform-0331", "label": "entailment"}
{"id": "tracie-train-uniform-0332", "label": "entailment"}
{"id": "tracie-train-uniform-0333", "label": "entailment"}
{"id": "tracie-train-uniform-0334", "label": "entailment"}
{"id": "tracie-train-uniform-0335", "label": "entailment"}
{"id": "tracie-train-uniform-0336", "label": "entailment"}
{"id": "tracie-train-uniform-0337", "label": "entailment"}
{"id": "tracie-train-uniform-0338", "label": "entailment"}
{"id": "tracie-train-uniform-0339", "label": "entailment"}
{"id": "tracie-train-uniform-0340", "label": "entailment"}
{"id": "tracie-train-uniform-0341", "label": "entailment"}
{"id": "tracie-train-uniform-0342", "label": "entailment"}
{"id": "tracie-train-uniform-0343", "label": "entailment"}
{"id": "tracie-train-uniform-0344", "label": "entailment"}
{"id": "tracie-train-uniform-0345", "label": "entailment"}
{"id": "tracie-train-uniform-0346", "label": "entailment"}
{"id": "tracie-train-uniform-0347", "label": "entailment"}
{"id": "tracie-train-uniform-0348", "label": "entailment"}
{"id": "tracie-train-uniform-0349", "label": "entailment"}
{"id": "tracie-train-uniform-0350", "label": "entailment"}
{"id": "tracie-train-uniform-0351", "label": "entailment"}
{"id": "tracie-train-uniform-0352", "label": "entailment"}
{"id": "tracie-train-uniform-0353", "label": "entailment"}
{"id": "tracie-train-uniform-0354", "label": "entailment"}
{"id": "tracie-train-uniform-0355", "label": "entailment"}
{"id": "tracie-train-uniform-0356", "label": "entailment"}
{"id": "tracie-train-uniform-0357", "label": "entailment"}
{"id": "tracie-train-uniform-0358", "label": "entailment"}
{"id": "tracie-train-uniform-0359", "label": "entailment"}
{"id": "tracie-train-uniform-0360", "label": "entailment"}
{"id": "tracie-train-uniform-0361", "label": "entailment"}
{"id": "tracie-train-uniform-0362", "label": "entailment"}
{"id": "tracie-train-uniform-0363", "label": "entailment"}
{"id": "tracie-train-uniform-0364", "label": "entailment"}
{"id": "tracie-train-uniform-0365", "label": "entailment"}
{"id": "tracie-train-uniform-0366", "label": "entailment"}
{"id": "tracie-train-uniform-0367", "label": "entailment"}
{"id": "tracie-train-uniform-0368", "label": "entailment"}
{"id": "tracie-train-uniform-0369", "label": "entailment"}
{"id": "tracie-train-uniform-0370", "label": "entailment"}
{"id": "tracie-train-uniform-0371", "label": "entailment"}
{"id": "tracie-train-uniform-0372", "label": "entailment"}
{"id": "tracie-train-uniform-0373", "label": "entailment"}
{"id": "tracie-train-uniform-0374", "label": "entailment"}
{"id": "tracie-train-uniform-0375", "label": "entailment"}
{"id": "tracie-train-uniform-0376", "label": "entailment"}
{"id": "tracie-train-uniform-0377", "label": "entailment"}
{"id": "tracie-train-uniform-0378", "label": "entailment"}
{"id": "tracie-train-uniform-0379", "label": "entailment"}
{"id": "tracie-train-uniform-0380", "label": "entailment"}
{"id": "tracie-train-uniform-0381", "label": "entailment"}
{"id": "tracie-train-uniform-0382", "label": "entailment"}
{"id": "tracie-train-uniform-0383", "label": "entailment"}
{"id": "tracie-train-uniform-0384", "label": "entailment"}
{"id": "tracie-train-uniform-0385", "label": "entailment"}
{"id": "tracie-train-uniform-0386", "label": "entailment"}
{"id": "tracie-train-uniform-0387", "label": "entailment"}
{"id": "tracie-train-uniform-0388", "label": "entailment"}
{"id": "tracie-train-uniform-0389", "label": "entailment"}
{"id": "tracie-train-uniform-0390", "label": "entailment"}
{"id": "tracie-train-uniform-0391", "label": "entailment"}
{"id": "tracie-train-uniform-0392", "label": "entailment"}
{"id": "tracie-train-uniform-0393", "label": "entailment"}
{"id": "tracie-train-uniform-0394", "label": "entailment"}
{"id": "tracie-train-uniform-0395", "label": "entailment"}
{"id": "tracie-train-uniform-0396", "label": "entailment"}
{"id": "tracie-train-uniform-0397", "label": "entailment"}
{"id": "tracie-train-uniform-0398", "label": "entailment"}
{"id": "tracie-train-uniform-0399", "label": "entailment"}
{"id": "tracie-train-uniform-0400", "label": "entailment"}
{"id": "tracie-train-uniform-0401", "label": "entailment"}
{"id": "tracie-train-uniform-0402", "label": "entailment"}
{"id": "tracie-train-uniform-0403", "label": "entailment"}
{"id": "tracie-train-uniform-0404", "label": "entailment"}
{"id": "tracie-train-uniform-0405", "label": "entailment"}
{"id": "tracie-train-uniform-0406", "label": "entailment"}
{"id": "tracie-train-uniform-0407", "label": "entailment"}
{"id": "tracie-train-uniform-0408", "label": "entailment"}
{"id": "tracie-train-uniform-0409", "label": "entailment"}
{"id": "tracie-train-uniform-0410", "label": "entailment"}
{"id": "tracie-train-uniform-0411", "label": "entailment"}
{"id": "tracie-train-uniform-0412", "label": "entailment"}
{"id": "tracie-train-uniform-0413", "label": "entailment"}
{"id": "tracie-train-uniform-0414", "label": "entailment"}
{"id": "tracie-train-uniform-0415", "label": "entailment"}
{"id": "tracie-train-uniform-0416", "label": "entailment"}
{"id": "tracie-train-uniform-0417", "label": "entailment"}
{"id": "tracie-train-uniform-0418", "label": "entailment"}
{"id": "tracie-train-uniform-0419", "label": "entailment"}
{"id": "tracie-train-uniform-0420", "label": "entailment"}
{"id": "tracie-train-uniform-0421", "label": "entailment"}
{"id": "tracie-train-uniform-0422", "label": "entailment"}
{"id": "tracie-train-uniform-0423", "label": "entailment"}
{"id": "tracie-train-uniform-0424", "label": "entailment"}
{"id": "tracie-train-uniform-0425", "label": "entailment"}
{"id": "tracie-train-uniform-0426", "label": "entailment"}
{"id": "tracie-train-uniform-0427", "label": "entailment"}
{"id": "tracie-train-uniform-0428", "label": "entailment"}
{"id": "tracie-train-uniform-0429", "label": "entailment"}
{"id": "tracie-train-uniform-0430", "label": "entailment"}
{"id": "tracie-train-uniform-0431", "label": "entailment"}
{"id": "tracie-train-uniform-0432", "label": "entailment"}
{"id": "tracie-train-uniform-0433", "label": "entailment"}
{"id": "tracie-train-uniform-0434", "label": "entailment"}
{"id": "tracie-train-uniform-0435", "label": "entailment"}
{"id": "tracie-train-uniform-0436", "label": "entailment"}
{"id": "tracie-train-uniform-0437", "label": "entailment"}
{"id": "tracie-train-uniform-0438", "label": "entailment"}
{"id": "tracie-train-uniform-0439", "label": "entailment"}
{"id": "tracie-train-uniform-0440", "label": "entailment"}
{"id": "tracie-train-uniform-0441", "label": "entailment"}
{"id": "tracie-train-uniform-0442", "label": "entailment"}
{"id": "tracie-train-uniform-0443", "label": "entailment"}
{"id": "tracie-train-uniform-0444", "label": "entailment"}
{"id": "tracie-train-uniform-0445", "label": "entailment"}
{"id": "tracie-train-uniform-0446", "label": "entailment"}
{"id": "tracie-train-uniform-0447", "label": "entailment"}
{"id": "tracie-train-uniform-0448", "label": "entailment"}
{"id": "tracie-train-uniform-0449", "label": "entailment"}
{"id": "tracie-train-uniform-0450", "label": "entailment"}
{"id": "tracie-train-uniform-0451", "label": "entailment"}
{"id": "tracie-train-uniform-0452", "label": "entailment"}
{"id": "tracie-train-uniform-0453", "label": "entailment"}
{"id": "tracie-train-uniform-0454", "label": "entailment"}
{"id": "tracie-train-uniform-0455", "label": "entailment"}
{"id": "tracie-train-uniform-0456", "label": "entailment"}
{"id": "tracie-train-uniform-0457", "label": "entailment"}
{"id": "tracie-train-uniform-0458", "label": "entailment"}
{"id": "tracie-train-uniform-0459", "label": "entailment"}
{"id": "tracie-train-uniform-0460", "label": "entailment"}
{"id": "tracie-train-uniform-0461", "label": "entailment"}
{"id": "tracie-train-uniform-0462", "label": "entailment"}
{"id": "tracie-train-uniform-0463", "label": "entailment"}
{"id": "tracie-train-uniform-0464", "label": "entailment"}
{"id": "tracie-train-uniform-0465", "label": "entailment"}
{"id": "tracie-train-uniform-0466", "label": "entailment"}
{"id": "tracie-train-uniform-0467", "label": "entailment"}
{"id": "tracie-train-uniform-0468", "label": "entailment"}
{"id": "tracie-train-uniform-0469", "label": "entailment"}
{"id": "tracie-train-uniform-0470", "label": "entailment"}
{"id": "tracie-train-uniform-0471", "label": "entailment"}
{"id": "tracie-train-uniform-0472", "label": "entailment"}
{"id": "tracie-train-uniform-0473", "label": "entailment"}
{"id": "tracie-train-uniform-0474", "label": "entailment"}
{"id": "tracie-train-uniform-0475", "label": "entailment"}
{"id": "tracie-train-uniform-0476", "label": "entailment"}
{"id": "tracie-train-uniform-0477", "label": "entailment"}
{"id": "tracie-train-uniform-0478", "label": "entailment"}
{"id": "tracie-train-uniform-0479", "label": "entailment"}
{"id": "tracie-train-uniform-0480", "label": "entailment"}
{"id": "tracie-train-uniform-0481", "label": "entailment"}
{"id": "tracie-train-uniform-0482", "label": "entailment"}
{"id": "tracie-train-uniform-0483", "label": "entailment"}
{"id": "tracie-train-uniform-0484", "label": "entailment"}
{"id": "tracie-train-uniform-0485", "label": "entailment"}
{"id": "tracie-train-uniform-0486", "label": "entailment"}
{"id": "tracie-train-uniform-0487", "label": "entailment"}
{"id": "tracie-train-uniform-0488", "label": "entailment"}
{"id": "tracie-train-uniform-0489", "label": "entailment"}
{"id": "tracie-train-uniform-0490", "label": "entailment"}
{"id": "tracie-train-uniform-0491", "label": "entailment"}
{"id": "tracie-train-uniform-0492", "label": "entailment"}
{"id": "tracie-train-uniform-0493", "label": "entailment"}
{"id": "tracie-train-uniform-0494", "label": "entailment"}
{"id": "tracie-train-uniform-0495", "label": "entailment"}
{"id": "tracie-train-uniform-0496", "label": "entailment"}
{"id": "tracie-train-uniform-0497", "label": "entailment"}
{"id": "tracie-train-uniform-0498", "label": "entailment"}
{"id": "tracie-train-uniform-0499", "label": "entailment"}
{"id": "tracie-train-uniform-0500", "label": "entailment"}
{"id": "tracie-train-uniform-0501", "label": "entailment"}
{"id": "tracie-train-uniform-0502", "label": "entailment"}
{"id": "tracie-train-uniform-0503", "label": "entailment"}
{"id": "tracie-train-uniform-0504", "label": "entailment"}
{"id": "tracie-train-uniform-0505", "label": "entailment"}
{"id": "tracie-train-uniform-0506", "label": "entailment"}
{"id": "tracie-train-uniform-0507", "label": "entailment"}
{"id": "tracie-train-uniform-0508", "label": "entailment"}
{"id": "tracie-train-uniform-0509", "label": "entailment"}
{"id": "tracie-train-uniform-0510", "label": "entailment"}
{"id": "tracie-train-uniform-0511", "label": "entailment"}
{"id": "tracie-train-uniform-0512", "label": "entailment"}
{"id": "tracie-train-uniform-0513", "label": "entailment"}
{"id": "tracie-train-uniform-0514", "label": "entailment"}
{"id": "tracie-train-uniform-0515", "label": "entailment"}
{"id": "tracie-train-uniform-0516", "label": "entailment"}
{"id": "tracie-train-uniform-0517", "label": "entailment"}
{"id": "tracie-train-uniform-0518", "label": "entailment"}
{"id": "tracie-train-uniform-0519", "label": "entailment"}
{"id": "tracie-train-uniform-0520", "label": "entailment"}
{"id": "tracie-train-uniform-0521", "label": "entailment"}
{"id": "tracie-train-uniform-0522", "label": "entailment"}
{"id": "tracie-train-uniform-0523", "label": "entailment"}
{"id": "tracie-train-uniform-0524", "label": "entailment"}
{"id": "tracie-train-uniform-0525", "label": "entailment"}
{"id": "tracie-train-uniform-0526", "label": "entailment"}
{"id": "tracie-train-uniform-0527", "label": "entailment"}
{"id": "tracie-train-uniform-0528", "label": "entailment"}
{"id": "tracie-train-uniform-0529", "label": "entailment"}
{"id": "tracie-train-uniform-0530", "label": "entailment"}
{"id": "tracie-train-uniform-0531", "label": "entailment"}
{"id": "tracie-train-uniform-0532", "label": "entailment"}
{"id": "tracie-train-uniform-0533", "label": "entailment"}
{"id": "tracie-train-uniform-0534", "label": "entailment"}
{"id": "tracie-train-uniform-0535", "label": "entailment"}
{"id": "tracie-train-uniform-0536", "label": "entailment"}
{"id": "tracie-train-uniform-0537", "label": "entailment"}
{"id": "tracie-train-uniform-0538", "label": "entailment"}
{"id": "tracie-train-uniform-0539", "label": "entailment"}
{"id": "tracie-train-uniform-0540", "label": "entailment"}
{"id": "tracie-train-uniform-0541", "label": "entailment"}
{"id": "tracie-train-uniform-0542", "label": "entailment"}
{"id": "tracie-train-uniform-0543", "label": "entailment"}
{"id": "tracie-train-uniform-0544", "label": "entailment"}
{"id": "tracie-train-uniform-0545", "label": "entailment"}
{"id": "tracie-train-uniform-0546", "label": "entailment"}
{"id": "tracie-train-uniform-0547", "label": "entailment"}
{"id": "tracie-train-uniform-0548", "label": "entailment"}
{"id": "tracie-train-uniform-0549", "label": "entailment"}
{"id": "tracie-train-uniform-0550", "label": "entailment"}
{"id": "tracie-train-uniform-0551", "label": "entailment"}
{"id": "tracie-train-uniform-0552", "label": "entailment"}
{"id": "tracie-train-uniform-0553", "label": "entailment"}
{"id": "tracie-train-uniform-0554", "label": "entailment"}
{"id": "tracie-train-uniform-0555", "label": "entailment"}
{"id": "tracie-train-uniform-0556", "label": "entailment"}
{"id": "tracie-train-uniform-0557", "label": "entailment"}
{"id": "tracie-train-uniform-0558", "label": "entailment"}
{"id": "tracie-train-uniform-0559", "label": "entailment"}
{"id": "tracie-train-uniform-0560", "label": "entailment"}
{"id": "tracie-train-uniform-0561", "label": "entailment"}
{"id": "tracie-train-uniform-0562", "label": "entailment"}
{"id": "tracie-train-uniform-0563", "label": "entailment"}
{"id": "tracie-train-uniform-0564", "label": "entailment"}
{"id": "tracie-train-uniform-0565", "label": "entailment"}
{"id": "tracie-train-uniform-0566", "label": "entailment"}
{"id": "tracie-train-uniform-0567", "label": "entailment"}
{"id": "tracie-train-uniform-0568", "label": "entailment"}
{"id": "tracie-train-uniform-0569", "label": "entailment"}
{"id": "tracie-train-uniform-0570", "label": "entailment"}
{"id": "tracie-train-uniform-0571", "label": "entailment"}
{"id": "tracie-train-uniform-0572", "label": "entailment"}
{"id": "tracie-train-uniform-0573", "label": "entailment"}
{"id": "tracie-train-uniform-0574", "label": "entailment"}
{"id": "tracie-train-uniform-0575", "label": "entailment"}
{"id": "tracie-train-uniform-0576", "label": "entailment"}
{"id": "tracie-train-uniform-0577", "label": "entailment"}
{"id": "tracie-train-uniform-0578", "label": "entailment"}
{"id": "tracie-train-uniform-0579", "label": "entailment"}
{"id": "tracie-train-uniform-0580", "label": "entailment"}
{"id": "tracie-train-uniform-0581", "label": "entailment"}
{"id": "tracie-train-uniform-0582", "label": "entailment"}
{"id": "tracie-train-uniform-0583", "label": "entailment"}
{"id": "tracie-train-uniform-0584", "label": "entailment"}
{"id": "tracie-train-uniform-0585", "label": "entailment"}
{"id": "tracie-train-uniform-0586", "label": "entailment"}
{"id": "tracie-train-uniform-0587", "label": "entailment"}
{"id": "tracie-train-uniform-0588", "label": "entailment"}
{"id": "tracie-train-uniform-0589", "label": "entailment"}
{"id": "tracie-train-uniform-0590", "label": "entailment"}
{"id": "tracie-train-uniform-0591", "label": "entailment"}
{"id": "tracie-train-uniform-0592", "label": "entailment"}
{"id": "tracie-train-uniform-0593", "label": "entailment"}
{"id": "tracie-train-uniform-0594", "label": "entailment"}
{"id": "tracie-train-uniform-0595", "label": "entailment"}
{"id": "tracie-train-uniform-0596", "label": "entailment"}
{"id": "tracie-train-uniform-0597", "label": "entailment"}
{"id": "tracie-train-uniform-0598", "label": "entailment"}
{"id": "tracie-train-uniform-0599", "label": "entailment"}
{"id": "tracie-train-uniform-0600", "label": "entailment"}
{"id": "tracie-train-uniform-0601", "label": "entailment"}
{"id": "tracie-train-uniform-0602", "label": "entailment"}
{"id": "tracie-train-uniform-0603", "label": "entailment"}
{"id": "tracie-train-uniform-0604", "label": "entailment"}
{"id": "tracie-train-uniform-0605", "label": "entailment"}
{"id": "tracie-train-uniform-0606", "label": "entailment"}
{"id": "tracie-train-uniform-0607", "label": "entailment"}
{"id": "tracie-train-uniform-0608", "label": "entailment"}
{"id": "tracie-train-uniform-0609", "label": "entailment"}
{"id": "tracie-train-uniform-0610", "label": "entailment"}
{"id": "tracie-train-uniform-0611", "label": "entailment"}
{"id": "tracie-train-uniform-0612", "label": "entailment"}
{"id": "tracie-train-uniform-0613", "label": "entailment"}
{"id": "tracie-train-uniform-0614", "label": "entailment"}
{"id": "tracie-train-uniform-0615", "label": "entailment"}
{"id": "tracie-train-uniform-0616", "label": "entailment"}
{"id": "tracie-train-uniform-0617", "label": "entailment"}
{"id": "tracie-train-uniform-0618", "label": "entailment"}
{"id": "tracie-train-uniform-0619", "label": "entailment"}
{"id": "tracie-train-uniform-0620", "label": "entailment"}
{"id": "tracie-train-uniform-0621", "label": "entailment"}
{"id": "tracie-train-uniform-0622", "label": "entailment"}
{"id": "tracie-train-uniform-0623", "label": "entailment"}
{"id": "tracie-train-uniform-0624", "label": "entailment"}
{"id": "tracie-train-uniform-0625", "label": "entailment"}
{"id": "tracie-train-uniform-0626", "label": "entailment"}
{"id": "tracie-train-uniform-0627", "label": "entailment"}
{"id": "tracie-train-uniform-0628", "label": "entailment"}
{"id": "tracie-train-uniform-0629", "label": "entailment"}
{"id": "tracie-train-uniform-0630", "label": "entailment"}
{"id": "tracie-train-uniform-0631", "label": "entailment"}
{"id": "tracie-train-uniform-0632", "label": "entailment"}
{"id": "tracie-train-uniform-0633", "label": "entailment"}
{"id": "tracie-train-uniform-0634", "label": "entailment"}
{"id": "tracie-train-uniform-0635", "label": "entailment"}
{"id": "tracie-train-uniform-0636", "label": "entailment"}
{"id": "tracie-train-uniform-0637", "label": "entailment"}
{"id": "tracie-train-uniform-0638", "label": "entailment"}
{"id": "tracie-train-uniform-0639", "label": "entailment"}
{"id": "tracie-train-uniform-0640", "label": "entailment"}
{"id": "tracie-train-uniform-0641", "label": "entailment"}
{"id": "tracie-train-uniform-0642", "label": "entailment"}
{"id": "tracie-train-uniform-0643", "label": "entailment"}
{"id": "tracie-train-uniform-0644", "label": "entailment"}
{"id": "tracie-train-uniform-0645", "label": "entailment"}
{"id": "tracie-train-uniform-0646", "label": "entailment"}
{"id": "tracie-train-uniform-0647", "label": "entailment"}
{"id": "tracie-train-uniform-0648", "label": "entailment"}
{"id": "tracie-train-uniform-0649", "label": "entailment"}
{"id": "tracie-train-uniform-0650", "label": "entailment"}
{"id": "tracie-train-uniform-0651", "label": "entailment"}
{"id": "tracie-train-uniform-0652", "label": "entailment"}
{"id": "tracie-train-uniform-0653", "label": "entailment"}
{"id": "tracie-train-uniform-0654", "label": "entailment"}
{"id": "tracie-train-uniform-0655", "label": "entailment"}
{"id": "tracie-train-uniform-0656", "label": "entailment"}
{"id": "tracie-train-uniform-0657", "label": "entailment"}
{"id": "tracie-train-uniform-0658", "label": "entailment"}
{"id": "tracie-train-uniform-0659", "label": "entailment"}
{"id": "tracie-train-uniform-0660", "label": "entailment"}
{"id": "tracie-train-uniform-0661", "label": "entailment"}
{"id": "tracie-train-uniform-0662", "label": "entailment"}
{"id": "tracie-train-uniform-0663", "label": "entailment"}
{"id": "tracie-train-uniform-0664", "label": "entailment"}
{"id": "tracie-train-uniform-0665", "label": "entailment"}
{"id": "tracie-train-uniform-0666", "label": "entailment"}
{"id": "tracie-train-uniform-0667", "label": "entailment"}
{"id": "tracie-train-uniform-0668", "label": "entailment"}
{"id": "tracie-train-uniform-0669", "label": "entailment"}
{"id": "tracie-train-uniform-0670", "label": "entailment"}
{"id": "tracie-train-uniform-0671", "label": "entailment"}
{"id": "tracie-train-uniform-0672", "label": "entailment"}
{"id": "tracie-train-uniform-0673", "label": "entailment"}
{"id": "tracie-train-uniform-0674", "label": "entailment"}
{"id": "tracie-train-uniform-0675", "label": "entailment"}
{"id": "tracie-train-uniform-0676", "label": "entailment"}
{"id": "tracie-train-uniform-0677", "label": "entailment"}
{"id": "tracie-train-uniform-0678", "label": "entailment"}
{"id": "tracie-train-uniform-0679", "label": "entailment"}
{"id": "tracie-train-uniform-0680", "label": "entailment"}
{"id": "tracie-train-uniform-0681", "label": "entailment"}
{"id": "tracie-train-uniform-0682", "label": "entailment"}
{"id": "tracie-train-uniform-0683", "label": "entailment"}
{"id": "tracie-train-uniform-0684", "label": "entailment"}
{"id": "tracie-train-uniform-0685", "label": "entailment"}
{"id": "tracie-train-uniform-0686", "label": "entailment"}
{"id": "tracie-train-uniform-0687", "label": "entailment"}
{"id": "tracie-train-uniform-0688", "label": "entailment"}
{"id": "tracie-train-uniform-0689", "label": "entailment"}
{"id": "tracie-train-uniform-0690", "label": "entailment"}
{"id": "tracie-train-uniform-0691", "label": "entailment"}
{"id": "tracie-train-uniform-0692", "label": "entailment"}
{"id": "tracie-train-uniform-0693", "label": "entailment"}
{"id": "tracie-train-uniform-0694", "label": "entailment"}
{"id": "tracie-train-uniform-0695", "label": "entailment"}
{"id": "tracie-train-uniform-0696", "label": "entailment"}
{"id": "tracie-train-uniform-0697", "label": "entailment"}
{"id": "tracie-train-uniform-0698", "label": "entailment"}
{"id": "tracie-train-uniform-0699", "label": "entailment"}
{"id": "tracie-train-uniform-0700", "label": "entailment"}
{"id": "tracie-train-uniform-0701", "label": "entailment"}
{"id": "tracie-train-uniform-0702", "label": "entailment"}
{"id": "tracie-train-uniform-0703", "label": "entailment"}
{"id": "tracie-train-uniform-0704", "label": "entailment"}
{"id": "tracie-train-uniform-0705", "label": "entailment"}
{"id": "tracie-train-uniform-0706", "label": "entailment"}
{"id": "tracie-train-uniform-0707", "label": "entailment"}
{"id": "tracie-train-uniform-0708", "label": "entailment"}
{"id": "tracie-train-uniform-0709", "label": "entailment"}
{"id": "tracie-train-uniform-0710", "label": "entailment"}
{"id": "tracie-train-uniform-0711", "label": "entailment"}
{"id": "tracie-train-uniform-0712", "label": "entailment"}
{"id": "tracie-train-uniform-0713", "label": "entailment"}
{"id": "tracie-train-uniform-0714", "label": "entailment"}
{"id": "tracie-train-uniform-0715", "label": "entailment"}
{"id": "tracie-train-uniform-0716", "label": "entailment"}
{"id": "tracie-train-uniform-0717", "label": "entailment"}
{"id": "tracie-train-uniform-0718", "label": "entailment"}
{"id": "tracie-train-uniform-0719", "label": "entailment"}
{"id": "tracie-train-uniform-0720", "label": "entailment"}
{"id": "tracie-train-uniform-0721", "label": "entailment"}
{"id": "tracie-train-uniform-0722", "label": "entailment"}
{"id": "tracie-train-uniform-0723", "label": "entailment"}
{"id": "tracie-train-uniform-0724", "label": "entailment"}
{"id": "tracie-train-uniform-0725", "label": "entailment"}
{"id": "tracie-train-uniform-0726", "label": "entailment"}
{"id": "tracie-train-uniform-0727", "label": "entailment"}
{"id": "tracie-train-uniform-0728", "label": "entailment"}
{"id": "tracie-train-uniform-0729", "label": "entailment"}
{"id": "tracie-train-uniform-0730", "label": "entailment"}
{"id": "tracie-train-uniform-0731", "label": "entailment"}
{"id": "tracie-train-uniform-0732", "label": "entailment"}
{"id": "tracie-train-uniform-0733", "label": "entailment"}
{"id": "tracie-train-uniform-0734", "label": "entailment"}
{"id": "tracie-train-uniform-0735", "label": "entailment"}
{"id": "tracie-train-uniform-0736", "label": "entailment"}
{"id": "tracie-train-uniform-0737", "label": "entailment"}
{"id": "tracie-train-uniform-0738", "label": "entailment"}
{"id": "tracie-train-uniform-0739", "label": "entailment"}
{"id": "tracie-train-uniform-0740", "label": "entailment"}
{"id": "tracie-train-uniform-0741", "label": "entailment"}
{"id": "tracie-train-uniform-0742", "label": "entailment"}
{"id": "tracie-train-uniform-0743", "label": "entailment"}
{"id": "tracie-train-uniform-0744", "label": "entailment"}
{"id": "tracie-train-uniform-0745", "label": "entailment"}
{"id": "tracie-train-uniform-0746", "label": "entailment"}
{"id": "tracie-train-uniform-0747", "label": "entailment"}
{"id": "tracie-train-uniform-0748", "label": "entailment"}
{"id": "tracie-train-uniform-0749", "label": "entailment"}
{"id": "tracie-train-uniform-0750", "label": "entailment"}
{"id": "tracie-train-uniform-0751", "label": "entailment"}
{"id": "tracie-train-uniform-0752", "label": "entailment"}
{"id": "tracie-train-uniform-0753", "label": "entailment"}
{"id": "tracie-train-uniform-0754", "label": "entailment"}
{"id": "tracie-train-uniform-0755", "label": "entailment"}
{"id": "tracie-train-uniform-0756", "label": "entailment"}
{"id": "tracie-train-uniform-0757", "label": "entailment"}
{"id": "tracie-train-uniform-0758", "label": "entailment"}
{"id": "tracie-train-uniform-0759", "label": "entailment"}
{"id": "tracie-train-uniform-0760", "label": "entailment"}
{"id": "tracie-train-uniform-0761", "label": "entailment"}
{"id": "tracie-train-uniform-0762", "label": "entailment"}
{"id": "tracie-train-uniform-0763", "label": "entailment"}
{"id": "tracie-train-uniform-0764", "label": "entailment"}
{"id": "tracie-train-uniform-0765", "label": "entailment"}
{"id": "tracie-train-uniform-0766", "label": "entailment"}
{"id": "tracie-train-uniform-0767", "label": "entailment"}
{"id": "tracie-train-uniform-0768", "label": "entailment"}
{"id": "tracie-train-uniform-0769", "label": "entailment"}
{"id": "tracie-train-uniform-0770", "label": "entailment"}
{"id": "tracie-train-uniform-0771", "label": "entailment"}
{"id": "tracie-train-uniform-0772", "label": "entailment"}
{"id": "tracie-train-uniform-0773", "label": "entailment"}
{"id": "tracie-train-uniform-0774", "label": "entailment"}
{"id": "tracie-train-uniform-0775", "label": "entailment"}
{"id": "tracie-train-uniform-0776", "label": "entailment"}
{"id": "tracie-train-uniform-0777", "label": "entailment"}
{"id": "tracie-train-uniform-0778", "label": "entailment"}
{"id": "tracie-train-uniform-0779", "label": "entailment"}
{"id": "tracie-train-uniform-0780", "label": "entailment"}
{"id": "tracie-train-uniform-0781", "label": "entailment"}
{"id": "tracie-train-uniform-0782", "label": "entailment"}
{"id": "tracie-train-uniform-0783", "label": "entailment"}
{"id": "tracie-train-uniform-0784", "label": "entailment"}
{"id": "tracie-train-uniform-0785", "label": "entailment"}
{"id": "tracie-train-uniform-0786", "label": "entailment"}
{"id": "tracie-train-uniform-0787", "label": "entailment"}
{"id": "tracie-train-uniform-0788", "label": "entailment"}
{"id": "tracie-train-uniform-0789", "label": "entailment"}
{"id": "tracie-train-uniform-0790", "label": "entailment"}
{"id": "tracie-train-uniform-0791", "label": "entailment"}
{"id": "tracie-train-uniform-0792", "label": "entailment"}
{"id": "tracie-train-uniform-0793", "label": "entailment"}
{"id": "tracie-train-uniform-0794", "label": "entailment"}
{"id": "tracie-train-uniform-0795", "label": "entailment"}
{"id": "tracie-train-uniform-0796", "label": "entailment"}
{"id": "tracie-train-uniform-0797", "label": "entailment"}
{"id": "tracie-train-uniform-0798", "label": "entailment"}
{"id": "tracie-train-uniform-0799", "label": "entailment"}
{"id": "tracie-train-uniform-0800", "label": "entailment"}
{"id": "tracie-train-uniform-0801", "label": "entailment"}
{"id": "tracie-train-uniform-0802", "label": "entailment"}
{"id": "tracie-train-uniform-0803", "label": "entailment"}
{"id": "tracie-train-uniform-0804", "label": "entailment"}
{"id": "tracie-train-uniform-0805", "label": "entailment"}
{"id": "tracie-train-uniform-0806", "label": "entailment"}
{"id": "tracie-train-uniform-0807", "label": "entailment"}
{"id": "tracie-train-uniform-0808", "label": "entailment"}
{"id": "tracie-train-uniform-0809", "label": "entailment"}
{"id": "tracie-train-uniform-0810", "label": "entailment"}
{"id": "tracie-train-uniform-0811", "label": "entailment"}
{"id": "tracie-train-uniform-0812", "label": "entailment"}
{"id": "tracie-train-uniform-0813", "label": "entailment"}
{"id": "tracie-train-uniform-0814", "label": "entailment"}
{"id": "tracie-train-uniform-0815", "label": "entailment"}
{"id": "tracie-train-uniform-0816", "label": "entailment"}
{"id": "tracie-train-uniform-0817", "label": "entailment"}
{"id": "tracie-train-uniform-0818", "label": "entailment"}
{"id": "tracie-train-uniform-0819", "label": "entailment"}
{"id": "tracie-train-uniform-0820", "label": "entailment"}
{"id": "tracie-train-uniform-0821", "label": "entailment"}
{"id": "tracie-train-uniform-0822", "label": "entailment"}
{"id": "tracie-train-uniform-0823", "label": "entailment"}
{"id": "tracie-train-uniform-0824", "label": "entailment"}
{"id": "tracie-train-uniform-0825", "label": "entailment"}
{"id": "tracie-train-uniform-0826", "label": "entailment"}
{"id": "tracie-train-uniform-0827", "label": "entailment"}
{"id": "tracie-train-uniform-0828", "label": "entailment"}
{"id": "tracie-train-uniform-0829", "label": "entailment"}
{"id": "tracie-train-uniform-0830", "label": "entailment"}
{"id": "tracie-train-uniform-0831", "label": "entailment"}
{"id": "tracie-train-uniform-0832", "label": "entailment"}
{"id": "tracie-train-uniform-0833", "label": "entailment"}
{"id": "tracie-train-uniform-0834", "label": "entailment"}
{"id": "tracie-train-uniform-0835", "label": "entailment"}
{"id": "tracie-train-uniform-0836", "label": "entailment"}
{"id": "tracie-train-uniform-0837", "label": "entailment"}
{"id": "tracie-train-uniform-0838", "label": "entailment"}
{"id": "tracie-train-uniform-0839", "label": "entailment"}
{"id": "tracie-train-uniform-0840", "label": "entailment"}
{"id": "tracie-train-uniform-0841", "label": "entailment"}
{"id": "tracie-train-uniform-0842", "label": "entailment"}
{"id": "tracie-train-uniform-0843", "label": "entailment"}
{"id": "tracie-train-uniform-0844", "label": "entailment"}
{"id": "tracie-train-uniform-0845", "label": "entailment"}
{"id": "tracie-train-uniform-0846", "label": "entailment"}
{"id": "tracie-train-uniform-0847", "label": "entailment"}
{"id": "tracie-train-uniform-0848", "label": "entailment"}
{"id": "tracie-train-uniform-0849", "label": "entailment"}
{"id": "tracie-train-uniform-0850", "label": "entailment"}
{"id": "tracie-train-uniform-0851", "label": "entailment"}
{"id": "tracie-train-uniform-0852", "label": "entailment"}
{"id": "tracie-train-uniform-0853", "label": "entailment"}
{"id": "tracie-train-uniform-0854", "label": "entailment"}
{"id": "tracie-train-uniform-0855", "label": "entailment"}
{"id": "tracie-train-uniform-0856", "label": "entailment"}
{"id": "tracie-train-uniform-0857", "label": "entailment"}
{"id": "tracie-train-uniform-0858", "label": "entailment"}
{"id": "tracie-train-uniform-0859", "label": "entailment"}
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/tracie/data/predictions.jsonl/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/tracie/data/predictions.jsonl",
"repo_id": "ContextualSP",
"token_count": 21493
}
| 247 |
#!/bin/bash
GPU_NUM=16
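# Positional arguments, as consumed by the launch command below:
#   $1  model_name_or_path    $2  output_dir          $3  data_dir
#   $4  train_file            $5  validation_file     $6  per_device_train_batch_size
#   $7  gradient_accumulation_steps                   $8  learning_rate
#   $9  num_train_epochs      $10 seed                $11 max_train_samples
#   $12 max_predict_samples   $13 do_predict          $14 test_file
#   $15 do_train              $16 prediction_mode     $17 num_beams
#   $18 gan_alpha             $19 per_device_eval_batch_size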
python -m torch.distributed.launch --nproc_per_node=${GPU_NUM} hf_generation_multi_es.py \
--model_name_or_path $1 \
--output_dir $2 \
--data_dir $3 \
--train_file $4 \
--validation_file $5 \
--per_device_train_batch_size $6 \
--gradient_accumulation_steps $7 \
--learning_rate $8 \
--num_train_epochs $9 \
--seed ${10} \
--remove_unused_columns False \
--num_beams ${17} \
--save_strategy epoch \
--evaluation_strategy no \
--logging_steps 200 \
--max_train_samples ${11} \
--max_predict_samples ${12} \
--predict_with_generate \
--do_predict ${13} \
--test_file ${14} \
--do_eval False \
--do_train ${15} \
--max_eval_samples 16 \
--prediction_mode ${16} \
--gan_alpha ${18} \
--per_device_eval_batch_size ${19} \
    --overwrite_cache \
--overwrite_output_dir
|
ContextualSP/logigan/pre-training/run_hf.sh/0
|
{
"file_path": "ContextualSP/logigan/pre-training/run_hf.sh",
"repo_id": "ContextualSP",
"token_count": 403
}
| 248 |
# Local path to the dataset (after it has been downloaded).
dataset_local_path="./data/dataset.json"
if [[ ! -f "${dataset_local_path}" ]]; then
echo "ERROR: Dataset not found."
echo "Please download the dataset first from ${dataset_url}!"
echo "See further instructions in the README."
exit 1
fi
# preprocess data from raw CFQ dataset.json
python -m preprocess_cfq --dataset_path="${dataset_local_path}" \
--split_path="./data/splits/mcd1.json" --save_path="./data/mcd1/"
python -m preprocess_cfq --dataset_path="${dataset_local_path}" \
--split_path="./data/splits/mcd2.json" --save_path="./data/mcd2/"
python -m preprocess_cfq --dataset_path="${dataset_local_path}" \
--split_path="./data/splits/mcd3.json" --save_path="./data/mcd3/"
# preprocess data for sketch
python preprocess_hierarchical_training.py
# preprocess data for primi
|
ContextualSP/poset_decoding/preprocess.sh/0
|
{
"file_path": "ContextualSP/poset_decoding/preprocess.sh",
"repo_id": "ContextualSP",
"token_count": 325
}
| 249 |
language: python
cache: pip
sudo: true
env:
global:
- PYTHONPATH=$PYTHONPATH:$TRAVIS_BUILD_DIR/tests:$TRAVIS_BUILD_DIR/matchzoo
matrix:
allow_failures:
- os: osx
include:
- os: linux
dist: xenial
python: 3.6
- os: osx
osx_image: xcode10.2
language: shell
install:
- pip3 install --progress-bar off -r requirements.txt
- python3 -m nltk.downloader punkt
- python3 -m nltk.downloader wordnet
- python3 -m nltk.downloader stopwords
script:
- stty cols 80
- export COLUMNS=80
- if [ "$TRAVIS_EVENT_TYPE" == "pull_request" ]; then make push; fi
- if [ "$TRAVIS_EVENT_TYPE" == "push" ]; then make push; fi
- if [ "$TRAVIS_EVENT_TYPE" == "cron" ]; then make cron; fi
after_success:
- codecov
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/.travis.yml/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/.travis.yml",
"repo_id": "ContextualSP",
"token_count": 328
}
| 250 |
from pathlib import Path
USER_DIR = Path.expanduser(Path('~')).joinpath('.matchzoo')
if not USER_DIR.exists():
USER_DIR.mkdir()
USER_DATA_DIR = USER_DIR.joinpath('datasets')
if not USER_DATA_DIR.exists():
USER_DATA_DIR.mkdir()
USER_TUNED_MODELS_DIR = USER_DIR.joinpath('tuned_models')
from .version import __version__
from .data_pack import DataPack
from .data_pack import pack
from .data_pack import load_data_pack
from . import preprocessors
from . import dataloader
from .preprocessors.chain_transform import chain_transform
from . import auto
from . import tasks
from . import metrics
from . import losses
from . import engine
from . import models
from . import trainers
from . import embedding
from . import datasets
from . import modules
from .engine import hyper_spaces
from .engine.base_preprocessor import load_preprocessor
from .engine.param import Param
from .engine.param_table import ParamTable
from .embedding.embedding import Embedding
from .preprocessors.build_unit_from_data_pack import build_unit_from_data_pack
from .preprocessors.build_vocab_unit import build_vocab_unit
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/__init__.py",
"repo_id": "ContextualSP",
"token_count": 350
}
| 251 |
import typing
from collections.abc import Iterable
import numpy as np
from matchzoo.engine.base_callback import BaseCallback
def _infer_dtype(value):
"""Infer the dtype for the features.
    It is required because the input is usually an array of objects before padding.
"""
while isinstance(value, (list, tuple)) and len(value) > 0:
value = value[0]
if not isinstance(value, Iterable):
return np.array(value).dtype
if value is not None and len(value) > 0 and np.issubdtype(
np.array(value).dtype, np.generic):
dtype = np.array(value[0]).dtype
else:
dtype = value.dtype
# Single Precision
if dtype == np.double:
dtype = np.float32
return dtype
def _padding_2D(input, output, mode: str = 'pre'):
"""
    Pad the input 2D tensor into the output 2D tensor.
    :param input: The input 2D tensor containing the original values.
    :param output: The output, a pre-shaped 2D tensor already filled with the
        pad value.
    :param mode: The padding mode, either 'pre' or 'post'.
"""
batch_size = min(output.shape[0], len(input))
pad_length = output.shape[1]
if mode == 'post':
for i in range(batch_size):
end_pos = min(len(input[i]), pad_length)
if end_pos > 0:
output[i][:end_pos] = input[i][:end_pos]
elif mode == 'pre':
for i in range(batch_size):
start_pos = min(len(input[i]), pad_length)
if start_pos > 0:
output[i][-start_pos:] = input[i][-start_pos:]
else:
        raise ValueError('{} is not a valid pad mode.'.format(mode))
def _padding_3D(input, output, mode: str = 'pre'):
"""
    Pad the input 3D tensor into the output 3D tensor.
    :param input: The input 3D tensor containing the original values.
    :param output: The output, a pre-shaped 3D tensor already filled with the
        pad value.
    :param mode: The padding mode, either 'pre' or 'post'.
"""
batch_size = min(output.shape[0], len(input))
pad_1d_length = output.shape[1]
pad_2d_length = output.shape[2]
if mode == 'post':
for i in range(batch_size):
len_d1 = min(len(input[i]), pad_1d_length)
for j in range(len_d1):
end_pos = min(len(input[i][j]), pad_2d_length)
if end_pos > 0:
output[i][j][:end_pos] = input[i][j][:end_pos]
elif mode == 'pre':
for i in range(batch_size):
len_d1 = min(len(input[i]), pad_1d_length)
for j in range(len_d1):
start_pos = min(len(input[i][j]), pad_2d_length)
if start_pos > 0:
output[i][j][-start_pos:] = input[i][j][-start_pos:]
else:
        raise ValueError('{} is not a valid pad mode.'.format(mode))
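# A minimal illustration of the 2D helper above (toy values, for reference only):
#   >>> out = np.full([2, 4], 0)
#   >>> _padding_2D([[1, 2], [3, 4, 5]], out, mode='post')
#   >>> out
#   array([[1, 2, 0, 0],
#          [3, 4, 5, 0]])
# With mode='pre' the same rows are right-aligned instead:
#   array([[0, 0, 1, 2],
#          [0, 3, 4, 5]])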
class BasicPadding(BaseCallback):
"""
Pad data for basic preprocessor.
:param fixed_length_left: Integer. If set, `text_left` will be padded
to this length.
:param fixed_length_right: Integer. If set, `text_right` will be padded
to this length.
:param pad_word_value: the value to fill text.
:param pad_word_mode: String, `pre` or `post`:
pad either before or after each sequence.
:param with_ngram: Boolean. Whether to pad the n-grams.
:param fixed_ngram_length: Integer. If set, each word will be padded to
this length, or it will be set as the maximum length of words in
current batch.
:param pad_ngram_value: the value to fill empty n-grams.
    :param pad_ngram_mode: String, `pre` or `post`: pad either before or after
each sequence.
"""
def __init__(
self,
fixed_length_left: int = None,
fixed_length_right: int = None,
pad_word_value: typing.Union[int, str] = 0,
pad_word_mode: str = 'pre',
with_ngram: bool = False,
fixed_ngram_length: int = None,
pad_ngram_value: typing.Union[int, str] = 0,
pad_ngram_mode: str = 'pre'
):
"""Init."""
self._fixed_length_left = fixed_length_left
self._fixed_length_right = fixed_length_right
self._pad_word_value = pad_word_value
self._pad_word_mode = pad_word_mode
self._with_ngram = with_ngram
self._fixed_ngram_length = fixed_ngram_length
self._pad_ngram_value = pad_ngram_value
self._pad_ngram_mode = pad_ngram_mode
def on_batch_unpacked(self, x: dict, y: np.ndarray):
"""Pad `x['text_left']` and `x['text_right]`."""
batch_size = len(x['id_left'])
pad_length_left = int(max(x['length_left']))
pad_length_right = int(max(x['length_right']))
if self._with_ngram:
ngram_length_left = max([len(w)
for k in x['ngram_left'] for w in k])
ngram_length_right = max([len(w)
for k in x['ngram_right'] for w in k])
ngram_length = max(ngram_length_left, ngram_length_right)
if self._fixed_ngram_length:
ngram_length = self._fixed_ngram_length
if self._fixed_length_left is not None:
pad_length_left = self._fixed_length_left
if self._fixed_length_right is not None:
pad_length_right = self._fixed_length_right
for key, value in x.items():
dtype = _infer_dtype(value)
if key == 'text_left':
padded_value = np.full([batch_size, pad_length_left],
self._pad_word_value, dtype=dtype)
_padding_2D(value, padded_value, self._pad_word_mode)
elif key == 'text_right':
padded_value = np.full([batch_size, pad_length_right],
self._pad_word_value, dtype=dtype)
_padding_2D(value, padded_value, self._pad_word_mode)
elif key == 'ngram_left':
padded_value = np.full(
[batch_size, pad_length_left, ngram_length],
self._pad_ngram_value, dtype=dtype
)
_padding_3D(value, padded_value, self._pad_ngram_mode)
elif key == 'ngram_right':
padded_value = np.full(
[batch_size, pad_length_right, ngram_length],
self._pad_ngram_value, dtype=dtype
)
_padding_3D(value, padded_value, self._pad_ngram_mode)
else:
continue
x[key] = padded_value
class DRMMPadding(BaseCallback):
"""
Pad data for DRMM Model.
:param fixed_length_left: Integer. If set, `text_left` and
`match_histogram` will be padded to this length.
:param fixed_length_right: Integer. If set, `text_right` will be padded
to this length.
:param pad_value: the value to fill text.
:param pad_mode: String, `pre` or `post`:
pad either before or after each sequence.
"""
def __init__(
self,
fixed_length_left: int = None,
fixed_length_right: int = None,
pad_value: typing.Union[int, str] = 0,
pad_mode: str = 'pre',
):
"""Init."""
self._fixed_length_left = fixed_length_left
self._fixed_length_right = fixed_length_right
self._pad_value = pad_value
self._pad_mode = pad_mode
def on_batch_unpacked(self, x: dict, y: np.ndarray):
"""
Padding.
        Pad `x['text_left']`, `x['text_right']` and `x['match_histogram']`.
"""
batch_size = len(x['id_left'])
pad_length_left = max(x['length_left'])
pad_length_right = max(x['length_right'])
bin_size = len(x['match_histogram'][0][0])
if self._fixed_length_left is not None:
pad_length_left = self._fixed_length_left
if self._fixed_length_right is not None:
pad_length_right = self._fixed_length_right
for key, value in x.items():
if key != 'text_left' and key != 'text_right' and \
key != 'match_histogram':
continue
dtype = _infer_dtype(value)
if key == 'text_left':
padded_value = np.full([batch_size, pad_length_left],
self._pad_value, dtype=dtype)
_padding_2D(value, padded_value, self._pad_mode)
elif key == 'text_right':
padded_value = np.full([batch_size, pad_length_right],
self._pad_value, dtype=dtype)
_padding_2D(value, padded_value, self._pad_mode)
else: # key == 'match_histogram'
padded_value = np.full(
[batch_size, pad_length_left, bin_size],
self._pad_value, dtype=dtype)
_padding_3D(value, padded_value, self._pad_mode)
x[key] = padded_value
class BertPadding(BaseCallback):
"""
Pad data for bert preprocessor.
:param fixed_length_left: Integer. If set, `text_left` will be padded
to this length.
:param fixed_length_right: Integer. If set, `text_right` will be padded
to this length.
:param pad_value: the value to fill text.
:param pad_mode: String, `pre` or `post`:
pad either before or after each sequence.
"""
def __init__(
self,
fixed_length_left: int = None,
fixed_length_right: int = None,
pad_value: typing.Union[int, str] = 0,
pad_mode: str = 'pre',
):
"""Init."""
self._padding = BasicPadding(fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
pad_word_value=pad_value,
pad_word_mode=pad_mode)
def on_batch_unpacked(self, x: dict, y: np.ndarray):
"""Pad `x['text_left']` and `x['text_right]`."""
self._padding.on_batch_unpacked(x, y)
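        # 101 and 102 are the [CLS] and [SEP] token ids of the standard BERT
        # vocabulary: [CLS] is prepended to the left text, and [SEP] is
        # appended to both the left and the right text after padding.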
x['text_left'] = np.insert(x['text_left'], 0, 101, axis=1)
x['text_left'] = np.insert(x['text_left'], x['text_left'][0].size, 102, axis=1)
x['text_right'] = np.insert(x['text_right'], x['text_right'][0].size, 102, axis=1)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/callbacks/padding.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/dataloader/callbacks/padding.py",
"repo_id": "ContextualSP",
"token_count": 4960
}
| 252 |
from .load_data import load_data
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/quora_qp/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/datasets/quora_qp/__init__.py",
"repo_id": "ContextualSP",
"token_count": 10
}
| 253 |
"""Base Model."""
import abc
import typing
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
from matchzoo.utils import parse_activation
from matchzoo.engine.base_callback import BaseCallback
from matchzoo.engine import hyper_spaces
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.param import Param
from matchzoo.dataloader import callbacks
from matchzoo import preprocessors
from matchzoo import tasks
class BaseModel(nn.Module, abc.ABC):
"""
Abstract base class of all MatchZoo models.
MatchZoo models are wrapped over pytorch models. `params` is a set of model
hyper-parameters that deterministically builds a model. In other words,
`params['model_class'](params=params)` of the same `params` always create
models with the same structure.
:param params: Model hyper-parameters. (default: return value from
:meth:`get_default_params`)
Example:
>>> BaseModel() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Can't instantiate abstract class BaseModel ...
>>> class MyModel(BaseModel):
... def build(self):
... pass
... def forward(self):
... pass
>>> isinstance(MyModel(), BaseModel)
True
"""
def __init__(
self,
params: typing.Optional[ParamTable] = None
):
"""Init."""
super().__init__()
self._params = params or self.get_default_params()
@classmethod
def get_default_params(
cls,
with_embedding=False,
with_multi_layer_perceptron=False
) -> ParamTable:
"""
Model default parameters.
The common usage is to instantiate :class:`matchzoo.engine.ModelParams`
        first, then set the model-specific parameters.
Examples:
>>> class MyModel(BaseModel):
... def build(self):
... print(self._params['num_eggs'], 'eggs')
... print('and', self._params['ham_type'])
... def forward(self, greeting):
... print(greeting)
...
... @classmethod
... def get_default_params(cls):
... params = ParamTable()
... params.add(Param('num_eggs', 512))
... params.add(Param('ham_type', 'Parma Ham'))
... return params
>>> my_model = MyModel()
>>> my_model.build()
512 eggs
and Parma Ham
>>> my_model('Hello MatchZoo!')
Hello MatchZoo!
Notice that all parameters must be serialisable for the entire model
to be serialisable. Therefore, it's strongly recommended to use python
native data types to store parameters.
:return: model parameters
"""
params = ParamTable()
params.add(Param(
name='model_class', value=cls,
desc="Model class. Used internally for save/load. "
"Changing this may cause unexpected behaviors."
))
params.add(Param(
name='task',
desc="Decides model output shape, loss, and metrics."
))
params.add(Param(
name='out_activation_func', value=None,
desc="Activation function used in output layer."
))
if with_embedding:
params.add(Param(
name='with_embedding', value=True,
desc="A flag used help `auto` module. Shouldn't be changed."
))
params.add(Param(
name='embedding',
desc='FloatTensor containing weights for the Embedding.',
validator=lambda x: isinstance(x, np.ndarray)
))
params.add(Param(
name='embedding_input_dim',
desc='Usually equals vocab size + 1. Should be set manually.'
))
params.add(Param(
name='embedding_output_dim',
desc='Should be set manually.'
))
params.add(Param(
name='padding_idx', value=0,
                desc='If given, pads the output with the embedding vector at '
                     'padding_idx (initialized to zeros) whenever it encounters '
                     'the index.'
))
params.add(Param(
name='embedding_freeze', value=False,
desc='`True` to freeze embedding layer training, '
                     '`False` to train the embedding parameters.'
))
if with_multi_layer_perceptron:
params.add(Param(
name='with_multi_layer_perceptron', value=True,
desc="A flag of whether a multiple layer perceptron is used. "
"Shouldn't be changed."
))
params.add(Param(
name='mlp_num_units', value=128,
desc="Number of units in first `mlp_num_layers` layers.",
hyper_space=hyper_spaces.quniform(8, 256, 8)
))
params.add(Param(
name='mlp_num_layers', value=3,
desc="Number of layers of the multiple layer percetron.",
hyper_space=hyper_spaces.quniform(1, 6)
))
params.add(Param(
name='mlp_num_fan_out', value=64,
desc="Number of units of the layer that connects the multiple "
"layer percetron and the output.",
hyper_space=hyper_spaces.quniform(4, 128, 4)
))
params.add(Param(
name='mlp_activation_func', value='relu',
desc='Activation function used in the multiple '
'layer perceptron.'
))
return params
def guess_and_fill_missing_params(self, verbose=1):
"""
Guess and fill missing parameters in :attr:`params`.
        Use this method to automatically fill in other hyper-parameters.
This involves some guessing so the parameter it fills could be
wrong. For example, the default task is `Ranking`, and if we do not
        set it to `Classification` manually for data packs prepared for
        classification, then the shape of the model output will not match
        the data.
:param verbose: Verbosity.
"""
self._params.get('task').set_default(tasks.Ranking(), verbose)
if 'with_embedding' in self._params:
self._params.get('embedding_input_dim').set_default(300, verbose)
self._params.get('embedding_output_dim').set_default(300, verbose)
def _set_param_default(self, name: str,
default_val: str, verbose: int = 0):
if self._params[name] is None:
self._params[name] = default_val
if verbose:
print(f"Parameter \"{name}\" set to {default_val}.")
@classmethod
def get_default_preprocessor(
cls,
truncated_mode: str = 'pre',
truncated_length_left: typing.Optional[int] = None,
truncated_length_right: typing.Optional[int] = None,
filter_mode: str = 'df',
filter_low_freq: float = 1,
filter_high_freq: float = float('inf'),
remove_stop_words: bool = False,
ngram_size: typing.Optional[int] = None,
) -> BasePreprocessor:
"""
Model default preprocessor.
The preprocessor's transform should produce a correctly shaped data
pack that can be used for training.
:return: Default preprocessor.
"""
return preprocessors.BasicPreprocessor(
truncated_mode=truncated_mode,
truncated_length_left=truncated_length_left,
truncated_length_right=truncated_length_right,
filter_mode=filter_mode,
filter_low_freq=filter_low_freq,
filter_high_freq=filter_high_freq,
remove_stop_words=remove_stop_words,
ngram_size=ngram_size
)
@classmethod
def get_default_padding_callback(
cls,
fixed_length_left: int = None,
fixed_length_right: int = None,
pad_word_value: typing.Union[int, str] = 0,
pad_word_mode: str = 'pre',
with_ngram: bool = False,
fixed_ngram_length: int = None,
pad_ngram_value: typing.Union[int, str] = 0,
pad_ngram_mode: str = 'pre'
) -> BaseCallback:
"""
Model default padding callback.
        The padding callback's `on_batch_unpacked` pads a batch of data to
        a fixed length.
:return: Default padding callback.
"""
return callbacks.BasicPadding(
fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
pad_word_value=pad_word_value,
pad_word_mode=pad_word_mode,
with_ngram=with_ngram,
fixed_ngram_length=fixed_ngram_length,
pad_ngram_value=pad_ngram_value,
pad_ngram_mode=pad_ngram_mode
)
@property
def params(self) -> ParamTable:
""":return: model parameters."""
return self._params
@params.setter
def params(self, val):
self._params = val
@abc.abstractmethod
def build(self):
"""Build model, each subclass need to implement this method."""
raise NotImplementedError(
"Build method not implemented in the subclass."
)
@abc.abstractmethod
def forward(self, *input):
"""
Defines the computation performed at every call.
Should be overridden by all subclasses.
"""
raise NotImplementedError(
"Forward method not implemented in the subclass."
)
def _make_embedding_layer(
self,
num_embeddings: int = 0,
embedding_dim: int = 0,
freeze: bool = True,
embedding: typing.Optional[np.ndarray] = None,
**kwargs
) -> nn.Module:
""":return: an embedding module."""
if isinstance(embedding, np.ndarray):
return nn.Embedding.from_pretrained(
embeddings=torch.Tensor(embedding),
freeze=freeze
)
else:
return nn.Embedding(
num_embeddings=num_embeddings,
embedding_dim=embedding_dim
)
def _make_default_embedding_layer(
self,
**kwargs
) -> nn.Module:
""":return: an embedding module."""
if isinstance(self._params['embedding'], np.ndarray):
self._params['embedding_input_dim'] = (
self._params['embedding'].shape[0]
)
self._params['embedding_output_dim'] = (
self._params['embedding'].shape[1]
)
return nn.Embedding.from_pretrained(
embeddings=torch.Tensor(self._params['embedding']),
freeze=self._params['embedding_freeze'],
padding_idx=self._params['padding_idx']
)
else:
return nn.Embedding(
num_embeddings=self._params['embedding_input_dim'],
embedding_dim=self._params['embedding_output_dim'],
padding_idx=self._params['padding_idx']
)
def _make_output_layer(
self,
in_features: int = 0
) -> nn.Module:
""":return: a correctly shaped torch module for model output."""
task = self._params['task']
if isinstance(task, tasks.Classification):
out_features = task.num_classes
elif isinstance(task, tasks.Ranking):
out_features = 1
else:
raise ValueError(f"{task} is not a valid task type. "
f"Must be in `Ranking` and `Classification`.")
if self._params['out_activation_func']:
return nn.Sequential(
nn.Linear(in_features, out_features),
parse_activation(self._params['out_activation_func'])
)
else:
return nn.Linear(in_features, out_features)
def _make_perceptron_layer(
self,
in_features: int = 0,
out_features: int = 0,
activation: nn.Module = nn.ReLU()
) -> nn.Module:
""":return: a perceptron layer."""
return nn.Sequential(
nn.Linear(in_features, out_features),
activation
)
def _make_multi_layer_perceptron_layer(self, in_features) -> nn.Module:
""":return: a multiple layer perceptron."""
if not self._params['with_multi_layer_perceptron']:
raise AttributeError(
                'Parameter `with_multi_layer_perceptron` not set.')
activation = parse_activation(self._params['mlp_activation_func'])
mlp_sizes = [
in_features,
*self._params['mlp_num_layers'] * [self._params['mlp_num_units']],
self._params['mlp_num_fan_out']
]
mlp = [
self._make_perceptron_layer(in_f, out_f, activation)
for in_f, out_f in zip(mlp_sizes, mlp_sizes[1:])
]
return nn.Sequential(*mlp)
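# For reference, with the defaults above (mlp_num_layers=3, mlp_num_units=128,
# mlp_num_fan_out=64) and, say, in_features=16, the layer sizes become
# [16, 128, 128, 128, 64], i.e. four Linear+activation blocks ending in a
# 64-dimensional output.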
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/base_model.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/engine/base_model.py",
"repo_id": "ContextualSP",
"token_count": 6350
}
| 254 |
"""Normalized discounted cumulative gain metric for ranking."""
import numpy as np
from matchzoo.engine.base_metric import (
BaseMetric, sort_and_couple, RankingMetric
)
from .discounted_cumulative_gain import DiscountedCumulativeGain
class NormalizedDiscountedCumulativeGain(RankingMetric):
"""Normalized discounted cumulative gain metric."""
ALIAS = ['normalized_discounted_cumulative_gain', 'ndcg']
def __init__(self, k: int = 1, threshold: float = 0.):
"""
:class:`NormalizedDiscountedCumulativeGain` constructor.
:param k: Number of results to consider
:param threshold: the label threshold of relevance degree.
"""
self._k = k
self._threshold = threshold
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS[0]}@{self._k}({self._threshold})"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate normalized discounted cumulative gain (ndcg).
        Relevance labels are positive real values or binary values.
Example:
>>> y_true = [0, 1, 2, 0]
>>> y_pred = [0.4, 0.2, 0.5, 0.7]
>>> ndcg = NormalizedDiscountedCumulativeGain
>>> ndcg(k=1)(y_true, y_pred)
0.0
>>> round(ndcg(k=2)(y_true, y_pred), 2)
0.52
>>> round(ndcg(k=3)(y_true, y_pred), 2)
0.52
>>> type(ndcg()(y_true, y_pred))
<class 'float'>
:param y_true: The ground true label of each document.
:param y_pred: The predicted scores of each document.
:return: Normalized discounted cumulative gain.
"""
dcg_metric = DiscountedCumulativeGain(k=self._k,
threshold=self._threshold)
idcg_val = dcg_metric(y_true, y_true)
dcg_val = dcg_metric(y_true, y_pred)
return dcg_val / idcg_val if idcg_val != 0 else 0
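# Consistent with the doctest above, the underlying DCG is assumed to take the
# common form DCG@k = sum_{i=1..k} (2^rel_i - 1) / log2(i + 1); e.g. ndcg(k=2)
# evaluates to (3 / log2(3)) / (3 / log2(2) + 1 / log2(3)) ~= 0.52.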
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/normalized_discounted_cumulative_gain.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/metrics/normalized_discounted_cumulative_gain.py",
"repo_id": "ContextualSP",
"token_count": 919
}
| 255 |
"""An implementation of DUET Model."""
import typing
import torch
import torch.nn as nn
import torch.nn.functional as F
from matchzoo import preprocessors
from matchzoo.engine import hyper_spaces
from matchzoo.engine.param import Param
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine.base_callback import BaseCallback
from matchzoo.dataloader import callbacks
from matchzoo.modules import Attention
from matchzoo.utils import parse_activation
class DUET(BaseModel):
"""
Duet Model.
Examples:
>>> model = DUET()
>>> model.params['left_length'] = 10
>>> model.params['right_length'] = 40
>>> model.params['lm_filters'] = 300
>>> model.params['mlp_num_layers'] = 2
>>> model.params['mlp_num_units'] = 300
>>> model.params['mlp_num_fan_out'] = 300
>>> model.params['mlp_activation_func'] = 'relu'
>>> model.params['vocab_size'] = 2000
>>> model.params['dm_filters'] = 300
>>> model.params['dm_conv_activation_func'] = 'relu'
>>> model.params['dm_kernel_size'] = 3
>>> model.params['dm_right_pool_size'] = 8
>>> model.params['dropout_rate'] = 0.5
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=False,
with_multi_layer_perceptron=True
)
params.add(Param(name='mask_value', value=0,
desc="The value to be masked from inputs."))
params.add(Param(name='left_length', value=10,
desc='Length of left input.'))
params.add(Param(name='right_length', value=40,
desc='Length of right input.'))
params.add(Param(name='lm_filters', value=300,
desc="Filter size of 1D convolution layer in "
"the local model."))
params.add(Param(name='vocab_size', value=419,
desc="Vocabulary size of the tri-letters used in "
"the distributed model."))
params.add(Param(name='dm_filters', value=300,
desc="Filter size of 1D convolution layer in "
"the distributed model."))
params.add(Param(name='dm_kernel_size', value=3,
desc="Kernel size of 1D convolution layer in "
"the distributed model."))
params.add(Param(name='dm_conv_activation_func', value='relu',
desc="Activation functions of the convolution layer "
"in the distributed model."))
params.add(Param(name='dm_right_pool_size', value=8,
desc="Kernel size of 1D convolution layer in "
"the distributed model."))
params.add(Param(
name='dropout_rate', value=0.5,
hyper_space=hyper_spaces.quniform(low=0.0, high=0.8, q=0.02),
desc="The dropout rate."))
return params
@classmethod
def get_default_preprocessor(
cls,
truncated_mode: str = 'pre',
truncated_length_left: int = 10,
truncated_length_right: int = 40,
filter_mode: str = 'df',
filter_low_freq: float = 1,
filter_high_freq: float = float('inf'),
remove_stop_words: bool = False,
ngram_size: int = 3
):
""":return: Default preprocessor."""
return preprocessors.BasicPreprocessor(
truncated_mode=truncated_mode,
truncated_length_left=truncated_length_left,
truncated_length_right=truncated_length_right,
filter_mode=filter_mode,
filter_low_freq=filter_low_freq,
filter_high_freq=filter_high_freq,
remove_stop_words=remove_stop_words,
ngram_size=ngram_size
)
@classmethod
def get_default_padding_callback(
cls,
fixed_length_left: int = 10,
fixed_length_right: int = 40,
pad_word_value: typing.Union[int, str] = 0,
pad_word_mode: str = 'pre',
with_ngram: bool = True,
fixed_ngram_length: int = None,
pad_ngram_value: typing.Union[int, str] = 0,
pad_ngram_mode: str = 'pre'
) -> BaseCallback:
"""
Model default padding callback.
The padding callback's on_batch_unpacked would pad a batch of data to
a fixed length.
:return: Default padding callback.
"""
return callbacks.BasicPadding(
fixed_length_left=fixed_length_left,
fixed_length_right=fixed_length_right,
pad_word_value=pad_word_value,
pad_word_mode=pad_word_mode,
with_ngram=with_ngram,
fixed_ngram_length=fixed_ngram_length,
pad_ngram_value=pad_ngram_value,
pad_ngram_mode=pad_ngram_mode
)
@classmethod
def _xor_match(cls, x, y):
"""Xor match of two inputs."""
x_expand = torch.unsqueeze(x, 2).repeat(1, 1, y.shape[1])
y_expand = torch.unsqueeze(y, 1).repeat(1, x.shape[1], 1)
out = torch.eq(x_expand, y_expand).float()
return out
def build(self):
"""Build model structure."""
self.lm_conv1d = nn.Conv1d(
in_channels=self._params['right_length'],
out_channels=self.params['lm_filters'],
kernel_size=1,
stride=1
)
lm_mlp_size = self._params['left_length'] * self._params['lm_filters']
self.lm_mlp = self._make_multi_layer_perceptron_layer(lm_mlp_size)
self.lm_linear = self._make_perceptron_layer(
in_features=self._params['mlp_num_fan_out'],
out_features=1
)
self.dm_conv_activation_func = parse_activation(
self._params['dm_conv_activation_func']
)
self.dm_conv_left = nn.Conv1d(
self._params['vocab_size'],
self._params['dm_filters'],
self._params['dm_kernel_size']
)
self.dm_mlp_left = self._make_perceptron_layer(
in_features=self._params['dm_filters'],
out_features=self._params['dm_filters']
)
self.dm_conv1_right = nn.Conv1d(
self._params['vocab_size'],
self._params['dm_filters'],
self._params['dm_kernel_size']
)
self.dm_conv2_right = nn.Conv1d(
self._params['dm_filters'],
self._params['dm_filters'],
1
)
dm_mp_size = (
(self._params['right_length'] - self._params['dm_kernel_size'] + 1) // (
self._params['dm_right_pool_size']) * self._params['dm_filters']
)
self.dm_mlp = self._make_multi_layer_perceptron_layer(dm_mp_size)
self.dm_linear = self._make_perceptron_layer(
in_features=self._params['mlp_num_fan_out'],
out_features=1
)
self.dropout = nn.Dropout(self._params['dropout_rate'])
self.out = self._make_output_layer(1)
def forward(self, inputs):
"""Forward."""
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# L = `input_left` sequence length
# R = `input_right` sequence length
# Left input and right input.
# shape = [B, L]
# shape = [B, R]
query_word, doc_word = inputs['text_left'], inputs['text_right']
# shape = [B, L]
mask_query = (query_word != self._params['mask_value']).float()
mask_doc = (doc_word != self._params['mask_value']).float()
# shape = [B, ngram_size, L]
# shape = [B, ngram_size, R]
query_ngram, doc_ngram = inputs['ngram_left'], inputs['ngram_right']
query_ngram = F.normalize(query_ngram, p=2, dim=2)
doc_ngram = F.normalize(doc_ngram, p=2, dim=2)
# shape = [B, R, L]
matching_xor = self._xor_match(doc_word, query_word)
mask_xor = torch.einsum('bi, bj->bij', mask_doc, mask_query)
xor_res = torch.einsum('bij, bij->bij', matching_xor, mask_xor)
# Process local model
lm_res = self.lm_conv1d(xor_res)
lm_res = lm_res.flatten(start_dim=1, end_dim=-1)
lm_res = self.lm_mlp(lm_res)
lm_res = self.dropout(lm_res)
lm_res = self.lm_linear(lm_res)
# Process distributed model
dm_left = self.dm_conv_left(query_ngram.permute(0, 2, 1))
dm_left = self.dm_conv_activation_func(dm_left)
dm_left = torch.max(dm_left, dim=-1)[0]
dm_left = self.dm_mlp_left(dm_left)
dm_right = self.dm_conv1_right(doc_ngram.permute(0, 2, 1))
dm_right = F.max_pool2d(
self.dm_conv_activation_func(dm_right),
(1, self._params['dm_right_pool_size'])
)
dm_right = self.dm_conv2_right(dm_right)
dm_res = torch.einsum('bl,blk->blk', dm_left, dm_right)
dm_res = dm_res.flatten(start_dim=1, end_dim=-1)
dm_res = self.dm_mlp(dm_res)
dm_res = self.dropout(dm_res)
dm_res = self.dm_linear(dm_res)
x = lm_res + dm_res
out = self.out(x)
return out
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/duet.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/models/duet.py",
"repo_id": "ContextualSP",
"token_count": 4688
}
| 256 |
"""Gaussian kernel module."""
import typing
import torch
import torch.nn as nn
class GaussianKernel(nn.Module):
"""
Gaussian kernel module.
:param mu: Float, mean of the kernel.
:param sigma: Float, sigma of the kernel.
Examples:
>>> import torch
>>> kernel = GaussianKernel()
>>> x = torch.randn(4, 5, 10)
>>> x.shape
torch.Size([4, 5, 10])
>>> kernel(x).shape
torch.Size([4, 5, 10])
"""
def __init__(self, mu: float = 1., sigma: float = 1.):
"""Gaussian kernel constructor."""
super().__init__()
self.mu = mu
self.sigma = sigma
def forward(self, x):
"""Forward."""
return torch.exp(
-0.5 * ((x - self.mu) ** 2) / (self.sigma ** 2)
)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/gaussian_kernel.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/modules/gaussian_kernel.py",
"repo_id": "ContextualSP",
"token_count": 365
}
| 257 |
from .unit import Unit
class DigitRemoval(Unit):
"""Process unit to remove digits."""
def transform(self, input_: list) -> list:
"""
Remove digits from list of tokens.
:param input_: list of tokens to be filtered.
:return tokens: list of tokens with digits removed.
"""
return [token for token in input_ if not token.isdigit()]
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/digit_removal.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/preprocessors/units/digit_removal.py",
"repo_id": "ContextualSP",
"token_count": 141
}
| 258 |
from .classification import Classification
from .ranking import Ranking
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/tasks/__init__.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/matchzoo/tasks/__init__.py",
"repo_id": "ContextualSP",
"token_count": 13
}
| 259 |
import torch
import numpy as np
from matchzoo import losses
def test_hinge_loss():
true_value = torch.Tensor([[1.2], [1], [1], [1]])
pred_value = torch.Tensor([[1.2], [0.1], [0], [-0.3]])
expected_loss = torch.Tensor([(0 + 1 - 0.3 + 0) / 2.0])
loss = losses.RankHingeLoss()(pred_value, true_value)
assert torch.isclose(expected_loss, loss)
expected_loss = torch.Tensor(
[(2 + 0.1 - 1.2 + 2 - 0.3 + 0) / 2.0])
loss = losses.RankHingeLoss(margin=2)(pred_value, true_value)
assert torch.isclose(expected_loss, loss)
true_value = torch.Tensor(
[[1.2], [1], [0.8], [1], [1], [0.8]])
pred_value = torch.Tensor(
[[1.2], [0.1], [-0.5], [0], [0], [-0.3]])
expected_loss = torch.Tensor(
[(0 + 1 - 0.15) / 2.0])
loss = losses.RankHingeLoss(num_neg=2, margin=1)(
pred_value, true_value)
assert torch.isclose(expected_loss, loss)
def test_rank_crossentropy_loss():
losses.neg_num = 1
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
true_value = torch.Tensor([[1.], [0.], [0.], [1.]])
pred_value = torch.Tensor([[0.8], [0.1], [0.8], [0.1]])
expected_loss = torch.Tensor(
[(-np.log(softmax([0.8, 0.1])[0]) - np.log(
softmax([0.8, 0.1])[1])) / 2])
loss = losses.RankCrossEntropyLoss()(pred_value, true_value)
assert torch.isclose(expected_loss, loss)
true_value = torch.Tensor([[1.], [0.], [0.], [0.], [1.], [0.]])
pred_value = torch.Tensor([[0.8], [0.1], [0.1], [0.8], [0.1], [0.1]])
expected_loss = torch.Tensor(
[(-np.log(softmax([0.8, 0.1, 0.1])[0]) - np.log(
softmax([0.8, 0.1, 0.1])[1])) / 2])
loss = losses.RankCrossEntropyLoss(num_neg=2)(
pred_value, true_value)
assert torch.isclose(expected_loss, loss)
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/test_losses.py/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tests/test_losses.py",
"repo_id": "ContextualSP",
"token_count": 888
}
| 260 |
<jupyter_start><jupyter_code>%run init.ipynb
ranking_task = mz.tasks.Ranking(losses=mz.losses.RankCrossEntropyLoss(num_neg=4))
ranking_task.metrics = [
mz.metrics.NormalizedDiscountedCumulativeGain(k=3),
mz.metrics.NormalizedDiscountedCumulativeGain(k=5),
mz.metrics.MeanAveragePrecision()
]
preprocessor = mz.models.DSSM.get_default_preprocessor(ngram_size=3)
train_pack_processed = preprocessor.fit_transform(train_pack_raw)
valid_pack_processed = preprocessor.transform(dev_pack_raw)
test_pack_processed = preprocessor.transform(test_pack_raw)
preprocessor.context
triletter_callback = mz.dataloader.callbacks.Ngram(
preprocessor, mode='aggregate')
trainset = mz.dataloader.Dataset(
data_pack=train_pack_processed,
mode='pair',
num_dup=1,
num_neg=4,
callbacks=[triletter_callback]
)
testset = mz.dataloader.Dataset(
data_pack=test_pack_processed,
callbacks=[triletter_callback]
)
padding_callback = mz.models.DSSM.get_default_padding_callback()
trainloader = mz.dataloader.DataLoader(
dataset=trainset,
batch_size=32,
stage='train',
resample=True,
callback=padding_callback
)
testloader = mz.dataloader.DataLoader(
dataset=testset,
batch_size=32,
stage='dev',
callback=padding_callback
)
model = mz.models.DSSM()
model.params['task'] = ranking_task
model.params['vocab_size'] = preprocessor.context['ngram_vocab_size']
model.params['mlp_num_layers'] = 3
model.params['mlp_num_units'] = 300
model.params['mlp_num_fan_out'] = 128
model.params['mlp_activation_func'] = 'relu'
model.build()
print(model, sum(p.numel() for p in model.parameters() if p.requires_grad))
optimizer = torch.optim.Adam(model.parameters())
trainer = mz.trainers.Trainer(
model=model,
optimizer=optimizer,
trainloader=trainloader,
validloader=testloader,
validate_interval=None,
epochs=10
)
trainer.run()<jupyter_output><empty_output>
|
ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/dssm.ipynb/0
|
{
"file_path": "ContextualSP/poset_decoding/traversal_path_prediction/MatchZoo-py/tutorials/ranking/dssm.ipynb",
"repo_id": "ContextualSP",
"token_count": 765
}
| 261 |
# Semantic Parsing in Context <img src="https://pytorch.org/assets/images/logo-dark.svg" height = "25" align=center />
The official PyTorch implementation of our paper [How Far are We from Effective Context Modeling? An Exploratory Study on Semantic Parsing in Context](https://arxiv.org/pdf/2002.00652.pdf). This code implements multiple techniques for modeling context in semantic parsing, and provides `readable`, `fast` and `strong` baselines for the community.
If you find our code useful, please consider citing our paper:
```bib
@inproceedings{qian2020how,
title={How Far are We from Effective Context Modeling? An Exploratory Study on Semantic Parsing in Context},
author={Liu, Qian and Chen, Bei and Guo, Jiaqi and Lou, Jian-Guang and Zhou, Bin and Zhang, Dongmei},
booktitle={IJCAI},
year={2020}
}
```
## Content
- [Task Introduction](#task)
- [Model Framework](#model)
- [Install Requirements](#requirements)
- [Prepare Dataset](#data)
- [Train Model](#train)
- [Evaluate Model](#evaluate)
- [Predict on Custom Data](#predict-on-custom-data)
- [Demo on Web](#demo)
- [Pretrained Weights](#experiment)
- [Fine-grained Analysis](#analysis)
- [Frequent Asked Questions](#faq)
## Task
<img src="misc/task.png" height=150>
Semantic parsing, which translates a natural language sentence into its corresponding executable logic form (e.g. Structured Query Language, SQL), relieves users from the burden of learning the techniques behind the logic form. The majority of previous studies on semantic parsing assume that queries are context-independent and analyze them in isolation. In reality, however, users prefer to interact with systems in a dialogue, where they are allowed to ask context-dependent, incomplete questions. This gives rise to the task of **Semantic Parsing in Context**, which is quite challenging because of the complex contextual phenomena involved.
## Model
<img src="misc/semantic_framework.png" height=400>
Our backbone is a sequence-to-sequence model with a grammar-based decoder, specifically using the IRNet grammar (SemQL).
## Requirements
### Python Environment
First of all, you should set up a Python environment. This code base has been tested under Python 3.x, and we officially support Python 3.7.
After installing Python 3.7, we strongly recommend using `virtualenv` (a tool to create isolated Python environments) to manage the environment. You could use the following commands to create an environment.
```bash
python -m pip install virtualenv
virtualenv venv
```
### Activate Virtual Environment
Then you should activate the environment to install the dependencies. You could do so using the commands below (please change $ENV_FOLDER to your own virtualenv folder path, e.g. venv):
```bash
$ENV_FOLDER\Scripts\activate.bat (Windows)
source $ENV_FOLDER/bin/activate (Linux)
```
### Install Libraries
The most important requirements of our code base are as follows:
- pytorch >= 1.2.0 (not tested on other versions, though 1.0.0 may also work)
- allennlp == 0.9.0
Then you should install the following packages:
- dill
- ordered_set
- edit_distance
You should install them first, for example with the single pip command below.
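A minimal sketch, assuming a pip-based setup inside the activated virtualenv (package names follow the list above; no extra version pins are implied):
```bash
pip install "torch>=1.2.0" allennlp==0.9.0 dill ordered_set edit_distance
```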
### SQL-based evaluation
Our code saves the best model checkpoint based on SQL-level comparison, which is the performance indicator on the Spider, SParC and CoSQL benchmarks. We adopt the evaluation script from [EditSQL](https://github.com/ryanzhumich/editsql), adapted for Python 3.
## Data
### Prepare Dataset
You could download the two datasets [SParC](https://yale-lily.github.io/sparc) and [CoSQL](https://yale-lily.github.io/cosql), and then rename their top-level folders to `dataset_sparc` and `dataset_cosql`, respectively. An example structure for the `SParC` dataset is as follows:
```
|- dataset_sparc
    |- database
        |- academic
        |- activity_1
        |- ...
    |- train.json
    |- dev.json
    |- tables.json
|- models
|- predictor
|- ...
```
### Prepare Glove
If you want to train models without BERT, please download [Glove Twitter](http://nlp.stanford.edu/data/glove.twitter.27B.zip), then unzip it and place the files in a folder named `glove`, for example as shown below.
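A minimal sketch, assuming `wget` and `unzip` are available and that the archive extracts its `.txt` files directly into the target directory:
```bash
wget http://nlp.stanford.edu/data/glove.twitter.27B.zip
mkdir -p glove
unzip glove.twitter.27B.zip -d glove
```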
## Train

We use the `allennlp` command to train our models, and all the hyper-parameters for the different settings are stored in the configs listed under `train_configs` and `train_configs_bert`. The correspondence between config files and the model architectures in our paper is as follows:
| Config | Model in Paper |
| :--- | :---: |
| concat.none.jsonnet | Concat History |
| turn.none.jsonnet | Turn-level Encoder|
| none.attn.jsonnet | SQL Attention|
| none.gate.jsonnet | Gate Mechanism |
| none.token.jsonnet | Action Copy |
| none.tree.jsonnet | Tree Copy |
| concat.attn.jsonnet | Concat + Attention |
| concat.token.jsonnet | Concat + Token Copy |
| concat.tree.jsonnet | Concat + Tree Copy |
| turn.attn.jsonnet | Turn + SQL Attention|
| turn.token.jsonnet | Turn + Action Copy|
| turn.tree.jsonnet | Turn + Tree Copy|
| turn.token.attn.jsonnet | Turn + Action Copy + SQL Attention|
For example, you could run `Concat History` on `SParC` using the following script (*we also provide [`windows`](https://github.com/microsoft/ContextualSP/tree/master/bash_files/windows) and [`linux`](https://github.com/microsoft/ContextualSP/tree/master/bash_files/linux) batch scripts in the folder [`bash_files`](https://github.com/microsoft/ContextualSP/tree/master/bash_files) for your convenience; please run them under the root directory `./`*):
- Under linux:
```bash
export seed=1
export config_file=train_configs/concat.none.jsonnet
export model_file=checkpoints_sparc/sparc_concat_model
export tables_file=dataset_sparc/tables.json
export database_path=dataset_sparc/database
export dataset_path=dataset_sparc
export train_data_path=dataset_sparc/train.json
export validation_data_path=dataset_sparc/dev.json
export pretrained_file=glove/glove.twitter.27B.100d.txt
allennlp train -s ${model_file} ${config_file} \
--include-package dataset_reader.sparc_reader \
--include-package models.sparc_parser \
-o "{\"model.serialization_dir\":\"${model_file}\",\"random_seed\":\"${seed}\",\"numpy_seed\":\"${seed}\",\"pytorch_seed\":\"${seed}\",\"dataset_reader.tables_file\":\"${tables_file}\",\"dataset_reader.database_path\":\"${database_path}\",\"train_data_path\":\"${train_data_path}\",\"validation_data_path\":\"${validation_data_path}\",\"model.text_embedder.tokens.pretrained_file\":\"${pretrained_file}\",\"model.dataset_path\":\"${dataset_path}\"}"
```
- Under Windows (`"""` escapes the double quotation mark and is equivalent to `"`):
```cmd
set seed=1
set config_file=train_configs/concat.none.jsonnet
set model_file=checkpoints_sparc/sparc_concat_model
set tables_file=dataset_sparc/tables.json
set database_path=dataset_sparc/database
set dataset_path=dataset_sparc
set train_data_path=dataset_sparc/train.json
set validation_data_path=dataset_sparc/dev.json
set pretrained_file=glove/glove.twitter.27B.100d.txt
allennlp train -s %model_file% %config_file% ^
--include-package dataset_reader.sparc_reader ^
--include-package models.sparc_parser ^
-o {"""model.serialization_dir""":"""%model_file%""","""random_seed""":"""%seed%""","""numpy_seed""":"""%seed%""","""pytorch_seed""":"""%seed%""","""dataset_reader.tables_file""":"""%tables_file%""","""dataset_reader.database_path""":"""%database_path%""","""train_data_path""":"""%train_data_path%""","""validation_data_path""":"""%validation_data_path%""","""model.text_embedder.tokens.pretrained_file""":"""%pretrained_file%""","""model.dataset_path""":"""%dataset_path%"""}
```
## Evaluate
You could predict and evaluate SQLs using trained model checkpoint file (e.g. `checkpoints_sparc/sparc_concat_model/model.tar.gz`) using the following command:
- Under Linux
```bash
export model_file=checkpoints_sparc/sparc_concat_model
export validation_file=dataset_sparc/dev.json
export validation_out_file=dataset_sparc/dev.jsonl
export prediction_out_file=predict.jsonl
python postprocess.py --valid_file ${validation_file} --valid_out_file ${validation_out_file}
allennlp predict \
--include-package dataset_reader.sparc_reader \
--include-package models.sparc_parser \
--include-package predictor.sparc_predictor \
--predictor sparc \
--dataset-reader-choice validation \
--batch-size 1 \
--cuda-device 0 \
--output-file ${model_file}/${prediction_out_file} \
${model_file}/model.tar.gz ${validation_out_file}
```
- Under Windows
```cmd
set model_file=checkpoints_sparc/sparc_concat_model
set validation_file=dataset_sparc/dev.json
set validation_out_file=dataset_sparc/dev.jsonl
set prediction_out_file=predict.jsonl
python postprocess.py --valid_file %validation_file% --valid_out_file %validation_out_file%
allennlp predict ^
--include-package dataset_reader.sparc_reader ^
--include-package models.sparc_parser ^
--include-package predictor.sparc_predictor ^
--predictor sparc ^
--dataset-reader-choice validation ^
--batch-size 1 ^
--cuda-device 0 ^
--output-file %model_file%/%prediction_out_file% ^
%model_file%/model.tar.gz %validation_out_file%
```
## Predict On Custom Data
Our code also supports predicting SQL on custom data via a function call; you could find it in `predict.py`. Before running it, you should first store your own database in the paths corresponding to the arguments of `PredictManager`, using the same format as SParC/CoSQL. A hedged usage sketch is shown below.
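A minimal sketch, assuming a trained archive and an SParC-style database layout; the constructor arguments and the method name used here are hypothetical placeholders for illustration only, so please check `predict.py` for the actual `PredictManager` signature:
```python
# Hypothetical sketch -- argument and method names are illustrative only;
# see predict.py for the real PredictManager interface.
from predict import PredictManager

manager = PredictManager(
    archive_file="checkpoints_sparc/sparc_concat_model/model.tar.gz",  # trained model archive
    tables_file="dataset_sparc/tables.json",                           # schema file in SParC format
    database_path="dataset_sparc/database",                            # folder of SQLite databases
)

# An interaction is a sequence of questions separated by ';', as in the demo page.
print(manager.predict("Show all airlines ; Which of them are based in the USA ?", "flight_2"))
```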
## Demo
You could also host a demo page with a well-trained archived model (e.g. `checkpoints_sparc/sparc_concat_model/model.tar.gz`) using the following command:
- Under Linux
```bash
export model_file=checkpoints_sparc/sparc_concat_model
python -m allennlp.service.server_simple \
--archive-path ${model_file}/model.tar.gz \
--predictor sparc \
--include-package predictor.sparc_predictor \
--include-package dataset_reader.sparc_reader \
--include-package models.sparc_parser \
--title "Contextual Semantic Parsing Demo" \
--field-name question \
--field-name database_id
```
- Under Windows
```cmd
set model_file=checkpoints_sparc/sparc_concat_model
python -m allennlp.service.server_simple ^
--archive-path %model_file%/model.tar.gz ^
--predictor sparc ^
--include-package predictor.sparc_predictor ^
--include-package dataset_reader.sparc_reader ^
--include-package models.sparc_parser ^
--title "Contextual Semantic Parsing Demo" ^
--field-name question ^
--field-name database_id
```
Once it is running, you could open the demo page at [http://localhost:8000](http://localhost:8000). The question field accepts an interaction of questions separated by `;`. See the demo page below (it only accepts database_id values that appear in `tables.json`):

## Experiment
| Dataset | BERT | Config | Best | Avg | Pretrained_Weights |
| :---: | :---: |:--- | :---: | :---: | :---: |
| SParC | No | concat.none.jsonnet | 41.8 | 40.0 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/sparc.concat/model.tar.gz)|
| SParC | No | turn.none.jsonnet | 43.6 | 42.4 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/sparc.turn/model.tar.gz)|
| SParC | No | none.token.jsonnet | 38.9 | 38.4 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/sparc.token/model.tar.gz)|
| SParC | Yes | concat.none.jsonnet | 52.6 | 51.0 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/sparc.bert.concat/model.tar.gz)|
| SParC | Yes | turn.none.jsonnet | 47.0 | 43.0 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/sparc.bert.turn/model.tar.gz)|
| SParC | Yes | none.token.jsonnet | 46.1 | 45.4 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/sparc.bert.token/model.tar.gz)|
| CoSQL | No | concat.none.jsonnet | 33.5 | 32.4 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/cosql.concat/model.tar.gz)|
| CoSQL | No | turn.none.jsonnet | 31.9 | 31.3 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/cosql.turn/model.tar.gz)|
| CoSQL | No | none.token.jsonnet | 32.8 | 31.9 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/cosql.token/model.tar.gz)|
| CoSQL | Yes | concat.none.jsonnet | 41.0 | 40.4 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/cosql.bert.concat/model.tar.gz)|
| CoSQL | Yes | turn.none.jsonnet | 39.2 | 38.9 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/cosql.bert.turn/model.tar.gz)|
| CoSQL | Yes | none.token.jsonnet | 42.1 | 41.6 | [model.tar.gz](https://github.com/microsoft/ContextualSP/releases/download/cosql.bert.token/model.tar.gz)|
## Analysis
We also provide fine-grained analysis results [here](https://github.com/microsoft/ContextualSP/blob/master/misc/dev_annotaed.tsv), annotated on the SParC validation set. You could either use them in your research work or use them to debug your model at a fine-grained level.
## Acknowledgement
We thank the following repos, which were very helpful to us.
- [allennlp](https://github.com/allenai/allennlp)
- [IRNet](https://github.com/microsoft/IRNet)
- [spider-schema-gnn](https://github.com/benbogin/spider-schema-gnn)
- [sparc](https://github.com/taoyds/sparc)
- [editsql](https://github.com/ryanzhumich/editsql)
## FAQ
**1. allennlp.common.checks.ConfigurationError: 'Serialization directory (checkpoints_sparc/sparc_concat_none_model) already exists and is not empty. Specify --recover to recover training from existing output.'**
*Ans*: It means that there is already a checkpoint model named `checkpoints_sparc/sparc_concat_none_model`. Please add the `--recover` option after the train command (if you want to resume training from that model) or delete the existing checkpoint.
**2. FileNotFoundError: [Errno 2] No such file or directory: 'dataset_sparc/train.json'**
*Ans*: Please first download the datasets and rename them to `dataset_sparc` and `dataset_cosql` following the instructions above.
**3. The GPU Memory is not enough for running experiments under BERT**
*Ans*: Training in this repo batches `interactions` rather than `single sentences`. This means that, even when `batch_size` is set to `1`, one batch contains ~5 NL-SQL pairs (one interaction/dialogue). Therefore, the minimum memory requirement is nearly `17GB` in all BERT settings.
Considering this, we provide a memory-friendly config file, `concat.none.mem.jsonnet`, in which data batching is based on natural language sentences rather than interactions. It needs only about `5GB` when `batch_size` is `1`.
To reduce memory consumption, you could also consider decreasing the `maximum_history_len` hyper-parameter at line #57 in sparc_reader.py (the default value is `5`). In practice, it also works well with `3` or `4`.
**4. How to debug my custom model**
*Ans*: We provide `debug.py` for debugging your custom model. Please change `config_file` (line #9) to your custom one and debug by running `debug.py`.
**5. How to validate the correctness of generated SemQL**
*Ans*: We provide a [test script](https://github.com/microsoft/ContextualSP/blob/2b59163b3cca9922098c19895943b2c9e57c3447/semantic_parsing_in_context/test_sql_to_semql.py) for validating the translation from SQL into SemQL. You could copy and modify the following snippet to build your own test case:
```python
def test_example(self):
db_id = "flight_2"
sql_plain = "SELECT * FROM AIRLINES"
sql_clause = """
{
"orderBy": [],
"from": {
"table_units": [
[
"table_unit",
0
]
],
"conds": []
},
"union": null,
"except": null,
"groupBy": [],
"limit": null,
"intersect": null,
"where": [],
"having": [],
"select": [
false,
[
[
0,
[
0,
[
0,
0,
false
],
null
]
]
]
]
}
"""
expected_action_str = "[Statement -> Root, Root -> Select, Select -> A, A -> none C T, C -> *, T -> airlines]"
self.template(sql_plain, sql_clause, db_id, expected_action_str)
```
> `db_id`, `sql_plain` and `sql_clause` can be found in the dataset, and `expected_action_str` can be left empty at first until you obtain the correct expected action sequence from the output.
|
ContextualSP/semantic_parsing_in_context/README.md/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/README.md",
"repo_id": "ContextualSP",
"token_count": 5834
}
| 262 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
import os
from functools import partial
from typing import List
from typing import Optional, Dict
from typing import Tuple
import edit_distance
import numpy as np
import torch
from allennlp.training.metrics.metric import Metric
from overrides import overrides
from context.converter import ActionConverter
from context.db_context import SparcDBContext
from scripts.sparc_evaluate import evaluate
class MetricUtil:
"""
This metric is designed for the full-interaction matching ratio.
"""
def __init__(self, dataset_path=None):
"""
If a dataset path is provided, evaluation is SQL-based.
:param dataset_path: path to the dataset folder used for SQL evaluation
"""
self._total_value = 0.0
self._count = 0
# we package a complete evaluate function in `sparc_evaluate.py`
if dataset_path is not None:
self._evaluator = SQLEvaluator(dataset_path=dataset_path)
else:
self._evaluator = ActionEvaluator()
def __call__(self, best_action_indices: List[List[int]],
gold_labels,
batch_size: int,
mask: Optional[torch.LongTensor],
db_contexts: List[SparcDBContext] = None,
action_mapping: List[List[str]] = None,
with_sql: bool = False) -> Tuple:
# if evaluating with SQL, we need the action mapping to restore the action strings
assert (action_mapping is not None) == with_sql
assert (db_contexts is not None) == with_sql
assert isinstance(gold_labels[0], str) == with_sql
# convert best_final_states into best_action_indices
return self.calculation(best_action_indices, gold_labels, batch_size, mask,
db_contexts, action_mapping, with_sql)
def calculation(self, best_action_indices: List[List[int]],
gold_labels,
batch_size: int,
mask: Optional[torch.LongTensor],
db_contexts: List[SparcDBContext] = None,
action_mapping: List[List[str]] = None,
with_sql: bool = False,
soft_correct: bool = False):
"""
This method is designed for checking the correctness of the metric measurement
:param best_action_indices: predicted action sequence
:param gold_labels: if ground-truth is SQL, the type should be `str`; otherwise, it should be torch.LongTensor.
:param batch_size: batch size, for separate interaction accuracy calculation
:param mask: action mask of shape (batch_size * inter_size, max_action_len)
:param db_contexts: db_context for mapping action str sequence into SQL
:param action_mapping: int -> str, mapping action into corresponding string
:param with_sql: whether evaluation under sql equality
:param soft_correct: if True, return a similarity score (float) rather than a correctness indicator (integer)
:return:
"""
assert (action_mapping is not None) == with_sql
assert (db_contexts is not None) == with_sql
assert isinstance(gold_labels[0], str) == with_sql
sen_mask = mask.sum(dim=1).ne(0)
iter_size = len(gold_labels)
assert iter_size % batch_size == 0
inter_size = iter_size // batch_size
# correct matrix
if soft_correct:
correct_mat = np.zeros((batch_size, inter_size), dtype=np.float)
else:
correct_mat = np.zeros((batch_size, inter_size), dtype=np.long)
# iteration over all instances
for i in range(iter_size):
# if equal to 0, skip it
if sen_mask[i] == 0:
continue
# for plain calculation
if with_sql:
# map predicted action into sql
action_seq_ind = best_action_indices[i]
action_seq_str = [action_mapping[i][ind] for ind in action_seq_ind]
if len(action_seq_ind) == 0:
match_score = 0.0
else:
converter = ActionConverter(db_context=db_contexts[i])
try:
action_sql = converter.translate_to_sql(action_seq_str)
except Exception as e:
logging.error("Converter error: {}".format(e))
action_sql = f'NULL'
match_score = self._evaluator.is_equal(action_sql, gold_labels[i], db_contexts[i].db_id)
else:
if soft_correct:
match_score = self._evaluator.similarity(best_action_indices[i], gold_labels[i], mask[i])
else:
match_score = self._evaluator.is_equal(best_action_indices[i], gold_labels[i], mask[i])
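# row index = dialogue (batch) index, column index = turn index within the interaction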
correct_mat[i // inter_size, i % inter_size] = match_score
sen_mask = sen_mask.view(batch_size, inter_size).cpu().data.numpy()
return correct_mat, sen_mask
class Evaluator:
"""
Abstract class for evaluator
"""
def __init__(self):
pass
def is_equal(self, predict, target, option) -> int:
"""
Judge whether `predict` is equal to `target`
:return: if equal return 1; otherwise, return 0.
"""
pass
def similarity(self, predict, target, option) -> float:
"""
Calculate similarity between predict and target,
:return: the similarity
"""
pass
class SQLEvaluator(Evaluator):
def __init__(self, dataset_path):
super().__init__()
self.evaluate_func = partial(evaluate,
db_dir=os.path.join(dataset_path, 'database'),
table=os.path.join(dataset_path, 'tables.json'))
@overrides
def is_equal(self, predict_sql, gold_sql, db_id) -> int:
"""
Judge whether the given predicted SQL is equivalent to the ground truth under the given db_id
:return: if equal, return 1; otherwise, return 0
"""
try:
exact_match_score = self.evaluate_func(gold_sql, predict_sql, db_id)
except Exception as e:
logging.error("SQL Parse error: {}".format(e))
logging.error("DB_id: {}, Gold_SQL: {}, Predicted SQL: {}".format(db_id, gold_sql, predict_sql))
exact_match_score = 0
return exact_match_score
@overrides
def similarity(self, predict_sql, gold_sql, db_id) -> float:
raise NotImplementedError
class ActionEvaluator(Evaluator):
@overrides
def is_equal(self, predicted: List[int], targets: torch.LongTensor, target_mask: torch.LongTensor) -> int:
"""
Judge whether the predicted action sequence exactly matches the ground-truth action sequence
:return: if equal, return 1; otherwise, return 0
"""
if len(predicted) > targets.size(0):
return 0
predicted_tensor = targets.new_tensor(predicted)
# remove padding ones
actual_len = target_mask.sum()
targets_trimmed = targets[:actual_len]
# Return 1 if the predicted sequence exactly matches the unpadded target sequence.
is_correct = torch.equal(predicted_tensor, targets_trimmed)
if is_correct:
return 1
else:
return 0
@overrides
def similarity(self, predicted: List[int], targets: torch.LongTensor, target_mask: torch.LongTensor) -> float:
# remove padding ones
actual_len = target_mask.sum()
targets_trimmed = targets[:actual_len]
targets_trimmed = list(targets_trimmed.cpu().data.numpy())
sm = edit_distance.SequenceMatcher(a=predicted, b=targets_trimmed)
# get the edit distance similarity between two lists
return sm.ratio()
@Metric.register("turn_average")
class TurnAverage(Metric):
"""
This :class:`Metric` breaks with the typical ``Metric`` API and just stores values that were
computed in some fashion outside of a ``Metric``. If you have some external code that computes
the metric for you, for instance, you can use this to report the average result using our
``Metric`` API.
"""
def __init__(self, prefix) -> None:
self._total_seq_value = 0.0
self._total_inter_value = 0.0
self._total_turn_1_value = 0.0
self._total_turn_2_value = 0.0
self._total_turn_3_value = 0.0
self._total_turn_4_value = 0.0
self._seq_count = 0
self._inter_count = 0
self._turn_1_count = 0
self._turn_2_count = 0
self._turn_3_count = 0
self._turn_4_count = 0
# prefix used when reporting the metric
self.prefix = prefix
@overrides
def __call__(self, correct_mat, mask_mat):
"""
Parameters
----------
correct_mat : ``np.matrix``
has shape batch_size x inter_size, 1 means correct while 0 means error.
mask_mat: ``np.matrix``
has the same shape with correct mat, 0 means invalid and 1 means valid.
"""
# return the sequence correct number
correct_mat = correct_mat & mask_mat
self._total_seq_value += np.count_nonzero(correct_mat == 1)
self._seq_count += np.count_nonzero(mask_mat == 1)
batch_size, maximum_inter_size = correct_mat.shape
for i in range(maximum_inter_size):
statistic_score = np.count_nonzero(correct_mat[:, i] == 1)
statistic_count = np.count_nonzero(mask_mat[:, i] == 1)
if i == 0:
self._total_turn_1_value += statistic_score
self._turn_1_count += statistic_count
elif i == 1:
self._total_turn_2_value += statistic_score
self._turn_2_count += statistic_count
elif i == 2:
self._total_turn_3_value += statistic_score
self._turn_3_count += statistic_count
else:
self._total_turn_4_value += statistic_score
self._turn_4_count += statistic_count
# only valid & incorrect, return 1
not_correct_mat = np.logical_and(np.logical_not(correct_mat), mask_mat)
# if anyone is 1, the result is 0
correct_inter = 1 - not_correct_mat.max(axis=1)
mask_inter = mask_mat.sum(axis=1) != 0
correct_inter = correct_inter & mask_inter
self._total_inter_value += np.count_nonzero(correct_inter == 1)
self._inter_count += np.count_nonzero(mask_inter == 1)
@overrides
def get_metric(self, reset: bool = False) -> Dict:
"""
Returns
-------
The average of all values that were passed to ``__call__``.
"""
average_seq = self._total_seq_value / self._seq_count if self._seq_count > 0 else 0
average_inter = self._total_inter_value / self._inter_count if self._inter_count > 0 else 0
average_turn_1 = self._total_turn_1_value / self._turn_1_count if self._turn_1_count > 0 else 0
average_turn_2 = self._total_turn_2_value / self._turn_2_count if self._turn_2_count > 0 else 0
average_turn_3 = self._total_turn_3_value / self._turn_3_count if self._turn_3_count > 0 else 0
average_turn_4 = self._total_turn_4_value / self._turn_4_count if self._turn_4_count > 0 else 0
if reset:
self.reset()
return {
f'{self.prefix}_exact_match': average_seq,
# hidden them in TQDM report
f'_{self.prefix}_inter_exact_match': average_inter,
f'_{self.prefix}_turn_1_exact_match': average_turn_1,
f'_{self.prefix}_turn_2_exact_match': average_turn_2,
f'_{self.prefix}_turn_3_exact_match': average_turn_3,
f'_{self.prefix}_turn_4_exact_match': average_turn_4,
}
@overrides
def reset(self):
self._total_inter_value = 0.0
self._total_seq_value = 0.0
self._total_turn_1_value = 0.0
self._total_turn_2_value = 0.0
self._total_turn_3_value = 0.0
self._total_turn_4_value = 0.0
self._inter_count = 0
self._seq_count = 0
self._turn_1_count = 0
self._turn_2_count = 0
self._turn_3_count = 0
self._turn_4_count = 0
|
ContextualSP/semantic_parsing_in_context/models/metrics.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/models/metrics.py",
"repo_id": "ContextualSP",
"token_count": 5552
}
| 263 |
import json
from context.converter import SQLConverter, SparcDBContext
import unittest
from allennlp.data.tokenizers import WordTokenizer
class TestSQLToSemQL(unittest.TestCase):
@staticmethod
def template(sql_plain, sql_text, db_id, expected_str):
sql_clause = json.loads(sql_text)
db_context = SparcDBContext(db_id=db_id,
utterance=[],
tokenizer=WordTokenizer(),
# TODO: please first configure the dataset path you want to test
tables_file="dataset_sparc\\tables.json",
database_path="dataset_sparc\\database")
converter = SQLConverter(db_context=db_context)
inter_seq = converter.translate_to_intermediate(sql_clause=sql_clause)
assert str(inter_seq) == expected_str, \
f'\nSQL:\t\t{sql_plain}\nExp:\t\t{expected_str}\nPred:\t\t{str(inter_seq)}\n'
def test_example(self):
db_id = "flight_2"
sql_plain = "SELECT * FROM AIRLINES"
sql_clause = """
{
"orderBy": [],
"from": {
"table_units": [
[
"table_unit",
0
]
],
"conds": []
},
"union": null,
"except": null,
"groupBy": [],
"limit": null,
"intersect": null,
"where": [],
"having": [],
"select": [
false,
[
[
0,
[
0,
[
0,
0,
false
],
null
]
]
]
]
}
"""
expected_action_str = "[Statement -> Root, Root -> Select, Select -> A, A -> none C T, C -> *, T -> airlines]"
self.template(sql_plain, sql_clause, db_id, expected_action_str)
|
ContextualSP/semantic_parsing_in_context/test_sql_to_semql.py/0
|
{
"file_path": "ContextualSP/semantic_parsing_in_context/test_sql_to_semql.py",
"repo_id": "ContextualSP",
"token_count": 1419
}
| 264 |
# Copyright (c) Facebook, Inc. and Microsoft Corporation.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, List
import torch
from genre.trie import Trie
keyword = ['select', 'distinct', 'from', 'join', 'on', 'where', 'group', 'by', 'order', 'asc', 'desc', 'limit',
'having',
'and', 'not', 'or', 'like', 'between', 'in',
'sum', 'count', 'max', 'min', 'avg',
'(', ')', ',', '>', '<', '=', '>=', '!=', '<=',
'union', 'except', 'intersect',
'1', '2', '3', '4', '5']
def get_end_to_end_prefix_allowed_tokens_fn_hf(
model,
sentences: List[str],
start_mention_token="{",
end_mention_token="}",
start_entity_token="[",
end_entity_token="]",
mention_trie: Trie = None,
candidates_trie: Trie = None,
mention_to_candidates_dict: Dict[str, List[str]] = None,
):
return _get_end_to_end_prefix_allowed_tokens_fn(
lambda x: model.tokenizer.encode(x),
lambda x: model.tokenizer.decode(torch.tensor(x)),
model.tokenizer.bos_token_id,
model.tokenizer.pad_token_id,
model.tokenizer.eos_token_id,
len(model.tokenizer) - 1,
sentences,
start_mention_token,
end_mention_token,
start_entity_token,
end_entity_token,
mention_trie,
candidates_trie,
mention_to_candidates_dict,
)
def get_end_to_end_prefix_allowed_tokens_fn_fairseq(
model,
sentences: List[str],
start_mention_token="{",
end_mention_token="}",
start_entity_token="[",
end_entity_token="]",
mention_trie: Trie = None,
candidates_trie: Trie = None,
mention_to_candidates_dict: Dict[str, List[str]] = None,
):
return _get_end_to_end_prefix_allowed_tokens_fn(
lambda x: model.encode(x).tolist(),
lambda x: model.decode(torch.tensor(x)),
model.model.decoder.dictionary.bos(),
model.model.decoder.dictionary.pad(),
model.model.decoder.dictionary.eos(),
len(model.model.decoder.dictionary),
sentences,
start_mention_token,
end_mention_token,
start_entity_token,
end_entity_token,
mention_trie,
candidates_trie,
mention_to_candidates_dict,
)
def _get_end_to_end_prefix_allowed_tokens_fn(
encode_fn,
decode_fn,
bos_token_id,
pad_token_id,
eos_token_id,
vocabulary_length,
sentences: List[str],
start_mention_token="{",
end_mention_token="}",
start_entity_token="[",
end_entity_token="]",
mention_trie: Trie = None,
candidates_trie: Trie = None,
mention_to_candidates_dict: Dict[str, List[str]] = None,
):
assert not (
candidates_trie is not None and mention_to_candidates_dict is not None
), "`candidates_trie` and `mention_to_candidates_dict` cannot be both != `None`"
codes = {}
codes["EOS"] = eos_token_id
codes["BOS"] = bos_token_id
keyword_codes = {k: encode_fn(" {}".format(k))[1] for k in keyword}
keyword_codes['wselect'] = encode_fn("{}".format('select'))[1]
def prefix_allowed_tokens_fn(batch_id, sent):
sent = sent.tolist()
trie_out = get_trie_schema(sent)
return trie_out
def get_trie_schema(sent):
pointer_start = get_keyword_mention(sent)
keyword_rnt = list(keyword_codes.values())
if pointer_start + 1 < len(sent) and pointer_start != -1:
ment_next = mention_trie.get(sent[pointer_start + 1:])
if codes["EOS"] in ment_next:
return ment_next + keyword_rnt
else:
return ment_next
else:
ment_next = mention_trie.get([])
return ment_next + keyword_rnt + [codes["EOS"]]
def get_keyword_mention(sent):
pointer_start = -1
for i, e in enumerate(sent):
if e in keyword_codes.values():
pointer_start = i
return pointer_start
return prefix_allowed_tokens_fn
|
ContextualSP/unified_parser_text_to_sql/genre/entity_linking.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/genre/entity_linking.py",
"repo_id": "ContextualSP",
"token_count": 2026
}
| 265 |
import subprocess
import argparse
import os
def run_command(bash_command):
process = subprocess.Popen(bash_command.split())
output, error = process.communicate()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_path", type=str, default="", help="dataset path")
parser.add_argument("--exp_name", type=str, default="", help="test")
parser.add_argument("--models_path", type=str, default="", help="models path")
parser.add_argument("--bart_model_path", type=str, default="", help="bart init models path")
parser.add_argument("--total_num_update", type=int, default=200000)
parser.add_argument("--max_tokens", type=int, default=4096)
parser.add_argument("--tensorboard_path", type=str, default="", help="tensorboard path")
args = parser.parse_args()
print("START training")
run_command("printenv")
restore_file = os.path.join(args.bart_model_path, "model.pt")
cmd = f"""
fairseq-train {args.dataset_path} \
--save-dir {args.models_path}/{args.exp_name} \
--restore-file {restore_file} \
--arch bart_large \
--criterion label_smoothed_cross_entropy \
--source-lang src \
--target-lang tgt \
--truncate-source \
--label-smoothing 0.1 \
--max-tokens {args.max_tokens} \
--update-freq 4 \
--max-update {args.total_num_update} \
--required-batch-size-multiple 1 \
--dropout 0.1 \
--attention-dropout 0.1 \
--relu-dropout 0.0 \
--weight-decay 0.05 \
--optimizer adam \
--adam-eps 1e-08 \
--clip-norm 0.1 \
--lr-scheduler polynomial_decay \
--lr 1e-05 \
--total-num-update {args.total_num_update} \
--warmup-updates 5000 \
--ddp-backend no_c10d \
--num-workers 20 \
--reset-meters \
--reset-optimizer \
--reset-dataloader \
--share-all-embeddings \
--layernorm-embedding \
--share-decoder-input-output-embed \
--skip-invalid-size-inputs-valid-test \
--log-format json \
--log-interval 10 \
--save-interval-updates 500 \
--validate-interval-updates 500 \
--validate-interval 10 \
--save-interval 10 \
--patience 200 \
--no-last-checkpoints \
--no-save-optimizer-state \
--report-accuracy
"""
print("RUN {}".format(cmd))
run_command(cmd)
|
ContextualSP/unified_parser_text_to_sql/train.py/0
|
{
"file_path": "ContextualSP/unified_parser_text_to_sql/train.py",
"repo_id": "ContextualSP",
"token_count": 981
}
| 266 |
import math
import sys
from typing import Iterable, Optional
from timm.utils.model import unwrap_model
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
from lib import utils
@torch.no_grad()
def evaluate(data_loader, model, device, amp=True):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for images, target in metric_logger.log_every(data_loader, 10, header):
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
if amp:
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
else:
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
Cream/AutoFormerV2/engine.py/0
|
{
"file_path": "Cream/AutoFormerV2/engine.py",
"repo_id": "Cream",
"token_count": 653
}
| 267 |
""" Retrain cell """
import _init_paths
import os
import torch
import json
import torch.nn as nn
import numpy as np
import lib.utils.genotypes as gt
from tensorboardX import SummaryWriter
from lib.models.cdarts_controller import CDARTSController
from lib.utils import utils
from lib.config import AugmentConfig
from lib.core.augment_function import train, validate
# config
config = AugmentConfig()
# make apex optional
if config.distributed:
# DDP = torch.nn.parallel.DistributedDataParallel
try:
import apex
from apex.parallel import DistributedDataParallel as DDP
from apex import amp, optimizers
from apex.fp16_utils import *
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
# tensorboard
writer = SummaryWriter(log_dir=os.path.join(config.path, "tb"))
writer.add_text('config', config.as_markdown(), 0)
logger = utils.get_logger(os.path.join(config.path, "{}.log".format(config.name)))
if config.local_rank == 0:
config.print_params(logger.info)
if 'cifar' in config.dataset:
from lib.datasets.cifar import get_augment_datasets
elif 'imagenet' in config.dataset:
from lib.datasets.imagenet import get_augment_datasets
else:
raise Exception("Not support dataset!")
def main():
logger.info("Logger is set - training start")
# set seed
np.random.seed(config.seed)
torch.manual_seed(config.seed)
torch.cuda.manual_seed_all(config.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
if config.distributed:
config.gpu = config.local_rank % torch.cuda.device_count()
torch.cuda.set_device(config.gpu)
# distributed init
torch.distributed.init_process_group(backend='nccl', init_method=config.dist_url,
world_size=config.world_size, rank=config.local_rank)
config.world_size = torch.distributed.get_world_size()
config.total_batch_size = config.world_size * config.batch_size
else:
config.total_batch_size = config.batch_size
loaders, samplers = get_augment_datasets(config)
train_loader, valid_loader = loaders
train_sampler, valid_sampler = samplers
net_crit = nn.CrossEntropyLoss().cuda()
controller = CDARTSController(config, net_crit, n_nodes=4, stem_multiplier=config.stem_multiplier)
file = open(config.cell_file, 'r')
js = file.read()
r_dict = json.loads(js)
if config.local_rank == 0:
logger.info(r_dict)
file.close()
genotypes_dict = {}
for layer_idx, genotype in r_dict.items():
genotypes_dict[int(layer_idx)] = gt.from_str(genotype)
controller.build_augment_model(controller.init_channel, genotypes_dict)
resume_state = None
if config.resume:
resume_state = torch.load(config.resume_path, map_location='cpu')
controller.model_main.load_state_dict(resume_state['model_main'])
controller.model_main = controller.model_main.cuda()
param_size = utils.param_size(controller.model_main)
logger.info("param size = %fMB", param_size)
# change training hyper parameters according to cell type
if 'cifar' in config.dataset:
if param_size < 3.0:
config.weight_decay = 3e-4
config.drop_path_prob = 0.2
elif param_size > 3.0 and param_size < 3.5:
config.weight_decay = 3e-4
config.drop_path_prob = 0.3
else:
config.weight_decay = 5e-4
config.drop_path_prob = 0.3
if config.local_rank == 0:
logger.info("Current weight decay: {}".format(config.weight_decay))
logger.info("Current drop path prob: {}".format(config.drop_path_prob))
controller.model_main = apex.parallel.convert_syncbn_model(controller.model_main)
# weights optimizer
optimizer = torch.optim.SGD(controller.model_main.parameters(), lr=config.lr, momentum=config.momentum, weight_decay=config.weight_decay)
# optimizer = torch.optim.SGD(controller.model_main.parameters(), lr=config.lr, momentum=config.momentum, weight_decay=config.weight_decay, nesterov=True)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.epochs)
if config.use_amp:
controller.model_main, optimizer = amp.initialize(controller.model_main, optimizer, opt_level=config.opt_level)
if config.distributed:
controller.model_main = DDP(controller.model_main, delay_allreduce=True)
best_top1 = 0.
best_top5 = 0.
sta_epoch = 0
# training loop
if config.resume:
optimizer.load_state_dict(resume_state['optimizer'])
lr_scheduler.load_state_dict(resume_state['lr_scheduler'])
best_top1 = resume_state['best_top1']
best_top5 = resume_state['best_top5']
sta_epoch = resume_state['sta_epoch']
epoch_pool = [220, 230, 235, 240, 245]
for epoch in range(sta_epoch, config.epochs):
# reset iterators
train_sampler.set_epoch(epoch)
valid_sampler.set_epoch(epoch)
current_lr = lr_scheduler.get_lr()[0]
# current_lr = utils.adjust_lr(optimizer, epoch, config)
if config.local_rank == 0:
logger.info('Epoch: %d lr %e', epoch, current_lr)
if epoch < config.warmup_epochs and config.total_batch_size > 256:
for param_group in optimizer.param_groups:
param_group['lr'] = current_lr * (epoch + 1) / 5.0
if config.local_rank == 0:
logger.info('Warming-up Epoch: %d, LR: %e', epoch, current_lr * (epoch + 1) / 5.0)
drop_prob = config.drop_path_prob * epoch / config.epochs
controller.model_main.module.drop_path_prob(drop_prob)
# training
train(train_loader, controller.model_main, optimizer, epoch, writer, logger, config)
# validation
cur_step = (epoch+1) * len(train_loader)
top1, top5 = validate(valid_loader, controller.model_main, epoch, cur_step, writer, logger, config)
if 'cifar' in config.dataset:
lr_scheduler.step()
elif 'imagenet' in config.dataset:
lr_scheduler.step()
# current_lr = utils.adjust_lr(optimizer, epoch, config)
else:
raise Exception('Lr error!')
# save
if best_top1 < top1:
best_top1 = top1
best_top5 = top5
is_best = True
else:
is_best = False
# save
if config.local_rank == 0:
if ('imagenet' in config.dataset) and ((epoch+1) in epoch_pool) and (not config.resume) and (config.local_rank == 0):
torch.save({
"model_main":controller.model_main.module.state_dict(),
"optimizer":optimizer.state_dict(),
"lr_scheduler":lr_scheduler.state_dict(),
"best_top1":best_top1,
"best_top5":best_top5,
"sta_epoch":epoch + 1
}, os.path.join(config.path, "epoch_{}.pth.tar".format(epoch+1)))
utils.save_checkpoint(controller.model_main.module.state_dict(), config.path, is_best)
torch.save({
"model_main":controller.model_main.module.state_dict(),
"optimizer":optimizer.state_dict(),
"lr_scheduler":lr_scheduler.state_dict(),
"best_top1":best_top1,
"best_top5":best_top5,
"sta_epoch":epoch + 1
}, os.path.join(config.path, "retrain_resume.pth.tar"))
utils.save_checkpoint(controller.model_main.module.state_dict(), config.path, is_best)
if config.local_rank == 0:
logger.info("Final best Prec@1 = {:.4%}, Prec@5 = {:.4%}".format(best_top1, best_top5))
if __name__ == "__main__":
main()
|
Cream/CDARTS/CDARTS/retrain.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS/retrain.py",
"repo_id": "Cream",
"token_count": 3503
}
| 268 |
from abc import ABCMeta, abstractmethod
class BaseFileHandler(object):
__metaclass__ = ABCMeta # python 2 compatibility
@abstractmethod
def load_from_fileobj(self, file, **kwargs):
pass
@abstractmethod
def dump_to_fileobj(self, obj, file, **kwargs):
pass
@abstractmethod
def dump_to_str(self, obj, **kwargs):
pass
def load_from_path(self, filepath, mode='r', **kwargs):
with open(filepath, mode) as f:
return self.load_from_fileobj(f, **kwargs)
def dump_to_path(self, obj, filepath, mode='w', **kwargs):
with open(filepath, mode) as f:
self.dump_to_fileobj(obj, f, **kwargs)
|
Cream/CDARTS/CDARTS_detection/mmcv/fileio/handlers/base.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/fileio/handlers/base.py",
"repo_id": "Cream",
"token_count": 293
}
| 269 |
import collections
import torch
import torch.nn.functional as F
from torch.utils.data.dataloader import default_collate
from .data_container import DataContainer
def collate(batch, samples_per_gpu=1):
"""Puts each data field into a tensor/DataContainer with outer dimension
batch size.
Extend default_collate to add support for
:type:`~mmcv.parallel.DataContainer`. There are 3 cases.
1. cpu_only = True, e.g., meta data
2. cpu_only = False, stack = True, e.g., images tensors
3. cpu_only = False, stack = False, e.g., gt bboxes
"""
if not isinstance(batch, collections.Sequence):
raise TypeError("{} is not supported.".format(batch.dtype))
if isinstance(batch[0], DataContainer):
assert len(batch) % samples_per_gpu == 0
stacked = []
if batch[0].cpu_only:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(
stacked, batch[0].stack, batch[0].padding_value, cpu_only=True)
elif batch[0].stack:
for i in range(0, len(batch), samples_per_gpu):
assert isinstance(batch[i].data, torch.Tensor)
if batch[i].pad_dims is not None:
ndim = batch[i].dim()
assert ndim > batch[i].pad_dims
max_shape = [0 for _ in range(batch[i].pad_dims)]
for dim in range(1, batch[i].pad_dims + 1):
max_shape[dim - 1] = batch[i].size(-dim)
for sample in batch[i:i + samples_per_gpu]:
for dim in range(0, ndim - batch[i].pad_dims):
assert batch[i].size(dim) == sample.size(dim)
for dim in range(1, batch[i].pad_dims + 1):
max_shape[dim - 1] = max(max_shape[dim - 1],
sample.size(-dim))
padded_samples = []
for sample in batch[i:i + samples_per_gpu]:
pad = [0 for _ in range(batch[i].pad_dims * 2)]
for dim in range(1, batch[i].pad_dims + 1):
pad[2 * dim -
1] = max_shape[dim - 1] - sample.size(-dim)
padded_samples.append(
F.pad(
sample.data, pad, value=sample.padding_value))
stacked.append(default_collate(padded_samples))
elif batch[i].pad_dims is None:
stacked.append(
default_collate([
sample.data
for sample in batch[i:i + samples_per_gpu]
]))
else:
raise ValueError(
'pad_dims should be either None or integers (1-3)')
else:
for i in range(0, len(batch), samples_per_gpu):
stacked.append(
[sample.data for sample in batch[i:i + samples_per_gpu]])
return DataContainer(stacked, batch[0].stack, batch[0].padding_value)
elif isinstance(batch[0], collections.Sequence):
transposed = zip(*batch)
return [collate(samples, samples_per_gpu) for samples in transposed]
    elif isinstance(batch[0], collections.abc.Mapping):
return {
key: collate([d[key] for d in batch], samples_per_gpu)
for key in batch[0]
}
else:
return default_collate(batch)
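if __name__ == '__main__':
    # A minimal sketch (not part of the original file); run it with
    # `python -m mmcv.parallel.collate` so the relative import resolves. It
    # exercises the three documented cases: cpu_only meta data, stacked and
    # padded image tensors (pad_dims=2), and non-stacked per-image tensors.
    # The DataContainer keyword arguments assumed here are those defined in
    # data_container.py of this package.
    batch = [
        dict(
            img=DataContainer(torch.randn(3, 10, 12), stack=True, pad_dims=2),
            gt_bboxes=DataContainer(torch.rand(4, 4), stack=False),
            img_meta=DataContainer(dict(filename='a.jpg'), cpu_only=True)),
        dict(
            img=DataContainer(torch.randn(3, 12, 10), stack=True, pad_dims=2),
            gt_bboxes=DataContainer(torch.rand(2, 4), stack=False),
            img_meta=DataContainer(dict(filename='b.jpg'), cpu_only=True)),
    ]
    out = collate(batch, samples_per_gpu=2)
    # images are padded to the per-batch max shape and stacked: (2, 3, 12, 12)
    print(out['img'].data[0].shape)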
|
Cream/CDARTS/CDARTS_detection/mmcv/parallel/collate.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/parallel/collate.py",
"repo_id": "Cream",
"token_count": 1935
}
| 270 |
import os.path as osp
import torch
from ...utils import master_only
from .base import LoggerHook
class TensorboardLoggerHook(LoggerHook):
def __init__(self,
log_dir=None,
interval=10,
ignore_last=True,
reset_flag=True):
super(TensorboardLoggerHook, self).__init__(interval, ignore_last,
reset_flag)
self.log_dir = log_dir
@master_only
def before_run(self, runner):
if torch.__version__ >= '1.1':
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
raise ImportError(
'Please run "pip install future tensorboard" to install '
'the dependencies to use torch.utils.tensorboard '
'(applicable to PyTorch 1.1 or higher)')
else:
try:
from tensorboardX import SummaryWriter
except ImportError:
raise ImportError('Please install tensorboardX to use '
'TensorboardLoggerHook.')
if self.log_dir is None:
self.log_dir = osp.join(runner.work_dir, 'tf_logs')
self.writer = SummaryWriter(self.log_dir)
@master_only
def log(self, runner):
for var in runner.log_buffer.output:
if var in ['time', 'data_time']:
continue
tag = '{}/{}'.format(var, runner.mode)
record = runner.log_buffer.output[var]
if isinstance(record, str):
self.writer.add_text(tag, record, runner.iter)
else:
self.writer.add_scalar(tag, runner.log_buffer.output[var],
runner.iter)
@master_only
def after_run(self, runner):
self.writer.close()
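# A minimal usage sketch (not part of the original file); it assumes an mmcv
# Runner instance named `runner` has already been built elsewhere, and that
# either torch>=1.1 or tensorboardX is installed:
#   runner.register_hook(TensorboardLoggerHook(log_dir=None, interval=50))
#   # log_dir=None falls back to <work_dir>/tf_logs in before_run()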
|
Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/logger/tensorboard.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/runner/hooks/logger/tensorboard.py",
"repo_id": "Cream",
"token_count": 996
}
| 271 |
from time import time
class TimerError(Exception):
def __init__(self, message):
self.message = message
super(TimerError, self).__init__(message)
class Timer(object):
"""A flexible Timer class.
:Example:
>>> import time
>>> import mmcv
>>> with mmcv.Timer():
>>> # simulate a code block that will run for 1s
>>> time.sleep(1)
1.000
>>> with mmcv.Timer(print_tmpl='it takes {:.1f} seconds'):
>>> # simulate a code block that will run for 1s
>>> time.sleep(1)
it takes 1.0 seconds
>>> timer = mmcv.Timer()
>>> time.sleep(0.5)
>>> print(timer.since_start())
0.500
>>> time.sleep(0.5)
>>> print(timer.since_last_check())
0.500
>>> print(timer.since_start())
1.000
"""
def __init__(self, start=True, print_tmpl=None):
self._is_running = False
self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}'
if start:
self.start()
@property
def is_running(self):
"""bool: indicate whether the timer is running"""
return self._is_running
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
print(self.print_tmpl.format(self.since_last_check()))
self._is_running = False
def start(self):
"""Start the timer."""
if not self._is_running:
self._t_start = time()
self._is_running = True
self._t_last = time()
def since_start(self):
"""Total time since the timer is started.
Returns (float): Time in seconds.
"""
if not self._is_running:
raise TimerError('timer is not running')
self._t_last = time()
return self._t_last - self._t_start
def since_last_check(self):
"""Time since the last checking.
Either :func:`since_start` or :func:`since_last_check` is a checking
operation.
Returns (float): Time in seconds.
"""
if not self._is_running:
raise TimerError('timer is not running')
dur = time() - self._t_last
self._t_last = time()
return dur
_g_timers = {} # global timers
def check_time(timer_id):
"""Add check points in a single line.
This method is suitable for running a task on a list of items. A timer will
be registered when the method is called for the first time.
:Example:
>>> import time
>>> import mmcv
>>> for i in range(1, 6):
>>> # simulate a code block
>>> time.sleep(i)
>>> mmcv.check_time('task1')
2.000
3.000
4.000
5.000
Args:
timer_id (str): Timer identifier.
"""
if timer_id not in _g_timers:
_g_timers[timer_id] = Timer()
return 0
else:
return _g_timers[timer_id].since_last_check()
|
Cream/CDARTS/CDARTS_detection/mmcv/utils/timer.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmcv/utils/timer.py",
"repo_id": "Cream",
"token_count": 1263
}
| 272 |
import mmcv
from . import assigners, samplers
def build_assigner(cfg, **kwargs):
if isinstance(cfg, assigners.BaseAssigner):
return cfg
elif isinstance(cfg, dict):
return mmcv.runner.obj_from_dict(cfg, assigners, default_args=kwargs)
else:
        raise TypeError('Invalid type {} for building an assigner'.format(
            type(cfg)))
def build_sampler(cfg, **kwargs):
if isinstance(cfg, samplers.BaseSampler):
return cfg
elif isinstance(cfg, dict):
return mmcv.runner.obj_from_dict(cfg, samplers, default_args=kwargs)
else:
raise TypeError('Invalid type {} for building a sampler'.format(
type(cfg)))
def assign_and_sample(bboxes, gt_bboxes, gt_bboxes_ignore, gt_labels, cfg):
bbox_assigner = build_assigner(cfg.assigner)
bbox_sampler = build_sampler(cfg.sampler)
assign_result = bbox_assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore,
gt_labels)
sampling_result = bbox_sampler.sample(assign_result, bboxes, gt_bboxes,
gt_labels)
return assign_result, sampling_result
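if __name__ == '__main__':
    # A minimal sketch (not part of the original file); run it with
    # `python -m mmdet.core.bbox.assign_sampling` so the relative imports
    # resolve. The type names follow the registered mmdet classes, but the
    # thresholds below are illustrative values, not repo defaults.
    bbox_assigner = build_assigner(
        dict(type='MaxIoUAssigner', pos_iou_thr=0.7, neg_iou_thr=0.3,
             min_pos_iou=0.3))
    bbox_sampler = build_sampler(
        dict(type='RandomSampler', num=256, pos_fraction=0.5))
    print(bbox_assigner, bbox_sampler)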
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assign_sampling.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/assign_sampling.py",
"repo_id": "Cream",
"token_count": 533
}
| 273 |
import torch
class SamplingResult(object):
def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
gt_flags):
self.pos_inds = pos_inds
self.neg_inds = neg_inds
self.pos_bboxes = bboxes[pos_inds]
self.neg_bboxes = bboxes[neg_inds]
self.pos_is_gt = gt_flags[pos_inds]
self.num_gts = gt_bboxes.shape[0]
self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds, :]
if assign_result.labels is not None:
self.pos_gt_labels = assign_result.labels[pos_inds]
else:
self.pos_gt_labels = None
@property
def bboxes(self):
return torch.cat([self.pos_bboxes, self.neg_bboxes])
|
Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/sampling_result.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/bbox/samplers/sampling_result.py",
"repo_id": "Cream",
"token_count": 403
}
| 274 |
from .bbox_nms import multiclass_nms
from .merge_augs import (merge_aug_proposals, merge_aug_bboxes,
merge_aug_scores, merge_aug_masks)
__all__ = [
'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',
'merge_aug_scores', 'merge_aug_masks'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/core/post_processing/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/core/post_processing/__init__.py",
"repo_id": "Cream",
"token_count": 141
}
| 275 |
import collections.abc
from mmdet.utils import build_from_cfg
from ..registry import PIPELINES
@PIPELINES.register_module
class Compose(object):
def __init__(self, transforms):
assert isinstance(transforms, collections.abc.Sequence)
self.transforms = []
for transform in transforms:
if isinstance(transform, dict):
transform = build_from_cfg(transform, PIPELINES)
self.transforms.append(transform)
elif callable(transform):
self.transforms.append(transform)
else:
raise TypeError('transform must be callable or a dict')
def __call__(self, data):
for t in self.transforms:
data = t(data)
if data is None:
return None
return data
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.transforms:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
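if __name__ == '__main__':
    # A minimal sketch (not part of the original file); run it with
    # `python -m mmdet.datasets.pipelines.compose` so the relative imports
    # resolve. Compose accepts registered config dicts or plain callables; the
    # callable below is just a stand-in for a real pipeline step.
    def add_flag(results):
        results['flag'] = True
        return results
    pipeline = Compose([add_flag])
    print(pipeline(dict(img=None)))  # -> {'img': None, 'flag': True}
    print(pipeline)  # __repr__ lists the composed transforms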
|
Cream/CDARTS/CDARTS_detection/mmdet/datasets/pipelines/compose.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/datasets/pipelines/compose.py",
"repo_id": "Cream",
"token_count": 492
}
| 276 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from mmdet.core import delta2bbox
from mmdet.ops import nms
from .guided_anchor_head import GuidedAnchorHead
from ..registry import HEADS
@HEADS.register_module
class GARPNHead(GuidedAnchorHead):
"""Guided-Anchor-based RPN head."""
def __init__(self, in_channels, **kwargs):
super(GARPNHead, self).__init__(2, in_channels, **kwargs)
def _init_layers(self):
self.rpn_conv = nn.Conv2d(self.in_channels,
self.feat_channels,
3,
padding=1)
super(GARPNHead, self)._init_layers()
def init_weights(self):
normal_init(self.rpn_conv, std=0.01)
super(GARPNHead, self).init_weights()
def forward_single(self, x):
x = self.rpn_conv(x)
x = F.relu(x, inplace=True)
(cls_score, bbox_pred, shape_pred,
loc_pred) = super(GARPNHead, self).forward_single(x)
return cls_score, bbox_pred, shape_pred, loc_pred
def loss(self,
cls_scores,
bbox_preds,
shape_preds,
loc_preds,
gt_bboxes,
img_metas,
cfg,
gt_bboxes_ignore=None):
losses = super(GARPNHead, self).loss(cls_scores,
bbox_preds,
shape_preds,
loc_preds,
gt_bboxes,
None,
img_metas,
cfg,
gt_bboxes_ignore=gt_bboxes_ignore)
return dict(loss_rpn_cls=losses['loss_cls'],
loss_rpn_bbox=losses['loss_bbox'],
loss_anchor_shape=losses['loss_shape'],
loss_anchor_loc=losses['loss_loc'])
def get_bboxes_single(self,
cls_scores,
bbox_preds,
mlvl_anchors,
mlvl_masks,
img_shape,
scale_factor,
cfg,
rescale=False):
mlvl_proposals = []
for idx in range(len(cls_scores)):
rpn_cls_score = cls_scores[idx]
rpn_bbox_pred = bbox_preds[idx]
anchors = mlvl_anchors[idx]
mask = mlvl_masks[idx]
assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]
# if no location is kept, end.
if mask.sum() == 0:
continue
rpn_cls_score = rpn_cls_score.permute(1, 2, 0)
if self.use_sigmoid_cls:
rpn_cls_score = rpn_cls_score.reshape(-1)
scores = rpn_cls_score.sigmoid()
else:
rpn_cls_score = rpn_cls_score.reshape(-1, 2)
scores = rpn_cls_score.softmax(dim=1)[:, 1]
# filter scores, bbox_pred w.r.t. mask.
# anchors are filtered in get_anchors() beforehand.
scores = scores[mask]
rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,
4)[mask, :]
if scores.dim() == 0:
rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)
anchors = anchors.unsqueeze(0)
scores = scores.unsqueeze(0)
# filter anchors, bbox_pred, scores w.r.t. scores
if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:
_, topk_inds = scores.topk(cfg.nms_pre)
rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]
anchors = anchors[topk_inds, :]
scores = scores[topk_inds]
# get proposals w.r.t. anchors and rpn_bbox_pred
proposals = delta2bbox(anchors, rpn_bbox_pred, self.target_means,
self.target_stds, img_shape)
# filter out too small bboxes
if cfg.min_bbox_size > 0:
w = proposals[:, 2] - proposals[:, 0] + 1
h = proposals[:, 3] - proposals[:, 1] + 1
valid_inds = torch.nonzero((w >= cfg.min_bbox_size) &
(h >= cfg.min_bbox_size)).squeeze()
proposals = proposals[valid_inds, :]
scores = scores[valid_inds]
proposals = torch.cat([proposals, scores.unsqueeze(-1)], dim=-1)
# NMS in current level
proposals, _ = nms(proposals, cfg.nms_thr)
proposals = proposals[:cfg.nms_post, :]
mlvl_proposals.append(proposals)
proposals = torch.cat(mlvl_proposals, 0)
if cfg.nms_across_levels:
# NMS across multi levels
proposals, _ = nms(proposals, cfg.nms_thr)
proposals = proposals[:cfg.max_num, :]
else:
scores = proposals[:, 4]
num = min(cfg.max_num, proposals.shape[0])
_, topk_inds = scores.topk(num)
proposals = proposals[topk_inds, :]
return proposals
|
Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/ga_rpn_head.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/anchor_heads/ga_rpn_head.py",
"repo_id": "Cream",
"token_count": 3227
}
| 277 |
import math
import torch
import torch.nn as nn
from torch.autograd import Variable
from .dropblock import DropBlockScheduled, DropBlock2D
import logging
from torch.nn.modules.batchnorm import _BatchNorm
import torch.nn.functional as F
import time
import numpy as np
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from ..registry import BACKBONES
def Conv_3x3(inp, oup, stride, activation=nn.ReLU6, act_params={"inplace": True}):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
activation(**act_params)
)
def Conv_1x1(inp, oup, activation=nn.ReLU6, act_params={"inplace": True}):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
activation(**act_params)
)
def SepConv_3x3(inp, oup, activation=nn.ReLU6, act_params={"inplace": True}): # input=32, output=16
return nn.Sequential(
# dw
nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
activation(**act_params),
# pw-linear
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio, kernel, drop_prob=0.0, num_steps=3e5, activation=nn.ReLU6,
act_params={"inplace": True}):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
self.use_res_connect = self.stride == 1 and inp == oup
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, inp * expand_ratio, 1, 1, 0, bias=False),
nn.BatchNorm2d(inp * expand_ratio),
DropBlockScheduled(
DropBlock2D(drop_prob=drop_prob, block_size=7),
start_value=0.,
stop_value=drop_prob,
nr_steps=num_steps),
activation(**act_params),
# dw
nn.Conv2d(inp * expand_ratio, inp * expand_ratio, kernel, stride, kernel // 2, groups=inp * expand_ratio,
bias=False),
nn.BatchNorm2d(inp * expand_ratio),
DropBlockScheduled(
DropBlock2D(drop_prob=drop_prob, block_size=7),
start_value=0.,
stop_value=drop_prob,
nr_steps=num_steps),
activation(**act_params),
# pw-linear
nn.Conv2d(inp * expand_ratio, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
DropBlockScheduled(
DropBlock2D(drop_prob=drop_prob, block_size=7),
start_value=0.,
stop_value=drop_prob,
nr_steps=num_steps),
)
if self.use_res_connect:
self.skip_drop = DropBlockScheduled(
DropBlock2D(drop_prob=drop_prob, block_size=7),
start_value=0.,
stop_value=drop_prob,
nr_steps=num_steps)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
if self.use_res_connect:
return self.skip_drop(x + self.conv(x))
else:
return self.conv(x)
@BACKBONES.register_module
class MnasNet(nn.Module):
def __init__(self, out_indices=(1,2,3,4), width_mult=1., drop_prob=0.0, num_steps=3e5, activation=nn.ReLU6,
act_params={"inplace": True}):
super(MnasNet, self).__init__()
self.out_indices = out_indices
self.activation = activation
self.act_params = act_params
# setting of inverted residual blocks
self.interverted_residual_setting = [
# t, c, n, s, k, dp
[3, 24, 3, 2, 3, 0], # -> 56x56
[3, 40, 3, 2, 5, 0], # -> 28x28
[6, 80, 3, 2, 5, 0], # -> 14x14
[6, 96, 2, 1, 3, drop_prob], # -> 14x14
[6, 192, 4, 2, 5, drop_prob], # -> 7x7
[6, 320, 1, 1, 3, drop_prob], # -> 7x7
]
self.num_steps = num_steps
input_channel = int(32 * width_mult)
self.last_channel = int(1280 * width_mult) if width_mult > 1.0 else 1280
# building first two layer
self.features = [Conv_3x3(3, input_channel, 2, self.activation, self.act_params),
SepConv_3x3(input_channel, 16, self.activation, self.act_params)]
input_channel = 16
# building inverted residual blocks (MBConv)
for t, c, n, s, k, dp in self.interverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(InvertedResidual(input_channel, output_channel, s, t, k, dp, self.num_steps,
self.activation, self.act_params))
else:
self.features.append(InvertedResidual(input_channel, output_channel, 1, t, k, dp, self.num_steps,
self.activation, self.act_params))
input_channel = output_channel
# building last several layers
self.features.append(Conv_1x1(input_channel, self.last_channel, self.activation, self.act_params))
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
    def forward(self, x):
        outs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.out_indices:
outs.append(x)
return outs
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
if __name__ == '__main__':
net = MnasNet()
print(net)
x_image = Variable(torch.randn(1, 3, 224, 224))
y = net(x_image)
# print(y)
|
Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/mnasnet.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/backbones/mnasnet.py",
"repo_id": "Cream",
"token_count": 3658
}
| 278 |
from .base import BaseDetector
from .double_head_rcnn import DoubleHeadRCNN
from .single_stage import SingleStageDetector
from .two_stage import TwoStageDetector
from .rpn import RPN
from .fast_rcnn import FastRCNN
from .faster_rcnn import FasterRCNN
from .mask_rcnn import MaskRCNN
from .cascade_rcnn import CascadeRCNN
from .htc import HybridTaskCascade
from .retinanet import RetinaNet
from .fcos import FCOS
from .grid_rcnn import GridRCNN
from .mask_scoring_rcnn import MaskScoringRCNN
__all__ = [
'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',
'FastRCNN', 'FasterRCNN', 'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade',
'RetinaNet', 'FCOS', 'GridRCNN', 'MaskScoringRCNN', 'DoubleHeadRCNN'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/detectors/__init__.py",
"repo_id": "Cream",
"token_count": 251
}
| 279 |
from .accuracy import accuracy, Accuracy
from .cross_entropy_loss import (cross_entropy, binary_cross_entropy,
mask_cross_entropy, CrossEntropyLoss)
from .focal_loss import sigmoid_focal_loss, FocalLoss
from .smooth_l1_loss import smooth_l1_loss, SmoothL1Loss
from .ghm_loss import GHMC, GHMR
from .balanced_l1_loss import balanced_l1_loss, BalancedL1Loss
from .mse_loss import mse_loss, MSELoss
from .iou_loss import iou_loss, bounded_iou_loss, IoULoss, BoundedIoULoss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GHMC', 'GHMR', 'reduce_loss',
'weight_reduce_loss', 'weighted_loss'
]
|
Cream/CDARTS/CDARTS_detection/mmdet/models/losses/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/losses/__init__.py",
"repo_id": "Cream",
"token_count": 409
}
| 280 |
from .fpn import FPN
from .fpn_panet import PAFPN
from .bfp import BFP
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .search_pafpn import SearchPAFPN
__all__ = ['FPN', 'BFP', 'HRFPN', 'NASFPN',
'PAFPN', 'SearchPAFPN']
|
Cream/CDARTS/CDARTS_detection/mmdet/models/necks/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/necks/__init__.py",
"repo_id": "Cream",
"token_count": 99
}
| 281 |
from __future__ import division
import torch
import torch.nn as nn
from mmdet import ops
from mmdet.core import force_fp32
from ..registry import ROI_EXTRACTORS
@ROI_EXTRACTORS.register_module
class SingleRoIExtractor(nn.Module):
"""Extract RoI features from a single level feature map.
    If there are multiple input feature levels, each RoI is mapped to a level
according to its scale.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
        featmap_strides (list[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0.
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
finest_scale=56):
super(SingleRoIExtractor, self).__init__()
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.finest_scale = finest_scale
self.fp16_enabled = False
@property
def num_inputs(self):
"""int: Input feature map levels."""
return len(self.featmap_strides)
def init_weights(self):
pass
def build_roi_layers(self, layer_cfg, featmap_strides):
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def map_roi_levels(self, rois, num_levels):
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
def roi_rescale(self, rois, scale_factor):
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1] + 1
h = rois[:, 4] - rois[:, 2] + 1
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5 + 0.5
x2 = cx + new_w * 0.5 - 0.5
y1 = cy - new_h * 0.5 + 0.5
y2 = cy + new_h * 0.5 - 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].out_size
num_levels = len(feats)
target_lvls = self.map_roi_levels(rois, num_levels)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
inds = target_lvls == i
if inds.any():
rois_ = rois[inds, :]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
return roi_feats
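if __name__ == '__main__':
    # A minimal sketch (not part of the original file) that reproduces the
    # scale-to-level rule from `map_roi_levels` with plain tensors, so it can
    # be followed without the compiled RoI ops. RoIs are
    # (batch_idx, x1, y1, x2, y2); run with
    # `python -m mmdet.models.roi_extractors.single_level` if needed.
    rois = torch.tensor([[0., 0., 0., 31., 31.],     # 32x32 box   -> level 0
                         [0., 0., 0., 127., 127.],   # 128x128 box -> level 1
                         [0., 0., 0., 511., 511.]])  # 512x512 box -> level 3
    finest_scale, num_levels = 56, 4
    scale = torch.sqrt(
        (rois[:, 3] - rois[:, 1] + 1) * (rois[:, 4] - rois[:, 2] + 1))
    target_lvls = torch.floor(torch.log2(scale / finest_scale + 1e-6))
    print(target_lvls.clamp(min=0, max=num_levels - 1).long())  # [0, 1, 3]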
|
Cream/CDARTS/CDARTS_detection/mmdet/models/roi_extractors/single_level.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/models/roi_extractors/single_level.py",
"repo_id": "Cream",
"token_count": 1852
}
| 282 |
import math
import torch
import torch.nn as nn
from torch.nn.modules.utils import _pair
from ..functions.deform_conv import deform_conv, modulated_deform_conv
class DeformConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=False):
super(DeformConv, self).__init__()
assert not bias
        assert in_channels % groups == 0, \
            'in_channels {} is not divisible by groups {}'.format(
                in_channels, groups)
        assert out_channels % groups == 0, \
            'out_channels {} is not divisible by groups {}'.format(
                out_channels, groups)
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _pair(padding)
self.dilation = _pair(dilation)
self.groups = groups
self.deformable_groups = deformable_groups
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // self.groups,
*self.kernel_size))
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
def forward(self, x, offset):
return deform_conv(x, offset, self.weight, self.stride, self.padding,
self.dilation, self.groups, self.deformable_groups)
class DeformConvPack(DeformConv):
def __init__(self, *args, **kwargs):
super(DeformConvPack, self).__init__(*args, **kwargs)
self.conv_offset = nn.Conv2d(
self.in_channels,
self.deformable_groups * 2 * self.kernel_size[0] *
self.kernel_size[1],
kernel_size=self.kernel_size,
stride=_pair(self.stride),
padding=_pair(self.padding),
bias=True)
self.init_offset()
def init_offset(self):
self.conv_offset.weight.data.zero_()
self.conv_offset.bias.data.zero_()
def forward(self, x):
offset = self.conv_offset(x)
return deform_conv(x, offset, self.weight, self.stride, self.padding,
self.dilation, self.groups, self.deformable_groups)
class ModulatedDeformConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
deformable_groups=1,
bias=True):
super(ModulatedDeformConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = _pair(kernel_size)
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.deformable_groups = deformable_groups
self.with_bias = bias
self.weight = nn.Parameter(
torch.Tensor(out_channels, in_channels // groups,
*self.kernel_size))
if bias:
self.bias = nn.Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.zero_()
def forward(self, x, offset, mask):
return modulated_deform_conv(x, offset, mask, self.weight, self.bias,
self.stride, self.padding, self.dilation,
self.groups, self.deformable_groups)
class ModulatedDeformConvPack(ModulatedDeformConv):
def __init__(self, *args, **kwargs):
super(ModulatedDeformConvPack, self).__init__(*args, **kwargs)
self.conv_offset_mask = nn.Conv2d(
self.in_channels,
self.deformable_groups * 3 * self.kernel_size[0] *
self.kernel_size[1],
kernel_size=self.kernel_size,
stride=_pair(self.stride),
padding=_pair(self.padding),
bias=True)
self.init_offset()
def init_offset(self):
self.conv_offset_mask.weight.data.zero_()
self.conv_offset_mask.bias.data.zero_()
def forward(self, x):
out = self.conv_offset_mask(x)
o1, o2, mask = torch.chunk(out, 3, dim=1)
offset = torch.cat((o1, o2), dim=1)
mask = torch.sigmoid(mask)
return modulated_deform_conv(x, offset, mask, self.weight, self.bias,
self.stride, self.padding, self.dilation,
self.groups, self.deformable_groups)
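if __name__ == '__main__':
    # A minimal sketch (not part of the original file); it assumes the dcn CUDA
    # extension has been compiled and a GPU is available, since deform_conv is
    # CUDA-only here. Run with `python -m mmdet.ops.dcn.modules.deform_conv`.
    dcn = DeformConvPack(in_channels=16, out_channels=16, kernel_size=3,
                         padding=1).cuda()
    x = torch.randn(2, 16, 32, 32).cuda()
    out = dcn(x)  # offsets are predicted internally by self.conv_offset
    print(out.shape)  # torch.Size([2, 16, 32, 32])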
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/modules/deform_conv.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/dcn/modules/deform_conv.py",
"repo_id": "Cream",
"token_count": 2682
}
| 283 |
#include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
inline int GET_BLOCKS(const int N) {
int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int max_block_num = 65000;
return min(optimal_block_num, max_block_num);
}
template <typename scalar_t>
__global__ void MaskedIm2colForward(const int n, const scalar_t *data_im,
const int height, const int width,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const long *mask_h_idx,
const long *mask_w_idx, const int mask_cnt,
scalar_t *data_col) {
// mask_cnt * channels
CUDA_1D_KERNEL_LOOP(index, n) {
const int m_index = index % mask_cnt;
const int h_col = mask_h_idx[m_index];
const int w_col = mask_w_idx[m_index];
const int c_im = index / mask_cnt;
const int c_col = c_im * kernel_h * kernel_w;
const int h_offset = h_col - pad_h;
const int w_offset = w_col - pad_w;
scalar_t *data_col_ptr = data_col + c_col * mask_cnt + m_index;
for (int i = 0; i < kernel_h; ++i) {
int h_im = h_offset + i;
for (int j = 0; j < kernel_w; ++j) {
int w_im = w_offset + j;
if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) {
*data_col_ptr =
(scalar_t)data_im[(c_im * height + h_im) * width + w_im];
} else {
*data_col_ptr = 0.0;
}
data_col_ptr += mask_cnt;
}
}
}
}
int MaskedIm2colForwardLaucher(const at::Tensor bottom_data, const int height,
const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const at::Tensor mask_h_idx,
const at::Tensor mask_w_idx, const int mask_cnt,
at::Tensor top_data) {
const int output_size = mask_cnt * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.type(), "MaskedIm2colLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data<scalar_t>();
const long *mask_h_idx_ = mask_h_idx.data<long>();
const long *mask_w_idx_ = mask_w_idx.data<long>();
scalar_t *top_data_ = top_data.data<scalar_t>();
MaskedIm2colForward<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
output_size, bottom_data_, height, width, kernel_h, kernel_w,
pad_h, pad_w, mask_h_idx_, mask_w_idx_, mask_cnt, top_data_);
}));
THCudaCheck(cudaGetLastError());
return 1;
}
template <typename scalar_t>
__global__ void MaskedCol2imForward(const int n, const scalar_t *data_col,
const int height, const int width,
const int channels, const long *mask_h_idx,
const long *mask_w_idx, const int mask_cnt,
scalar_t *data_im) {
CUDA_1D_KERNEL_LOOP(index, n) {
const int m_index = index % mask_cnt;
const int h_im = mask_h_idx[m_index];
const int w_im = mask_w_idx[m_index];
const int c_im = index / mask_cnt;
// int kernel_extent_w = (kernel_w - 1) + 1;
// int kernel_extent_h = (kernel_h - 1) + 1;
// compute the start and end of the output
data_im[(c_im * height + h_im) * width + w_im] = data_col[index];
}
}
int MaskedCol2imForwardLaucher(const at::Tensor bottom_data, const int height,
const int width, const int channels,
const at::Tensor mask_h_idx,
const at::Tensor mask_w_idx, const int mask_cnt,
at::Tensor top_data) {
const int output_size = mask_cnt * channels;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
bottom_data.type(), "MaskedCol2imLaucherForward", ([&] {
const scalar_t *bottom_data_ = bottom_data.data<scalar_t>();
const long *mask_h_idx_ = mask_h_idx.data<long>();
const long *mask_w_idx_ = mask_w_idx.data<long>();
scalar_t *top_data_ = top_data.data<scalar_t>();
MaskedCol2imForward<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
output_size, bottom_data_, height, width, channels, mask_h_idx_,
mask_w_idx_, mask_cnt, top_data_);
}));
THCudaCheck(cudaGetLastError());
return 1;
}
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/masked_conv/src/masked_conv2d_kernel.cu/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/masked_conv/src/masked_conv2d_kernel.cu",
"repo_id": "Cream",
"token_count": 2595
}
| 284 |
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
setup(
name='roi_align_cuda',
ext_modules=[
CUDAExtension('roi_align_cuda', [
'src/roi_align_cuda.cpp',
'src/roi_align_kernel.cu',
]),
],
cmdclass={'build_ext': BuildExtension})
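# Typical build command for this extension (a sketch; run from this folder):
#   python setup.py build_ext --inplace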
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/setup.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/roi_align/setup.py",
"repo_id": "Cream",
"token_count": 154
}
| 285 |
from torch import nn
from ..functions.sigmoid_focal_loss import sigmoid_focal_loss
# TODO: remove this module
class SigmoidFocalLoss(nn.Module):
def __init__(self, gamma, alpha):
super(SigmoidFocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
def forward(self, logits, targets):
assert logits.is_cuda
loss = sigmoid_focal_loss(logits, targets, self.gamma, self.alpha)
return loss.sum()
def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "gamma=" + str(self.gamma)
tmpstr += ", alpha=" + str(self.alpha)
tmpstr += ")"
return tmpstr
|
Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/sigmoid_focal_loss.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/mmdet/ops/sigmoid_focal_loss/modules/sigmoid_focal_loss.py",
"repo_id": "Cream",
"token_count": 298
}
| 286 |
from argparse import ArgumentParser
from mmdet.core import coco_eval
def main():
parser = ArgumentParser(description='COCO Evaluation')
parser.add_argument('result', help='result file path')
parser.add_argument('--ann', help='annotation file path')
parser.add_argument(
'--types',
type=str,
nargs='+',
choices=['proposal_fast', 'proposal', 'bbox', 'segm', 'keypoint'],
default=['bbox'],
help='result types')
parser.add_argument(
'--max-dets',
type=int,
nargs='+',
default=[100, 300, 1000],
help='proposal numbers, only used for recall evaluation')
args = parser.parse_args()
coco_eval(args.result, args.types, args.ann, args.max_dets)
if __name__ == '__main__':
main()
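# Example invocation (a sketch; the result and annotation paths below are
# placeholders, not files shipped with this repo):
#   python tools/coco_eval.py results.pkl \
#       --ann data/coco/annotations/instances_val2017.json \
#       --types bbox segm --max-dets 100 300 1000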
|
Cream/CDARTS/CDARTS_detection/tools/coco_eval.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_detection/tools/coco_eval.py",
"repo_id": "Cream",
"token_count": 330
}
| 287 |
# ------------------------------------------------------------------------------
# Generates targets for Panoptic-DeepLab.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import numpy as np
import torch
class PanopticTargetGenerator(object):
"""
Generates panoptic training target for Panoptic-DeepLab.
Annotation is assumed to have Cityscapes format.
Arguments:
ignore_label: Integer, the ignore label for semantic segmentation.
        rgb2id: Function, the panoptic label is encoded in a colored image; this function converts a color to the
            corresponding panoptic label.
thing_list: List, a list of thing classes
sigma: the sigma for Gaussian kernel.
ignore_stuff_in_offset: Boolean, whether to ignore stuff region when training the offset branch.
small_instance_area: Integer, indicates largest area for small instances.
small_instance_weight: Integer, indicates semantic loss weights for small instances.
ignore_crowd_in_semantic: Boolean, whether to ignore crowd region in semantic segmentation branch,
crowd region is ignored in the original TensorFlow implementation.
"""
def __init__(self, ignore_label, rgb2id, thing_list, sigma=8, ignore_stuff_in_offset=False,
small_instance_area=0, small_instance_weight=1, ignore_crowd_in_semantic=False):
self.ignore_label = ignore_label
self.rgb2id = rgb2id
self.thing_list = thing_list
self.ignore_stuff_in_offset = ignore_stuff_in_offset
self.small_instance_area = small_instance_area
self.small_instance_weight = small_instance_weight
self.ignore_crowd_in_semantic = ignore_crowd_in_semantic
self.sigma = sigma
size = 6 * sigma + 3
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0, y0 = 3 * sigma + 1, 3 * sigma + 1
self.g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
def __call__(self, panoptic, segments):
"""Generates the training target.
reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py
reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18
Args:
panoptic: numpy.array, colored image encoding panoptic label.
segments: List, a list of dictionary containing information of every segment, it has fields:
- id: panoptic id, after decoding `panoptic`.
- category_id: semantic class id.
- area: segment area.
- bbox: segment bounding box.
- iscrowd: crowd region.
Returns:
A dictionary with fields:
- semantic: Tensor, semantic label, shape=(H, W).
- foreground: Tensor, foreground mask label, shape=(H, W).
- center: Tensor, center heatmap, shape=(1, H, W).
- center_points: List, center coordinates, with tuple (y-coord, x-coord).
- offset: Tensor, offset, shape=(2, H, W), first dim is (offset_y, offset_x).
- semantic_weights: Tensor, loss weight for semantic prediction, shape=(H, W).
- center_weights: Tensor, ignore region of center prediction, shape=(H, W), used as weights for center
regression 0 is ignore, 1 is has instance. Multiply this mask to loss.
- offset_weights: Tensor, ignore region of offset prediction, shape=(H, W), used as weights for offset
regression 0 is ignore, 1 is has instance. Multiply this mask to loss.
"""
panoptic = self.rgb2id(panoptic)
height, width = panoptic.shape[0], panoptic.shape[1]
semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label
foreground = np.zeros_like(panoptic, dtype=np.uint8)
center = np.zeros((1, height, width), dtype=np.float32)
center_pts = []
offset = np.zeros((2, height, width), dtype=np.float32)
y_coord = np.ones_like(panoptic, dtype=np.float32)
x_coord = np.ones_like(panoptic, dtype=np.float32)
y_coord = np.cumsum(y_coord, axis=0) - 1
x_coord = np.cumsum(x_coord, axis=1) - 1
# Generate pixel-wise loss weights
semantic_weights = np.ones_like(panoptic, dtype=np.uint8)
# 0: ignore, 1: has instance
# three conditions for a region to be ignored for instance branches:
# (1) It is labeled as `ignore_label`
# (2) It is crowd region (iscrowd=1)
# (3) (Optional) It is stuff region (for offset branch)
center_weights = np.zeros_like(panoptic, dtype=np.uint8)
offset_weights = np.zeros_like(panoptic, dtype=np.uint8)
for seg in segments:
cat_id = seg["category_id"]
if self.ignore_crowd_in_semantic:
if not seg['iscrowd']:
semantic[panoptic == seg["id"]] = cat_id
else:
semantic[panoptic == seg["id"]] = cat_id
if cat_id in self.thing_list:
foreground[panoptic == seg["id"]] = 1
if not seg['iscrowd']:
# Ignored regions are not in `segments`.
# Handle crowd region.
center_weights[panoptic == seg["id"]] = 1
if self.ignore_stuff_in_offset:
# Handle stuff region.
if cat_id in self.thing_list:
offset_weights[panoptic == seg["id"]] = 1
else:
offset_weights[panoptic == seg["id"]] = 1
if cat_id in self.thing_list:
# find instance center
mask_index = np.where(panoptic == seg["id"])
if len(mask_index[0]) == 0:
# the instance is completely cropped
continue
# Find instance area
ins_area = len(mask_index[0])
if ins_area < self.small_instance_area:
semantic_weights[panoptic == seg["id"]] = self.small_instance_weight
center_y, center_x = np.mean(mask_index[0]), np.mean(mask_index[1])
center_pts.append([center_y, center_x])
# generate center heatmap
y, x = int(center_y), int(center_x)
# outside image boundary
if x < 0 or y < 0 or \
x >= width or y >= height:
continue
sigma = self.sigma
# upper left
ul = int(np.round(x - 3 * sigma - 1)), int(np.round(y - 3 * sigma - 1))
# bottom right
br = int(np.round(x + 3 * sigma + 2)), int(np.round(y + 3 * sigma + 2))
c, d = max(0, -ul[0]), min(br[0], width) - ul[0]
a, b = max(0, -ul[1]), min(br[1], height) - ul[1]
cc, dd = max(0, ul[0]), min(br[0], width)
aa, bb = max(0, ul[1]), min(br[1], height)
center[0, aa:bb, cc:dd] = np.maximum(
center[0, aa:bb, cc:dd], self.g[a:b, c:d])
# generate offset (2, h, w) -> (y-dir, x-dir)
offset_y_index = (np.zeros_like(mask_index[0]), mask_index[0], mask_index[1])
offset_x_index = (np.ones_like(mask_index[0]), mask_index[0], mask_index[1])
offset[offset_y_index] = center_y - y_coord[mask_index]
offset[offset_x_index] = center_x - x_coord[mask_index]
return dict(
semantic=torch.as_tensor(semantic.astype('long')),
foreground=torch.as_tensor(foreground.astype('long')),
center=torch.as_tensor(center.astype(np.float32)),
center_points=center_pts,
offset=torch.as_tensor(offset.astype(np.float32)),
semantic_weights=torch.as_tensor(semantic_weights.astype(np.float32)),
center_weights=torch.as_tensor(center_weights.astype(np.float32)),
offset_weights=torch.as_tensor(offset_weights.astype(np.float32))
)
class SemanticTargetGenerator(object):
"""
Generates semantic training target only for Panoptic-DeepLab (no instance).
Annotation is assumed to have Cityscapes format.
Arguments:
ignore_label: Integer, the ignore label for semantic segmentation.
        rgb2id: Function, the panoptic label is encoded in a colored image; this function converts a color to the
            corresponding panoptic label.
thing_list: List, a list of thing classes
sigma: the sigma for Gaussian kernel.
"""
def __init__(self, ignore_label, rgb2id):
self.ignore_label = ignore_label
self.rgb2id = rgb2id
def __call__(self, panoptic, segments):
"""Generates the training target.
reference: https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/createPanopticImgs.py
reference: https://github.com/facebookresearch/detectron2/blob/master/datasets/prepare_panoptic_fpn.py#L18
Args:
panoptic: numpy.array, colored image encoding panoptic label.
segments: List, a list of dictionary containing information of every segment, it has fields:
- id: panoptic id, after decoding `panoptic`.
- category_id: semantic class id.
- area: segment area.
- bbox: segment bounding box.
- iscrowd: crowd region.
Returns:
A dictionary with fields:
- semantic: Tensor, semantic label, shape=(H, W).
"""
panoptic = self.rgb2id(panoptic)
semantic = np.zeros_like(panoptic, dtype=np.uint8) + self.ignore_label
for seg in segments:
cat_id = seg["category_id"]
semantic[panoptic == seg["id"]] = cat_id
return dict(
semantic=torch.as_tensor(semantic.astype('long'))
)
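def _rgb2id_example(color):
    """A minimal sketch (not part of the original file) of the `rgb2id`
    callable both generators expect: the standard COCO/Cityscapes panoptic
    encoding id = R + 256 * G + 256**2 * B over the channel axis."""
    color = np.asarray(color, dtype=np.uint32)
    return color[..., 0] + 256 * color[..., 1] + 256 * 256 * color[..., 2]
if __name__ == '__main__':
    # Tiny self-contained check: pixels colored (26, 0, 0) decode to id 26 and
    # receive category 26; everything else keeps the ignore label 255.
    gen = SemanticTargetGenerator(ignore_label=255, rgb2id=_rgb2id_example)
    panoptic = np.zeros((4, 4, 3), dtype=np.uint8)
    panoptic[:2] = (26, 0, 0)
    target = gen(panoptic, segments=[{'id': 26, 'category_id': 26}])
    print(target['semantic'])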
|
Cream/CDARTS/CDARTS_segmentation/dataloaders/transforms/target_transforms.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/dataloaders/transforms/target_transforms.py",
"repo_id": "Cream",
"token_count": 4642
}
| 288 |
# ------------------------------------------------------------------------------
# Reference: https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
# Modified by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import torch.nn as nn
from torchvision.models.utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
# self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
# self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
outputs = {}
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
outputs['stem'] = x
x = self.layer1(x) # 1/4
outputs['res2'] = x
x = self.layer2(x) # 1/8
outputs['res3'] = x
x = self.layer3(x) # 1/16
outputs['res4'] = x
x = self.layer4(x) # 1/32
outputs['res5'] = x
return outputs
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict, strict=False)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
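if __name__ == '__main__':
    # A minimal sketch (not part of the original file): unlike torchvision's
    # classifier, this backbone returns a dict of intermediate feature maps.
    import torch
    model = resnet50(pretrained=False)
    feats = model(torch.randn(1, 3, 224, 224))
    for name, feat in feats.items():
        # strides: stem/res2 1/4, res3 1/8, res4 1/16, res5 1/32
        print(name, tuple(feat.shape))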
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/backbone/resnet.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/backbone/resnet.py",
"repo_id": "Cream",
"token_count": 6389
}
| 289 |
from .semantic_post_processing import get_semantic_segmentation
from .instance_post_processing import get_panoptic_segmentation
from .evaluation_format import get_cityscapes_instance_format
|
Cream/CDARTS/CDARTS_segmentation/segmentation/model/post_processing/__init__.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/model/post_processing/__init__.py",
"repo_id": "Cream",
"token_count": 53
}
| 290 |
# ------------------------------------------------------------------------------
# Utility functions.
# Written by Bowen Cheng ([email protected])
# ------------------------------------------------------------------------------
import torch
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count if self.count != 0 else 0
def get_loss_info_str(loss_meter_dict):
msg = ''
for key in loss_meter_dict.keys():
msg += '{name}: {meter.val:.3e} ({meter.avg:.3e})\t'.format(
name=key, meter=loss_meter_dict[key]
)
return msg
def to_cuda(batch, device):
if type(batch) == torch.Tensor:
batch = batch.to(device)
elif type(batch) == dict:
for key in batch.keys():
batch[key] = to_cuda(batch[key], device)
elif type(batch) == list:
for i in range(len(batch)):
batch[i] = to_cuda(batch[i], device)
return batch
def get_module(model, distributed):
if distributed:
return model.module
else:
return model
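if __name__ == '__main__':
    # A minimal sketch (not part of the original file) of how the meters and
    # get_loss_info_str are combined in a training loop; the loss names and
    # values below are made up for illustration.
    loss_meters = dict(loss_total=AverageMeter(), loss_center=AverageMeter())
    for step, (total, center) in enumerate([(1.0, 0.4), (0.8, 0.3)], start=1):
        loss_meters['loss_total'].update(total, n=8)    # n = batch size
        loss_meters['loss_center'].update(center, n=8)
        print('step {}: {}'.format(step, get_loss_info_str(loss_meters)))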
|
Cream/CDARTS/CDARTS_segmentation/segmentation/utils/utils.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/segmentation/utils/utils.py",
"repo_id": "Cream",
"token_count": 554
}
| 291 |
import os
import cv2
import numpy as np
import time
from tqdm import tqdm
import torch
import torch.multiprocessing as mp
from engine.logger import get_logger
from utils.pyt_utils import load_model, link_file, ensure_dir
from utils.img_utils import pad_image_to_shape, normalize
logger = get_logger()
class Evaluator(object):
def __init__(self, dataset, class_num, image_mean, image_std, network,
multi_scales, is_flip, devices=0, out_idx=0, threds=5, config=None, logger=None,
verbose=False, save_path=None, show_image=False, show_prediction=False):
self.dataset = dataset
self.ndata = self.dataset.get_length()
self.class_num = class_num
self.image_mean = image_mean
self.image_std = image_std
self.multi_scales = multi_scales
self.is_flip = is_flip
self.network = network
self.devices = devices
if type(self.devices) == int: self.devices = [self.devices]
self.out_idx = out_idx
self.threds = threds
self.config = config
self.logger = logger
self.context = mp.get_context('spawn')
self.val_func = None
self.results_queue = self.context.Queue(self.ndata)
self.verbose = verbose
self.save_path = save_path
if save_path is not None:
ensure_dir(save_path)
self.show_image = show_image
self.show_prediction = show_prediction
def run(self, model_path, model_indice, log_file, log_file_link):
"""There are four evaluation modes:
1.only eval a .pth model: -e *.pth
2.only eval a certain epoch: -e epoch
3.eval all epochs in a given section: -e start_epoch-end_epoch
4.eval all epochs from a certain started epoch: -e start_epoch-
"""
if '.pth' in model_indice:
models = [model_indice, ]
elif "-" in model_indice:
start_epoch = int(model_indice.split("-")[0])
end_epoch = model_indice.split("-")[1]
models = os.listdir(model_path)
            if "epoch-last.pth" in models:
                models.remove("epoch-last.pth")
sorted_models = [None] * len(models)
model_idx = [0] * len(models)
for idx, m in enumerate(models):
num = m.split(".")[0].split("-")[1]
model_idx[idx] = num
sorted_models[idx] = m
model_idx = np.array([int(i) for i in model_idx])
down_bound = model_idx >= start_epoch
up_bound = [True] * len(sorted_models)
if end_epoch:
end_epoch = int(end_epoch)
assert start_epoch < end_epoch
up_bound = model_idx <= end_epoch
bound = up_bound * down_bound
model_slice = np.array(sorted_models)[bound]
models = [os.path.join(model_path, model) for model in
model_slice]
else:
models = [os.path.join(model_path,
'epoch-%s.pth' % model_indice), ]
results = open(log_file, 'a')
link_file(log_file, log_file_link)
for model in models:
logger.info("Load Model: %s" % model)
self.val_func = load_model(self.network, model)
result_line, mIoU = self.multi_process_evaluation()
results.write('Model: ' + model + '\n')
results.write(result_line)
results.write('\n')
results.flush()
results.close()
def run_online(self):
"""
eval during training
"""
self.val_func = self.network
result_line, mIoU = self.single_process_evaluation()
return result_line, mIoU
def single_process_evaluation(self):
all_results = []
from pdb import set_trace as bp
with torch.no_grad():
for idx in tqdm(range(self.ndata)):
dd = self.dataset[idx]
results_dict = self.func_per_iteration(dd, self.devices[0], iter=idx)
all_results.append(results_dict)
_, _mIoU = self.compute_metric([results_dict])
result_line, mIoU = self.compute_metric(all_results)
return result_line, mIoU
def run_online_multiprocess(self):
"""
eval during training
"""
self.val_func = self.network
result_line, mIoU = self.multi_process_single_gpu_evaluation()
return result_line, mIoU
def multi_process_single_gpu_evaluation(self):
# start_eval_time = time.perf_counter()
stride = int(np.ceil(self.ndata / self.threds))
# start multi-process on single-gpu
procs = []
for d in range(self.threds):
e_record = min((d + 1) * stride, self.ndata)
shred_list = list(range(d * stride, e_record))
device = self.devices[0]
logger.info('Thread %d handle %d data.' % (d, len(shred_list)))
p = self.context.Process(target=self.worker, args=(shred_list, device))
procs.append(p)
for p in procs:
p.start()
all_results = []
for _ in tqdm(range(self.ndata)):
t = self.results_queue.get()
all_results.append(t)
if self.verbose:
self.compute_metric(all_results)
for p in procs:
p.join()
result_line, mIoU = self.compute_metric(all_results)
# logger.info('Evaluation Elapsed Time: %.2fs' % (time.perf_counter() - start_eval_time))
return result_line, mIoU
def multi_process_evaluation(self):
start_eval_time = time.perf_counter()
nr_devices = len(self.devices)
stride = int(np.ceil(self.ndata / nr_devices))
# start multi-process on multi-gpu
procs = []
for d in range(nr_devices):
e_record = min((d + 1) * stride, self.ndata)
shred_list = list(range(d * stride, e_record))
device = self.devices[d]
logger.info('GPU %s handle %d data.' % (device, len(shred_list)))
p = self.context.Process(target=self.worker, args=(shred_list, device))
procs.append(p)
for p in procs:
p.start()
all_results = []
for _ in tqdm(range(self.ndata)):
t = self.results_queue.get()
all_results.append(t)
if self.verbose:
self.compute_metric(all_results)
for p in procs:
p.join()
result_line, mIoU = self.compute_metric(all_results)
logger.info('Evaluation Elapsed Time: %.2fs' % (time.perf_counter() - start_eval_time))
return result_line, mIoU
def worker(self, shred_list, device):
# start_load_time = time.time()
# logger.info('Load Model on Device %d: %.2fs' % (device, time.time() - start_load_time))
for idx in shred_list:
dd = self.dataset[idx]
results_dict = self.func_per_iteration(dd, device, iter=idx)
self.results_queue.put(results_dict)
def func_per_iteration(self, data, device, iter=None):
raise NotImplementedError
def compute_metric(self, results):
raise NotImplementedError
# evaluate the whole image at once
def whole_eval(self, img, output_size, input_size=None, device=None):
if input_size is not None:
img, margin = self.process_image(img, input_size)
else:
img = self.process_image(img, input_size)
pred = self.val_func_process(img, device)
if input_size is not None:
pred = pred[:, margin[0]:(pred.shape[1] - margin[1]),
margin[2]:(pred.shape[2] - margin[3])]
pred = pred.permute(1, 2, 0)
pred = pred.cpu().numpy()
if output_size is not None:
pred = cv2.resize(pred,
(output_size[1], output_size[0]),
interpolation=cv2.INTER_LINEAR)
pred = pred.argmax(2)
return pred
# slide the window to evaluate the image
def sliding_eval(self, img, crop_size, stride_rate, device=None):
ori_rows, ori_cols, c = img.shape
processed_pred = np.zeros((ori_rows, ori_cols, self.class_num))
for s in self.multi_scales:
img_scale = cv2.resize(img, None, fx=s, fy=s,
interpolation=cv2.INTER_LINEAR)
new_rows, new_cols, _ = img_scale.shape
processed_pred += self.scale_process(img_scale,
(ori_rows, ori_cols),
crop_size, stride_rate, device)
pred = processed_pred.argmax(2)
return pred
def scale_process(self, img, ori_shape, crop_size, stride_rate,
device=None):
new_rows, new_cols, c = img.shape
long_size = new_cols if new_cols > new_rows else new_rows
if long_size <= crop_size:
input_data, margin = self.process_image(img, crop_size)
score = self.val_func_process(input_data, device)
score = score[:, margin[0]:(score.shape[1] - margin[1]),
margin[2]:(score.shape[2] - margin[3])]
else:
stride = int(np.ceil(crop_size * stride_rate))
img_pad, margin = pad_image_to_shape(img, crop_size,
cv2.BORDER_CONSTANT, value=0)
pad_rows = img_pad.shape[0]
pad_cols = img_pad.shape[1]
r_grid = int(np.ceil((pad_rows - crop_size) / stride)) + 1
c_grid = int(np.ceil((pad_cols - crop_size) / stride)) + 1
data_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda(
device)
count_scale = torch.zeros(self.class_num, pad_rows, pad_cols).cuda(
device)
for grid_yidx in range(r_grid):
for grid_xidx in range(c_grid):
s_x = grid_xidx * stride
s_y = grid_yidx * stride
e_x = min(s_x + crop_size, pad_cols)
e_y = min(s_y + crop_size, pad_rows)
s_x = e_x - crop_size
s_y = e_y - crop_size
img_sub = img_pad[s_y:e_y, s_x: e_x, :]
count_scale[:, s_y: e_y, s_x: e_x] += 1
input_data, tmargin = self.process_image(img_sub, crop_size)
temp_score = self.val_func_process(input_data, device)
temp_score = temp_score[:,
tmargin[0]:(temp_score.shape[1] - tmargin[1]),
tmargin[2]:(temp_score.shape[2] - tmargin[3])]
data_scale[:, s_y: e_y, s_x: e_x] += temp_score
# score = data_scale / count_scale
score = data_scale
score = score[:, margin[0]:(score.shape[1] - margin[1]),
margin[2]:(score.shape[2] - margin[3])]
score = score.permute(1, 2, 0)
data_output = cv2.resize(score.cpu().numpy(),
(ori_shape[1], ori_shape[0]),
interpolation=cv2.INTER_LINEAR)
return data_output
def val_func_process(self, input_data, device=None):
input_data = np.ascontiguousarray(input_data[None, :, :, :], dtype=np.float32)
input_data = torch.FloatTensor(input_data).cuda(device)
with torch.cuda.device(input_data.get_device()):
self.val_func.eval()
self.val_func.to(input_data.get_device())
with torch.no_grad():
score = self.val_func(input_data)
if (isinstance(score, tuple) or isinstance(score, list)) and len(score) > 1:
score = score[self.out_idx]
score = score[0] # a single image pass, ignore batch dim
if self.is_flip:
input_data = input_data.flip(-1)
score_flip = self.val_func(input_data)
score_flip = score_flip[0]
score += score_flip.flip(-1)
score = torch.exp(score)
# score = score.data
return score
def process_image(self, img, crop_size=None):
p_img = img
if img.shape[2] < 3:
im_b = p_img
im_g = p_img
im_r = p_img
p_img = np.concatenate((im_b, im_g, im_r), axis=2)
p_img = normalize(p_img, self.image_mean, self.image_std)
if crop_size is not None:
p_img, margin = pad_image_to_shape(p_img, crop_size, cv2.BORDER_CONSTANT, value=0)
p_img = p_img.transpose(2, 0, 1)
return p_img, margin
p_img = p_img.transpose(2, 0, 1)
return p_img
|
Cream/CDARTS/CDARTS_segmentation/tools/engine/evaluator.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/engine/evaluator.py",
"repo_id": "Cream",
"token_count": 6706
}
| 292 |
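A standalone sketch (not invoking the class) of how Evaluator.run interprets the `start_epoch-end_epoch` form of `model_indice`: checkpoints named `epoch-<n>.pth` are kept when `<n>` falls inside the range. The file names below are hypothetical.

import numpy as np

files = ['epoch-10.pth', 'epoch-20.pth', 'epoch-30.pth', 'epoch-40.pth']
start_epoch, end_epoch = 20, 30
epochs = np.array([int(f.split('.')[0].split('-')[1]) for f in files])
keep = (epochs >= start_epoch) & (epochs <= end_epoch)
print(list(np.array(files)[keep]))  # ['epoch-20.pth', 'epoch-30.pth']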
import numpy as np
import cv2
import scipy.io as sio
def set_img_color(colors, background, img, gt, show255=False, weight_foreground=0.55):
origin = np.array(img)
for i in range(len(colors)):
if i != background:
img[np.where(gt == i)] = colors[i]
if show255:
img[np.where(gt == 255)] = 0
cv2.addWeighted(img, weight_foreground, origin, (1 - weight_foreground), 0, img)
return img
def show_prediction(colors, background, img, pred, weight_foreground=1):
im = np.array(img, np.uint8)
set_img_color(colors, background, im, pred, weight_foreground=weight_foreground)
final = np.array(im)
return final
def show_img(colors, background, img, clean, gt, *pds):
im1 = np.array(img, np.uint8)
# set_img_color(colors, background, im1, clean)
final = np.array(im1)
# the pivot black bar
pivot = np.zeros((im1.shape[0], 15, 3), dtype=np.uint8)
for pd in pds:
im = np.array(img, np.uint8)
# pd[np.where(gt == 255)] = 255
set_img_color(colors, background, im, pd)
final = np.column_stack((final, pivot))
final = np.column_stack((final, im))
im = np.array(img, np.uint8)
set_img_color(colors, background, im, gt, True)
final = np.column_stack((final, pivot))
final = np.column_stack((final, im))
return final
def get_colors(class_num):
colors = []
for i in range(class_num):
colors.append((np.random.random((1, 3)) * 255).tolist()[0])
return colors
def get_ade_colors():
colors = sio.loadmat('./color150.mat')['colors']
colors = colors[:, ::-1, ]
colors = np.array(colors).astype(int).tolist()
colors.insert(0, [0, 0, 0])
return colors
def print_iou(iu, mean_pixel_acc, class_names=None, show_no_back=False,
no_print=False):
n = iu.size
lines = []
for i in range(n):
if class_names is None:
cls = 'Class %d:' % (i + 1)
else:
cls = '%d %s' % (i + 1, class_names[i])
lines.append('%-8s\t%.3f%%' % (cls, iu[i] * 100))
mean_IU = np.nanmean(iu)
# mean_IU_no_back = np.nanmean(iu[1:])
mean_IU_no_back = np.nanmean(iu[:-1])
if show_no_back:
lines.append(
'---------------------------- %-8s\t%.3f%%\t%-8s\t%.3f%%\t%-8s\t%.3f%%' % (
'mean_IU', mean_IU * 100, 'mean_IU_no_back',
mean_IU_no_back * 100,
'mean_pixel_ACC', mean_pixel_acc * 100))
else:
lines.append(
'---------------------------- %-8s\t%.3f%%\t%-8s\t%.3f%%' % (
'mean_IU', mean_IU * 100, 'mean_pixel_ACC',
mean_pixel_acc * 100))
line = "\n".join(lines)
if not no_print:
print(line)
return line
|
Cream/CDARTS/CDARTS_segmentation/tools/utils/visualize.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/tools/utils/visualize.py",
"repo_id": "Cream",
"token_count": 1350
}
| 293 |
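A small sketch of print_iou with made-up per-class IoU values and class names; it assumes the function above is in scope.

import numpy as np

iu = np.array([0.95, 0.72, 0.61])                 # hypothetical IoU per class
line = print_iou(iu, mean_pixel_acc=0.90,
                 class_names=['road', 'car', 'person'], no_print=True)
print(line)  # per-class rows followed by the mean_IU / mean_pixel_ACC summary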
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from builder import *
from operations import *
from operations import DropPath_
from genotypes import PRIMITIVES
from pdb import set_trace as bp
from seg_oprs import FeatureFusion, Head, Decoder
from layers import NaiveSyncBatchNorm
# BatchNorm2d = nn.BatchNorm2d
BatchNorm2d = NaiveSyncBatchNorm
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
IMAGENET_INCEPTION_MEAN = (0.5, 0.5, 0.5)
IMAGENET_INCEPTION_STD = (0.5, 0.5, 0.5)
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
class HardSigmoid(nn.Module):
def __init__(self, inplace: bool = False):
super(HardSigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return hard_sigmoid(x, self.inplace)
class SelectAdaptivePool2d(nn.Module):
"""Selectable global pooling layer with dynamic input kernel size
"""
def __init__(self, output_size=1, pool_type='avg', flatten=False):
super(SelectAdaptivePool2d, self).__init__()
self.output_size = output_size
self.pool_type = pool_type
self.flatten = flatten
if pool_type == 'avgmax':
self.pool = AdaptiveAvgMaxPool2d(output_size)
elif pool_type == 'catavgmax':
self.pool = AdaptiveCatAvgMaxPool2d(output_size)
elif pool_type == 'max':
self.pool = nn.AdaptiveMaxPool2d(output_size)
else:
if pool_type != 'avg':
assert False, 'Invalid pool type: %s' % pool_type
self.pool = nn.AdaptiveAvgPool2d(output_size)
def forward(self, x):
x = self.pool(x)
if self.flatten:
x = x.flatten(1)
return x
def feat_mult(self):
return adaptive_pool_feat_mult(self.pool_type)
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ 'output_size=' + str(self.output_size) \
+ ', pool_type=' + self.pool_type + ')'
def create_conv2d(in_chs, out_chs, kernel_size, **kwargs):
""" Select a 2d convolution implementation based on arguments
Creates and returns one of torch.nn.Conv2d, Conv2dSame, MixedConv2d, or CondConv2d.
Used extensively by EfficientNet, MobileNetv3 and related networks.
"""
assert 'groups' not in kwargs # only use 'depthwise' bool arg
if isinstance(kernel_size, list):
assert 'num_experts' not in kwargs # MixNet + CondConv combo not supported currently
# We're going to use only lists for defining the MixedConv2d kernel groups,
# ints, tuples, other iterables will continue to pass to normal conv and specify h, w.
m = MixedConv2d(in_chs, out_chs, kernel_size, **kwargs)
else:
depthwise = kwargs.pop('depthwise', False)
groups = out_chs if depthwise else 1
if 'num_experts' in kwargs and kwargs['num_experts'] > 0:
m = CondConv2d(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
else:
m = create_conv2d_pad(in_chs, out_chs, kernel_size, groups=groups, **kwargs)
return m
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv_stem', 'classifier': 'classifier',
**kwargs
}
def conv_bn(inp, oup, stride, groups=1, act_fn=nn.ReLU):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False, groups=groups),
nn.BatchNorm2d(oup),
act_fn(inplace=True)
)
def conv_1x1_bn(inp, oup, groups=1, act_fn=nn.ReLU):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False, groups=groups),
nn.BatchNorm2d(oup),
act_fn(inplace=True)
)
default_cfgs = {
'mobilenetv3_large_075': _cfg(url=''),
'mobilenetv3_large_100': _cfg(
interpolation='bicubic',
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth'),
'mobilenetv3_small_075': _cfg(url=''),
'mobilenetv3_small_100': _cfg(url=''),
'mobilenetv3_rw': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_100-35495452.pth',
interpolation='bicubic'),
'tf_mobilenetv3_large_075': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_100': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_large_minimal_100': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_075': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_100': _cfg(
url= 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
'tf_mobilenetv3_small_minimal_100': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth',
mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD),
}
_DEBUG = False
class ChildNet(nn.Module):
def __init__(self, block_args, num_classes=1000, in_chans=3, stem_size=16, num_features=1280, head_bias=True,
channel_multiplier=1.0, pad_type='', act_layer=nn.ReLU, drop_rate=0., drop_path_rate=0.,
se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None, global_pool='avg', pool_bn=False, zero_gamma=False):
super(ChildNet, self).__init__()
norm_layer = BatchNorm2d
self.num_classes = num_classes
self.num_features = num_features
self.drop_rate = drop_rate
self._in_chs = in_chans
self.pool_bn = pool_bn
# Stem
stem_size = round_channels(stem_size, channel_multiplier)
self.conv_stem = create_conv2d(self._in_chs, stem_size, 3, stride=2, padding=pad_type)
self.bn1 = norm_layer(stem_size, **norm_kwargs)
self.act1 = act_layer(inplace=True)
self._in_chs = stem_size
# Middle stages (IR/ER/DS Blocks)
builder = ChildNetBuilder(
channel_multiplier, 8, None, 32, pad_type, act_layer, se_kwargs,
norm_layer, norm_kwargs, drop_path_rate, verbose=_DEBUG)
self.blocks = nn.Sequential(*builder(self._in_chs, block_args))
# self.blocks = builder(self._in_chs, block_args)
self._in_chs = builder.in_chs
# Head + Pooling
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.conv_head = create_conv2d(self._in_chs, self.num_features, 1, padding=pad_type, bias=head_bias)
self.act2 = act_layer(inplace=True)
# Classifier
self.classifier = nn.Linear(self.num_features * self.global_pool.feat_mult(), self.num_classes)
if pool_bn:
self.pool_bn = nn.BatchNorm1d(1)
efficientnet_init_weights(self, zero_gamma=zero_gamma)
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.global_pool = SelectAdaptivePool2d(pool_type=global_pool)
self.num_classes = num_classes
self.classifier = nn.Linear(
self.num_features * self.global_pool.feat_mult(), num_classes) if self.num_classes else None
def forward_features(self, x):
# architecture = [[0], [], [], [], [], [0]]
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
outputs = []
# 16, 24, 40, 96, 320
# block_idxs = [0, 1, 2, 4, 6]
block_idxs = [1, 2, 4, 6]
for i, block in enumerate(self.blocks):
x = block(x)
if i in block_idxs:
outputs.append(x)
# x = self.blocks(x)
return tuple(outputs)
def forward(self, x):
x = self.forward_features(x)
return x
def modify_block_args(block_args, kernel_size, exp_ratio):
# kernel_size: 3,5,7
# exp_ratio: 4,6
block_type = block_args['block_type']
# each type of block has different valid arguments, fill accordingly
if block_type == 'cn':
block_args['kernel_size'] = kernel_size
elif block_type == 'er':
block_args['exp_kernel_size'] = kernel_size
else:
block_args['dw_kernel_size'] = kernel_size
if block_type == 'ir' or block_type == 'er':
block_args['exp_ratio'] = exp_ratio
return block_args
def _gen_childnet(**kwargs):
# arch_list = [[0], [3, 2], [3, 2], [3, 3], [3, 3, 3], [3, 3, 3], [0]]
# arch_list = [[0], [3, 2, 3, 3], [3, 2, 3, 1], [3, 0, 3, 2], [3, 3, 3, 3], [3, 3, 3, 3], [0]]
# arch_list = [[0], [3,4,3,1],[3,2,3,0],[3,3,3,1],[3,3,3,3],[3,3,3,3],[0]]
arch_list = [[0], [3, 4, 2, 0], [5, 2, 4, 0], [4, 3, 2, 2], [1, 3, 0, 1], [2, 4, 4, 2], [0]]
# arch_list = [[0], [], [], [], [], [0]]
choices = {'kernel_size': [3, 5, 7], 'exp_ratio': [4, 6]}
choices_list = [[x,y] for x in choices['kernel_size'] for y in choices['exp_ratio']]
num_features = 1280
# act_layer = HardSwish
act_layer = Swish
'''
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_se0.25'],
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25'],
# stage 2, 56x56 in
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'],
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r2_k3_s1_e4_c80_se0.25'],
# stage 4, 14x14in
['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'],
# stage 5, 14x14in
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c320_se0.25'],
]
'''
arch_def = [
# stage 0, 112x112 in
['ds_r1_k3_s1_e1_c16_se0.25'],
# stage 1, 112x112 in
['ir_r1_k3_s2_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25', 'ir_r1_k3_s1_e4_c24_se0.25'],
# stage 2, 56x56 in
['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s1_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'],
# stage 3, 28x28 in
['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r1_k3_s1_e4_c80_se0.25', 'ir_r2_k3_s1_e4_c80_se0.25'],
# stage 4, 14x14in
['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'],
# stage 5, 14x14in
['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s1_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'],
# stage 6, 7x7 in
['cn_r1_k1_s1_c320_se0.25'],
]
#arch_def = [
# # stage 0, 112x112 in
# ['ds_r1_k3_s1_e1_c16_se0.25'],
# # stage 1, 112x112 in
# ['ir_r1_k3_s2_e4_c24_se0.25'],
# # stage 2, 56x56 in
# ['ir_r1_k5_s2_e4_c40_se0.25', 'ir_r1_k5_s2_e4_c40_se0.25'],
# # stage 3, 28x28 in
# ['ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25', 'ir_r1_k3_s2_e6_c80_se0.25'],
# # stage 4, 14x14in
# ['ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25', 'ir_r1_k3_s1_e6_c96_se0.25'],
# # stage 5, 14x14in
# ['ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25', 'ir_r1_k5_s2_e6_c192_se0.25'],
# # stage 6, 7x7 in
# ['cn_r1_k1_s1_c320_se0.25'],
#]
new_arch = []
# change to child arch_def
for i, (layer_choice, layer_arch) in enumerate(zip(arch_list, arch_def)):
if len(layer_arch) == 1:
new_arch.append(layer_arch)
continue
else:
new_layer = []
for j, (block_choice, block_arch) in enumerate(zip(layer_choice, layer_arch)):
kernel_size, exp_ratio = choices_list[block_choice]
elements = block_arch.split('_')
block_arch = block_arch.replace(elements[2], 'k{}'.format(str(kernel_size)))
block_arch = block_arch.replace(elements[4], 'e{}'.format(str(exp_ratio)))
new_layer.append(block_arch)
new_arch.append(new_layer)
model_kwargs = dict(
block_args=decode_arch_def(new_arch),
num_features=num_features,
stem_size=16,
# channel_multiplier=channel_multiplier,
norm_kwargs=resolve_bn_args(kwargs),
act_layer=act_layer,
se_kwargs=dict(act_layer=nn.ReLU, gate_fn=hard_sigmoid, reduce_mid=True, divisor=8),
num_classes=1000,
drop_rate=0.2,
drop_path_rate=0.2,
global_pool='avg'
)
model = ChildNet(**model_kwargs)
return model
class CyDASseg(nn.Module):
def __init__(self, Fch=12, num_classes=19, stem_head_width=(1., 1.)):
super(CyDASseg, self).__init__()
self._num_classes = num_classes
self._stem_head_width = stem_head_width
self.backbone = _gen_childnet()
# self.f_channels = [16, 24, 40, 96]
self.f_channels = [24, 40, 96, 320]
self._Fch = Fch
# del self.backbone.blocks[3][2]
#for m in self.backbone.modules():
# if isinstance(m, nn.BatchNorm2d):
# m.eval()
# m.weight.requires_grad = False
# m.bias.requires_grad = False
self.last_channel = self.backbone.blocks[-1][-1].conv.out_channels # self.backbone.blocks[-1][-1]
# building decoder
self.build_arm_ffm_head()
def init_weights(self, pretrained=None):
if pretrained:
state_dict = torch.load(pretrained)
state_dict = state_dict['state_dict']
# resume_checkpoint(self.backbone, pretrained)
self.backbone.load_state_dict(state_dict, strict=True)
else:
print("No pretrained model!")
return
def build_arm_ffm_head(self):
# 24, 40, 96, 320
if self.training:
self.heads32 = Head(self.f_channels[-1], self._num_classes, True, norm_layer=BatchNorm2d)
self.heads16 = Head(self.f_channels[-2], self._num_classes, True, norm_layer=BatchNorm2d)
self.heads8 = Decoder(self.num_filters(8, self._stem_head_width[1]), self.f_channels[0], self._num_classes, Fch=self._Fch, scale=4, branch=1, is_aux=False, norm_layer=BatchNorm2d)
self.arms32 = nn.ModuleList([
ConvNorm(self.f_channels[-1], self.num_filters(16, self._stem_head_width[1]), 1, 1, 0, slimmable=False),
ConvNorm(self.num_filters(16, self._stem_head_width[1]), self.num_filters(8, self._stem_head_width[1]), 1, 1, 0, slimmable=False),
])
self.refines32 = nn.ModuleList([
ConvNorm(self.num_filters(16, self._stem_head_width[1])+self.f_channels[-2], self.num_filters(16, self._stem_head_width[1]), 3, 1, 1, slimmable=False),
ConvNorm(self.num_filters(8, self._stem_head_width[1])+self.f_channels[-3], self.num_filters(8, self._stem_head_width[1]), 3, 1, 1, slimmable=False),
])
self.ffm = FeatureFusion(self.num_filters(8, self._stem_head_width[1]), self.num_filters(8, self._stem_head_width[1]), reduction=1, Fch=self._Fch, scale=8, branch=1, norm_layer=BatchNorm2d)
def agg_ffm(self, outputs8, outputs16, outputs32, outputs4):
pred32 = []; pred16 = []; pred8 = [] # order of predictions is not important
if self.training: pred32.append(outputs32)
out = self.arms32[0](outputs32)
out = F.interpolate(out, size=(int(out.size(2))*2, int(out.size(3))*2), mode='bilinear', align_corners=False)
out = self.refines32[0](torch.cat([out, outputs16], dim=1))
if self.training: pred16.append(outputs16)
out = self.arms32[1](out)
out = F.interpolate(out, size=(int(out.size(2))*2, int(out.size(3))*2), mode='bilinear', align_corners=False)
out = self.refines32[1](torch.cat([out, outputs8], dim=1))
pred8.append(out)
if len(pred32) > 0:
pred32 = self.heads32(torch.cat(pred32, dim=1))
else:
pred32 = None
if len(pred16) > 0:
pred16 = self.heads16(torch.cat(pred16, dim=1))
else:
pred16 = None
pred8 = self.heads8(self.ffm(torch.cat(pred8, dim=1)), outputs4)
if self.training:
return pred8, pred16, pred32
else:
return pred8
def num_filters(self, scale, width=1.0):
return int(np.round(scale * self._Fch * width))
def forward(self, x):
b,c,h,w = x.shape
outputs = self.backbone(x)
        outputs4, outputs8, outputs16, outputs32 = outputs[0], outputs[1], outputs[2], outputs[3]
if self.training:
pred8, pred16, pred32 = self.agg_ffm(outputs8, outputs16, outputs32, outputs4)
pred8 = F.interpolate(pred8, size=(h,w), mode='bilinear', align_corners=False)
if pred16 is not None: pred16 = F.interpolate(pred16, size=(h,w), mode='bilinear', align_corners=False)
if pred32 is not None: pred32 = F.interpolate(pred32, size=(h,w), mode='bilinear', align_corners=False)
return pred8, pred16, pred32
else:
pred8 = self.agg_ffm(outputs8, outputs16, outputs32, outputs4)
out = F.interpolate(pred8, size=(int(pred8.size(2))*4, int(pred8.size(3))*4), mode='bilinear', align_corners=False)
return out
|
Cream/CDARTS/CDARTS_segmentation/train/cydas.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/cydas.py",
"repo_id": "Cream",
"token_count": 9355
}
| 294 |
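A hedged sketch of running CyDASseg in inference mode; it assumes the training-side dependencies imported at the top of the file (builder, operations, seg_oprs, layers, ...) resolve, and the input resolution is an arbitrary Cityscapes-like example.

import torch

net = CyDASseg(Fch=12, num_classes=19)
net.eval()                                   # single-branch prediction path
with torch.no_grad():
    logits = net(torch.randn(1, 3, 512, 1024))
print(logits.shape)  # (1, 19, H', W'); the output stride depends on the decoder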
from __future__ import division
import os
import shutil
import sys
import time
import glob
import json
import logging
import argparse
import _init_paths
from utils.darts_utils import create_exp_dir, save, plot_op, plot_path_width, objective_acc_lat
parser = argparse.ArgumentParser(description='parameters for sampling')
parser.add_argument('--arch_loc', default='./jsons', type=str, help='directory containing architecture json files')
parser.add_argument('--save_dir', default='./archs', type=str, help='directory to save the rendered figures')
parser.add_argument("--Fch", default=12, type=int, help='Fch')
parser.add_argument('--stem_head_width', type=float, default=0.6666666666666666, help='width ratio of the stem and head')
args = parser.parse_args()
def main():
width_mult_list = [4./12, 6./12, 8./12, 10./12, 1.,]
json_files = glob.glob(os.path.join(args.arch_loc, "*.json"))
for json_file in json_files:
with open(json_file, 'r') as f:
model_dict = json.loads(f.read())
last = model_dict["lasts"]
        save_dir = os.path.join(args.save_dir, os.path.splitext(os.path.basename(json_file))[0])
os.makedirs(save_dir, exist_ok=True)
try:
for b in range(len(last)):
if len(width_mult_list) > 1:
plot_op(model_dict["ops"][b], model_dict["paths"][b], width=model_dict["widths"][b], head_width=args.stem_head_width, F_base=args.Fch).savefig(os.path.join(save_dir, "ops_%d_%d.png"%(0,b)), bbox_inches="tight")
else:
plot_op(model_dict["ops"][b], model_dict["paths"][b], F_base=args.Fch).savefig(os.path.join(save_dir, "ops_%d_%d.png"%(0,b)), bbox_inches="tight")
plot_path_width(model_dict["lasts"], model_dict["paths"], model_dict["widths"]).savefig(os.path.join(save_dir, "path_width%d.png"%0))
        except Exception:
print("Arch: {} is invalid".format(json_file))
shutil.rmtree(save_dir)
if __name__ == '__main__':
main()
|
Cream/CDARTS/CDARTS_segmentation/train/vis_arch.py/0
|
{
"file_path": "Cream/CDARTS/CDARTS_segmentation/train/vis_arch.py",
"repo_id": "Cream",
"token_count": 852
}
| 295 |
""" Config class for search/augment """
import argparse
import os
from functools import partial
import torch
def get_parser(name):
""" make default formatted parser """
parser = argparse.ArgumentParser(name, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# print default value always
parser.add_argument = partial(parser.add_argument, help=' ')
return parser
def parse_gpus(gpus):
if gpus == 'all':
return list(range(torch.cuda.device_count()))
else:
return [int(s) for s in gpus.split(',')]
class BaseConfig(argparse.Namespace):
def print_params(self, prtf=print):
prtf("")
prtf("Parameters:")
for attr, value in sorted(vars(self).items()):
prtf("{}={}".format(attr.upper(), value))
prtf("")
def as_markdown(self):
""" Return configs as markdown format """
text = "|name|value| \n|-|-| \n"
for attr, value in sorted(vars(self).items()):
text += "|{}|{}| \n".format(attr, value)
return text
class SearchConfig(BaseConfig):
def build_parser(self):
parser = get_parser("Search config")
parser.add_argument('--name', required=True)
########### basic settings ############
parser.add_argument('--dataset', default='imagenet', help='CIFAR10 / MNIST / FashionMNIST / imagenet')
parser.add_argument('--model_type', type=str, default='cifar', help='cifar or imagenet')
parser.add_argument('--data_dir', type=str, default='experiments/data/cifar', help='cifar dataset')
parser.add_argument('--train_dir', type=str, default='experiments/data/imagenet/train', help='')
parser.add_argument('--val_dir', type=str, default='experiments/data/imagenet/train', help='')
parser.add_argument('--test_dir', type=str, default='experiments/data/imagenet/val', help='')
parser.add_argument('--param_pool_path', type=str, default=None, help='')
parser.add_argument('--input_channels', type=int, default=3)
parser.add_argument('--init_channels', type=int, default=16)
parser.add_argument('--stem_multiplier', type=int, default=3)
parser.add_argument('--n_classes', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--print_freq', type=int, default=50, help='print frequency')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--workers', type=int, default=4, help='# of workers')
parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. '
'`all` indicates use all gpus.')
parser.add_argument('--sample_ratio', type=float, default=0.2, help='imagenet sample ratio')
parser.add_argument('--resume', action='store_true', default=False, help='resnet stem(pretrain)')
########### learning rate ############
parser.add_argument('--w_lr', type=float, default=0.05, help='lr for weights')
parser.add_argument('--lr_ratio', type=float, default=0.5, help='lr for trained layers')
parser.add_argument('--w_lr_min', type=float, default=0.001, help='minimum lr for weights')
parser.add_argument('--w_momentum', type=float, default=0.9, help='momentum for weights')
parser.add_argument('--w_weight_decay', type=float, default=3e-4,
help='weight decay for weights')
parser.add_argument('--w_grad_clip', type=float, default=5.,
help='gradient clipping for weights')
parser.add_argument('--alpha_lr', type=float, default=6e-4, help='lr for alpha')
parser.add_argument('--alpha_weight_decay', type=float, default=1e-3,
help='weight decay for alpha')
########### alternate training ############
parser.add_argument('--res_stem', action='store_true', default=False, help='resnet stem(pretrain)')
parser.add_argument('--layer_num', type=int, default=3, help='layer need to be replaced')
parser.add_argument('--cells_num', type=int, default=3, help='cells num of one layer')
parser.add_argument('--pretrain_epochs', type=int, default=5, help='# of training epochs')
parser.add_argument('--pretrain_decay', type=int, default=5, help='pretrain epochs')
parser.add_argument('--random_times', type=int, default=10, help='# of training epochs')
parser.add_argument('--random_epochs', type=int, default=3, help='# of training epochs')
parser.add_argument('--search_iter', type=int, default=5, help='times of search')
parser.add_argument('--search_iter_epochs', type=int, default=5, help='# of training epochs')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--one_stage', action='store_true', default=False, help='one_stage search')
parser.add_argument('--same_structure', action='store_true', default=False, help='same_structure search and retrain')
parser.add_argument('--clean_arch', action='store_true', default=False, help='clean archs each epoch')
parser.add_argument('--sync_param', action='store_true', default=False, help='whether to sync param')
parser.add_argument('--ensemble_sum', action='store_true', default=False, help='ensemble sum or concat')
parser.add_argument('--ensemble_param', action='store_true', default=False, help='whether to learn ensemble params')
parser.add_argument('--use_beta', action='store_true', default=False, help='whether to use beta arch param')
parser.add_argument('--bn_affine', action='store_true', default=False, help='main bn affine')
parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to sync bn')
parser.add_argument('--use_apex', action='store_true', default=False, help='whether to apex')
parser.add_argument('--regular', action='store_true', default=False, help='resnet stem(pretrain)')
parser.add_argument('--regular_ratio', type=float, default=0.5, help='regular ratio')
parser.add_argument('--regular_coeff', type=float, default=5, help='regular coefficient')
parser.add_argument('--repeat_cell', action='store_true', default=False, help='use repeat cell')
parser.add_argument('--fix_head', action='store_true', default=False, help='whether to fix head')
parser.add_argument('--share_fc', action='store_true', default=False, help='whether to share fc')
parser.add_argument('--nasnet_lr', type=float, default=0.1, help='lr of nasnet')
parser.add_argument('--nasnet_warmup', type=int, default=5, help='warm up of nasnet')
parser.add_argument('--loss_alpha', type=float, default=1, help='loss alpha')
parser.add_argument('--loss_T', type=float, default=2, help='loss T')
parser.add_argument('--interactive_type', type=int, default=0, help='0 kl 1 cosine 2 mse 3 sl1')
parser.add_argument('--gumbel_sample', action='store_true', default=False, help='whether to use gumbel sample')
parser.add_argument('--sample_pretrain', action='store_true', default=False, help='sample_pretrain')
########### data augument ############
parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.2, help='drop path prob')
parser.add_argument('--use_aa', action='store_true', default=False, help='whether to use aa')
        parser.add_argument('--mixup_alpha', default=0., type=float,
                            help='mixup interpolation coefficient (default: 0.)')
########### distributed ############
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--world_size", default=1, type=int)
parser.add_argument('--dist_url', default='tcp://127.0.0.1:23456', type=str, help='url used to set up distributed training')
parser.add_argument('--distributed', action='store_true', help='Run model distributed mode.')
return parser
def __init__(self):
parser = self.build_parser()
args = parser.parse_args()
super().__init__(**vars(args))
self.data_path = './experiments/data/'
self.path = os.path.join('experiments', 'search', self.name)
self.resume_path = os.path.join(self.path, 'search_resume.pth.tar')
self.plot_path = os.path.join(self.path, 'plots')
self.retrain_path = os.path.join(self.path, 'retrain')
self.gpus = parse_gpus(self.gpus)
class AugmentConfig(BaseConfig):
def build_parser(self):
parser = get_parser("Augment config")
parser.add_argument('--name', required=True)
parser.add_argument('--dataset', required=True, help='cifar10 / cifar100 / imagenet')
parser.add_argument('--model_type', type=str, default='cifar', help='cifar or imagenet')
parser.add_argument('--data_dir', type=str, default='experiments/data/cifar', help='cifar dataset')
parser.add_argument('--train_dir', type=str, default='experiments/data/imagenet/train', help='')
parser.add_argument('--test_dir', type=str, default='experiments/data/imagenet/val', help='')
parser.add_argument('--cell_file', type=str, default='CDARTS/cells/cifar_genotype.json', help='')
parser.add_argument('--resume', action='store_true', default=False, help='resnet stem(pretrain)')
parser.add_argument('--n_classes', type=int, default=10)
parser.add_argument('--input_channels', type=int, default=3)
parser.add_argument('--stem_multiplier', type=int, default=3)
########### alternate training ############
parser.add_argument('--res_stem', action='store_true', default=False, help='resnet stem(pretrain)')
parser.add_argument('--layer_num', type=int, default=3, help='layer need to be replaced')
parser.add_argument('--cells_num', type=int, default=3, help='cells num of one layer')
parser.add_argument('--same_structure', action='store_true', default=False, help='same_structure search and retrain')
parser.add_argument('--ensemble_sum', action='store_true', default=False, help='whether to ensemble')
parser.add_argument('--ensemble_param', action='store_true', default=False, help='whether to learn ensemble params')
parser.add_argument('--use_beta', action='store_true', default=False, help='whether to use beta arch param')
parser.add_argument('--bn_affine', action='store_true', default=False, help='main bn affine')
parser.add_argument('--repeat_cell', action='store_true', default=False, help='use repeat cell')
parser.add_argument('--fix_head', action='store_true', default=False, help='whether to fix head')
parser.add_argument('--share_fc', action='store_true', default=False, help='whether to share fc')
parser.add_argument('--sample_pretrain', action='store_true', default=False, help='sample_pretrain')
parser.add_argument('--use_aa', action='store_true', default=False, help='whether to use aa')
        parser.add_argument('--mixup_alpha', default=0., type=float,
                            help='mixup interpolation coefficient (default: 0.)')
parser.add_argument('--resume_name', type=str, default='retrain_resume.pth.tar')
parser.add_argument('--batch_size', type=int, default=128, help='batch size')
parser.add_argument('--lr', type=float, default=0.025, help='lr for weights')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='weight decay')
parser.add_argument('--grad_clip', type=float, default=5.,
help='gradient clipping for weights')
parser.add_argument('--print_freq', type=int, default=200, help='print frequency')
parser.add_argument('--gpus', default='0', help='gpu device ids separated by comma. '
'`all` indicates use all gpus.')
parser.add_argument('--epochs', type=int, default=600, help='# of training epochs')
parser.add_argument('--warmup_epochs', type=int, default=5, help='# warmup')
parser.add_argument('--init_channels', type=int, default=36)
parser.add_argument('--layers', type=int, default=20, help='# of layers')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument('--workers', type=int, default=4, help='# of workers')
parser.add_argument('--aux_weight', type=float, default=0.4, help='auxiliary loss weight')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--sample_archs', type=int, default=1, help='sample arch num')
parser.add_argument('--label_smooth', type=float, default=0.1, help='label smoothing')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path prob')
########### distributed ############
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--world_size", default=1, type=int)
parser.add_argument('--use_amp', action='store_true', default=False, help='whether to use amp')
parser.add_argument('--opt-level', type=str, default='O1')
parser.add_argument('--dist_url', default='tcp://127.0.0.1:23456', type=str, help='url used to set up distributed training')
parser.add_argument('--fp16', action='store_true',
help='Run model fp16 mode.')
parser.add_argument('--distributed', action='store_true',
help='Run model distributed mode.')
parser.add_argument('--static-loss-scale', type=float, default=1,
help='Static loss scale, positive power of 2 values can improve fp16 convergence.')
parser.add_argument('--dynamic-loss-scale', action='store_true',
help='Use dynamic loss scaling. If supplied, this argument supersedes ' +
'--static-loss-scale.')
return parser
def __init__(self):
parser = self.build_parser()
args = parser.parse_args()
super().__init__(**vars(args))
self.data_path = './experiments/data/'
self.path = os.path.join('experiments', 'retrain', self.name)
self.gpus = parse_gpus(self.gpus)
self.resume_path = os.path.join(self.path, self.resume_name)
|
Cream/CDARTS/lib/config.py/0
|
{
"file_path": "Cream/CDARTS/lib/config.py",
"repo_id": "Cream",
"token_count": 5765
}
| 296 |
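A minimal sketch of how these config classes are consumed; the command-line arguments are hypothetical and are injected through sys.argv only for illustration.

import sys

sys.argv = ['search.py', '--name', 'demo', '--gpus', '0,1']  # hypothetical CLI
config = SearchConfig()      # parses the arguments declared in build_parser()
config.print_params()        # NAME=demo, GPUS=[0, 1], BATCH_SIZE=128, ...
print(config.as_markdown())  # the same parameters as a markdown table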
""" Genotypes
- Genotype: normal/reduce gene + normal/reduce cell output connection (concat)
- gene: discrete ops information (w/o output connection)
- dag: real ops (can be mixed or discrete, but Genotype has only discrete information itself)
"""
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.models import ops
from lib.models.ops import PRIMITIVES
Genotype = namedtuple('Genotype', 'normal normal_concat reduce reduce_concat')
def to_dag(C_in, gene, reduction, bn_affine=True):
""" generate discrete ops from gene """
dag = nn.ModuleList()
for edges in gene:
row = nn.ModuleList()
for op_name, s_idx in edges:
# reduction cell & from input nodes => stride = 2
stride = 2 if reduction and s_idx < 2 else 1
op = ops.OPS[op_name](C_in, stride, bn_affine)
if not isinstance(op, ops.Identity): # Identity does not use drop path
op = nn.Sequential(
op,
ops.DropPath_()
)
op.s_idx = s_idx
row.append(op)
dag.append(row)
return dag
def from_str(s):
""" generate genotype from string
e.g. "Genotype(
normal=[[('sep_conv_3x3', 0), ('sep_conv_3x3', 1)],
[('sep_conv_3x3', 1), ('dil_conv_3x3', 2)],
[('sep_conv_3x3', 1), ('sep_conv_3x3', 2)],
[('sep_conv_3x3', 1), ('dil_conv_3x3', 4)]],
normal_concat=range(2, 6),
reduce=[[('max_pool_3x3', 0), ('max_pool_3x3', 1)],
[('max_pool_3x3', 0), ('skip_connect', 2)],
[('max_pool_3x3', 0), ('skip_connect', 2)],
[('max_pool_3x3', 0), ('skip_connect', 2)]],
reduce_concat=range(2, 6))"
"""
genotype = eval(s)
return genotype
def parse(alpha, beta, k):
"""
parse continuous alpha to discrete gene.
alpha is ParameterList:
ParameterList [
Parameter(n_edges1, n_ops),
Parameter(n_edges2, n_ops),
...
]
beta is ParameterList:
ParameterList [
Parameter(n_edges1),
Parameter(n_edges2),
...
]
gene is list:
[
[('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)],
[('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)],
...
]
each node has two edges (k=2) in CNN.
"""
gene = []
assert PRIMITIVES[-1] == 'none' # assume last PRIMITIVE is 'none'
# 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge
# 2) Choose top-k edges per node by edge score (top-1 weight in edge)
# output the connect idx[(node_idx, connect_idx, op_idx).... () ()]
connect_idx = []
for edges, w in zip(alpha, beta):
# edges: Tensor(n_edges, n_ops)
edge_max, primitive_indices = torch.topk((w.view(-1, 1) * edges)[:, :-1], 1) # ignore 'none'
topk_edge_values, topk_edge_indices = torch.topk(edge_max.view(-1), k)
node_gene = []
node_idx = []
for edge_idx in topk_edge_indices:
prim_idx = primitive_indices[edge_idx]
prim = PRIMITIVES[prim_idx]
node_gene.append((prim, edge_idx.item()))
node_idx.append((edge_idx.item(), prim_idx.item()))
gene.append(node_gene)
connect_idx.append(node_idx)
return gene, connect_idx
def parse_gumbel(alpha, beta, k):
"""
parse continuous alpha to discrete gene.
alpha is ParameterList:
ParameterList [
Parameter(n_edges1, n_ops),
Parameter(n_edges2, n_ops),
...
]
beta is ParameterList:
ParameterList [
Parameter(n_edges1),
Parameter(n_edges2),
...
]
gene is list:
[
[('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)],
[('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)],
...
]
each node has two edges (k=2) in CNN.
"""
gene = []
assert PRIMITIVES[-1] == 'none' # assume last PRIMITIVE is 'none'
# 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge
# 2) Choose top-k edges per node by edge score (top-1 weight in edge)
# output the connect idx[(node_idx, connect_idx, op_idx).... () ()]
connect_idx = []
for edges, w in zip(alpha, beta):
# edges: Tensor(n_edges, n_ops)
discrete_a = F.gumbel_softmax(edges[:, :-1].reshape(-1), tau=1, hard=True)
for i in range(k-1):
discrete_a = discrete_a + F.gumbel_softmax(edges[:, :-1].reshape(-1), tau=1, hard=True)
discrete_a = discrete_a.reshape(-1, len(PRIMITIVES)-1)
reserved_edge = (discrete_a>0).nonzero()
node_gene = []
node_idx = []
for i in range(reserved_edge.shape[0]):
edge_idx = reserved_edge[i][0].item()
prim_idx = reserved_edge[i][1].item()
prim = PRIMITIVES[prim_idx]
node_gene.append((prim, edge_idx))
node_idx.append((edge_idx, prim_idx))
gene.append(node_gene)
connect_idx.append(node_idx)
return gene, connect_idx
|
Cream/CDARTS/lib/utils/genotypes.py/0
|
{
"file_path": "Cream/CDARTS/lib/utils/genotypes.py",
"repo_id": "Cream",
"token_count": 2551
}
| 297 |
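A small sketch of parse() on random architecture weights for two intermediate nodes (2 and 3 incoming edges); it assumes PRIMITIVES from lib.models.ops is in scope, and the printed gene is only an example of the output format.

import torch
import torch.nn as nn

n_ops = len(PRIMITIVES)
alpha = nn.ParameterList([nn.Parameter(torch.randn(n_edges, n_ops))
                          for n_edges in (2, 3)])
beta = nn.ParameterList([nn.Parameter(torch.randn(n_edges))
                         for n_edges in (2, 3)])
gene, connect_idx = parse(alpha, beta, k=2)
print(gene)  # e.g. [[('sep_conv_3x3', 0), ('skip_connect', 1)], [...]]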
# This file is downloaded from https://github.com/rwightman/pytorch-image-models
# This file is to define the inverted residual block which is the base operation in our search space.
import torch.nn as nn
from timm.models.layers import create_conv2d
from timm.models.efficientnet_blocks import make_divisible, resolve_se_args, \
SqueezeExcite, drop_path
class InvertedResidual(nn.Module):
""" Inverted residual block w/ optional SE and CondConv routing"""
def __init__(self, in_chs, out_chs, dw_kernel_size=3,
stride=1, dilation=1, pad_type='', act_layer=nn.ReLU, noskip=False,
exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1,
se_ratio=0., se_kwargs=None, norm_layer=nn.BatchNorm2d, norm_kwargs=None,
conv_kwargs=None, drop_path_rate=0.):
super(InvertedResidual, self).__init__()
norm_kwargs = norm_kwargs or {}
conv_kwargs = conv_kwargs or {}
mid_chs = make_divisible(in_chs * exp_ratio)
has_se = se_ratio is not None and se_ratio > 0.
self.has_residual = (in_chs == out_chs and stride == 1) and not noskip
self.drop_path_rate = drop_path_rate
# Point-wise expansion
self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
self.bn1 = norm_layer(mid_chs, **norm_kwargs)
self.act1 = act_layer(inplace=True)
# Depth-wise convolution
self.conv_dw = create_conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,
padding=pad_type, depthwise=True, **conv_kwargs)
self.bn2 = norm_layer(mid_chs, **norm_kwargs)
self.act2 = act_layer(inplace=True)
# Squeeze-and-excitation
if has_se:
se_kwargs = resolve_se_args(se_kwargs, in_chs, act_layer)
self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio, **se_kwargs)
else:
self.se = None
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
self.bn3 = norm_layer(out_chs, **norm_kwargs)
def feature_info(self, location):
if location == 'expansion': # after SE, input to PWL
info = dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
info = dict(module='', hook_type='', num_chs=self.conv_pwl.out_channels)
return info
def forward(self, x):
residual = x
# Point-wise expansion
x = self.conv_pw(x)
x = self.bn1(x)
x = self.act1(x)
# Depth-wise convolution
x = self.conv_dw(x)
x = self.bn2(x)
x = self.act2(x)
# Squeeze-and-excitation
if self.se is not None:
x = self.se(x)
# Point-wise linear projection
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_residual:
if self.drop_path_rate > 0.:
x = drop_path(x, self.drop_path_rate, self.training)
x += residual
return x
|
Cream/Cream/lib/models/blocks/inverted_residual_block.py/0
|
{
"file_path": "Cream/Cream/lib/models/blocks/inverted_residual_block.py",
"repo_id": "Cream",
"token_count": 1519
}
| 298 |
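A hypothetical instantiation of the block above; it assumes a timm version that still provides the imports used at the top of the file (create_conv2d, resolve_se_args, SqueezeExcite, ...).

import torch

block = InvertedResidual(in_chs=24, out_chs=24, dw_kernel_size=3,
                         stride=1, exp_ratio=4.0, se_ratio=0.25)
x = torch.randn(1, 24, 56, 56)
print(block(x).shape)  # torch.Size([1, 24, 56, 56]); the residual path is active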
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# Written by Hao Du and Houwen Peng
# email: [email protected] and [email protected]
import os
import warnings
import datetime
import torch
import torch.nn as nn
import _init_paths
from torch.utils.tensorboard import SummaryWriter
# import timm packages
from timm.utils import ModelEma
from timm.models import resume_checkpoint
from timm.data import Dataset, create_loader
# import apex as distributed package otherwise we use torch.nn.parallel.distributed as distributed package
try:
from apex.parallel import convert_syncbn_model
from apex.parallel import DistributedDataParallel as DDP
HAS_APEX = True
except ImportError:
from torch.nn.parallel import DistributedDataParallel as DDP
HAS_APEX = False
# import models and training functions
from lib.core.test import validate
from lib.models.structures.childnet import gen_childnet
from lib.utils.util import parse_config_args, get_logger, get_model_flops_params
from lib.config import DEFAULT_CROP_PCT, IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
def main():
args, cfg = parse_config_args('child net testing')
# resolve logging
output_dir = os.path.join(cfg.SAVE_PATH,
"{}-{}".format(datetime.date.today().strftime('%m%d'),
cfg.MODEL))
if args.local_rank == 0:
logger = get_logger(os.path.join(output_dir, 'test.log'))
writer = SummaryWriter(os.path.join(output_dir, 'runs'))
else:
writer, logger = None, None
# retrain model selection
if cfg.NET.SELECTION == 481:
        arch_list = [[0], [3, 4, 3, 1], [3, 2, 3, 0], [3, 3, 3, 1, 1],
                     [3, 3, 3, 3], [3, 3, 3, 3], [0]]
cfg.DATASET.IMAGE_SIZE = 224
elif cfg.NET.SELECTION == 43:
arch_list = [[0], [3], [3, 1], [3, 1], [3, 3, 3], [3, 3], [0]]
cfg.DATASET.IMAGE_SIZE = 96
elif cfg.NET.SELECTION == 14:
arch_list = [[0], [3], [3, 3], [3, 3], [3], [3], [0]]
cfg.DATASET.IMAGE_SIZE = 64
elif cfg.NET.SELECTION == 114:
arch_list = [[0], [3], [3, 3], [3, 3], [3, 3, 3], [3, 3], [0]]
cfg.DATASET.IMAGE_SIZE = 160
elif cfg.NET.SELECTION == 287:
arch_list = [[0], [3], [3, 3], [3, 1, 3], [3, 3, 3, 3], [3, 3, 3], [0]]
cfg.DATASET.IMAGE_SIZE = 224
elif cfg.NET.SELECTION == 604:
arch_list = [[0], [3, 3, 2, 3, 3], [3, 2, 3, 2, 3], [3, 2, 3, 2, 3],
[3, 3, 2, 2, 3, 3], [3, 3, 2, 3, 3, 3], [0]]
cfg.DATASET.IMAGE_SIZE = 224
else:
raise ValueError("Model Test Selection is not Supported!")
# define childnet architecture from arch_list
stem = ['ds_r1_k3_s1_e1_c16_se0.25', 'cn_r1_k1_s1_c320_se0.25']
choice_block_pool = ['ir_r1_k3_s2_e4_c24_se0.25',
'ir_r1_k5_s2_e4_c40_se0.25',
'ir_r1_k3_s2_e6_c80_se0.25',
'ir_r1_k3_s1_e6_c96_se0.25',
'ir_r1_k5_s2_e6_c192_se0.25']
arch_def = [[stem[0]]] + [[choice_block_pool[idx]
for repeat_times in range(len(arch_list[idx + 1]))]
for idx in range(len(choice_block_pool))] + [[stem[1]]]
# generate childnet
model = gen_childnet(
arch_list,
arch_def,
num_classes=cfg.DATASET.NUM_CLASSES,
drop_rate=cfg.NET.DROPOUT_RATE,
global_pool=cfg.NET.GP)
if args.local_rank == 0:
macs, params = get_model_flops_params(model, input_size=(
1, 3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE))
logger.info(
'[Model-{}] Flops: {} Params: {}'.format(cfg.NET.SELECTION, macs, params))
# initialize distributed parameters
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
if args.local_rank == 0:
logger.info(
"Training on Process {} with {} GPUs.".format(
args.local_rank, cfg.NUM_GPU))
# resume model from checkpoint
assert cfg.AUTO_RESUME is True and os.path.exists(cfg.RESUME_PATH)
_, __ = resume_checkpoint(model, cfg.RESUME_PATH)
model = model.cuda()
model_ema = None
if cfg.NET.EMA.USE:
# Important to create EMA model after cuda(), DP wrapper, and AMP but
# before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=cfg.NET.EMA.DECAY,
device='cpu' if cfg.NET.EMA.FORCE_CPU else '',
resume=cfg.RESUME_PATH)
# imagenet validation dataset
eval_dir = os.path.join(cfg.DATA_DIR, 'val')
if not os.path.exists(eval_dir) and args.local_rank == 0:
logger.error(
'Validation folder does not exist at: {}'.format(eval_dir))
exit(1)
dataset_eval = Dataset(eval_dir)
loader_eval = create_loader(
dataset_eval,
input_size=(3, cfg.DATASET.IMAGE_SIZE, cfg.DATASET.IMAGE_SIZE),
batch_size=cfg.DATASET.VAL_BATCH_MUL * cfg.DATASET.BATCH_SIZE,
is_training=False,
num_workers=cfg.WORKERS,
distributed=True,
interpolation='bicubic',
pin_memory=cfg.DATASET.PIN_MEM,
crop_pct=DEFAULT_CROP_PCT,
mean=IMAGENET_DEFAULT_MEAN,
std=IMAGENET_DEFAULT_STD
)
    # only test accuracy of model-EMA
    assert model_ema is not None, 'this script evaluates EMA weights; set cfg.NET.EMA.USE = True'
    validate_loss_fn = nn.CrossEntropyLoss().cuda()
    validate(0, model_ema.ema, loader_eval, validate_loss_fn, cfg,
             log_suffix='_EMA', logger=logger,
             writer=writer, local_rank=args.local_rank)
if __name__ == '__main__':
main()
|
Cream/Cream/tools/test.py/0
|
{
"file_path": "Cream/Cream/tools/test.py",
"repo_id": "Cream",
"token_count": 2845
}
| 299 |
# dataset settings
dataset_type = 'WIDERFaceDataset'
data_root = 'data/WIDERFace/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(300, 300),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=60,
workers_per_gpu=2,
train=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'train.txt',
img_prefix=data_root + 'WIDER_train/',
min_size=17,
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'val.txt',
img_prefix=data_root + 'WIDER_val/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'val.txt',
img_prefix=data_root + 'WIDER_val/',
pipeline=test_pipeline))
|
Cream/EfficientViT/downstream/configs/_base_/datasets/wider_face.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/configs/_base_/datasets/wider_face.py",
"repo_id": "Cream",
"token_count": 1019
}
| 300 |
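A sketch of how a dataset config like this is typically loaded in mmdetection-style code; the relative path is an assumption about where the file lives inside the downstream repo.

from mmcv import Config

cfg = Config.fromfile('configs/_base_/datasets/wider_face.py')  # assumed path
print(cfg.data.samples_per_gpu)     # 60
print(cfg.data.train.dataset.type)  # 'WIDERFaceDataset'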
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import time
from tempfile import TemporaryDirectory
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.parallel import is_module_wrapper
from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict
try:
    import apex
except ImportError:
    apex = None
    print('apex is not installed')
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
    The checkpoint will have up to 4 fields: ``meta``, ``state_dict``,
    ``optimizer`` and ``amp``. By default ``meta`` will contain version
    and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
# save amp state dict in the checkpoint
checkpoint['amp'] = apex.amp.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
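# Usage sketch (illustrative only): saving a checkpoint for a model and optimizer.
# Note that the ``amp`` field above requires apex to be installed and initialized,
# so this call assumes an apex-enabled training setup; the file name is hypothetical.
#   save_checkpoint(model, 'work_dirs/epoch_12.pth',
#                   optimizer=optimizer, meta=dict(epoch=12))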
|
Cream/EfficientViT/downstream/mmcv_custom/runner/checkpoint.py/0
|
{
"file_path": "Cream/EfficientViT/downstream/mmcv_custom/runner/checkpoint.py",
"repo_id": "Cream",
"token_count": 1165
}
| 301 |
import torch
from timm.models.registry import register_model
from models import deit_tiny_patch16_224,\
deit_small_patch16_224,\
deit_base_patch16_224,\
deit_base_patch16_384
def get_deit_rpe_config():
from irpe import get_rpe_config as _get_rpe_config
rpe_config = _get_rpe_config(
ratio=1.9,
method="product",
mode='ctx',
shared_head=True,
skip=0,
rpe_on='k',
)
return rpe_config
@register_model
def mini_deit_tiny_patch16_224(pretrained=False, **kwargs):
return deit_tiny_patch16_224(pretrained=pretrained,
rpe_config=get_deit_rpe_config(),
use_cls_token=False,
repeated_times=2,
use_transform=True,
**kwargs)
@register_model
def mini_deit_small_patch16_224(pretrained=False, **kwargs):
return deit_small_patch16_224(pretrained=pretrained,
rpe_config=get_deit_rpe_config(),
use_cls_token=False,
repeated_times=2,
use_transform=True,
**kwargs)
@register_model
def mini_deit_base_patch16_224(pretrained=False, **kwargs):
return deit_base_patch16_224(pretrained=pretrained,
rpe_config=get_deit_rpe_config(),
use_cls_token=False,
repeated_times=2,
use_transform=True,
**kwargs)
@register_model
def mini_deit_base_patch16_384(pretrained=False, **kwargs):
return deit_base_patch16_384(pretrained=pretrained,
rpe_config=get_deit_rpe_config(),
use_cls_token=False,
repeated_times=2,
use_transform=True,
**kwargs)
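if __name__ == '__main__':
    # Minimal usage sketch (assumes timm and this repo's `models`/`irpe` modules are
    # importable); builds one of the registered variants through timm's model factory.
    from timm.models import create_model
    model = create_model('mini_deit_tiny_patch16_224', pretrained=False)
    out = model(torch.randn(1, 3, 224, 224))
    print(out.shape)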
|
Cream/MiniViT/Mini-DeiT/mini_deit_models.py/0
|
{
"file_path": "Cream/MiniViT/Mini-DeiT/mini_deit_models.py",
"repo_id": "Cream",
"token_count": 1244
}
| 302 |
MODEL:
TYPE: swin_minivit_distill
NAME: swin_base_patch4_window7_224_minivit
DROP_PATH_RATE: 0.2
SWIN:
EMBED_DIM: 128
DEPTHS: [ 2, 2, 18, 2 ]
NUM_HEADS: [ 4, 8, 16, 32 ]
WINDOW_SIZE: 7
MINIVIT:
SEPARATE_LAYERNUM_LIST: [1, 1, 9, 1]
|
Cream/MiniViT/Mini-Swin/configs/swin_base_patch4_window7_224_minivit_sharenum2.yaml/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/configs/swin_base_patch4_window7_224_minivit_sharenum2.yaml",
"repo_id": "Cream",
"token_count": 140
}
| 303 |
from .build import build_model
|
Cream/MiniViT/Mini-Swin/models/__init__.py/0
|
{
"file_path": "Cream/MiniViT/Mini-Swin/models/__init__.py",
"repo_id": "Cream",
"token_count": 7
}
| 304 |
include src/open_clip/bpe_simple_vocab_16e6.txt.gz
include src/open_clip/model_configs/*.json
|
Cream/TinyCLIP/MANIFEST.in/0
|
{
"file_path": "Cream/TinyCLIP/MANIFEST.in",
"repo_id": "Cream",
"token_count": 38
}
| 305 |
import ast
import json
import logging
import math
import os
import random
import sys
import braceexpand
from dataclasses import dataclass
from multiprocessing import Value
import numpy as np
import pandas as pd
import torch
import torchvision.datasets as datasets
import webdataset as wds
from PIL import Image
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
try:
import horovod.torch as hvd
except ImportError:
hvd = None
try:
from timm.data import TimmDatasetTar
except ImportError:
# for higher version of timm
from timm.data import ImageDataset as TimmDatasetTar
class CsvDataset(Dataset):
def __init__(self, input_filename, transforms, img_key, caption_key, sep="\t", tokenizer=None):
logging.debug(f'Loading csv data from {input_filename}.')
df = pd.read_csv(input_filename, sep=sep)
self.images = df[img_key].tolist()
self.captions = df[caption_key].tolist()
self.transforms = transforms
logging.debug('Done loading data.')
self.tokenize = tokenizer
def __len__(self):
return len(self.captions)
def __getitem__(self, idx):
images = self.transforms(Image.open(str(self.images[idx])))
texts = self.tokenize([str(self.captions[idx])])[0]
return images, texts
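# Usage sketch (hypothetical TSV with `filepath` and `title` columns and any callable
# tokenizer that maps a list of strings to token tensors; illustrative only):
#   ds = CsvDataset('pairs.tsv', preprocess, img_key='filepath',
#                   caption_key='title', sep='\t', tokenizer=tokenize_fn)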
class SharedEpoch:
def __init__(self, epoch: int = 0):
self.shared_epoch = Value('i', epoch)
def set_value(self, epoch):
self.shared_epoch.value = epoch
def get_value(self):
return self.shared_epoch.value
@dataclass
class DataInfo:
dataloader: DataLoader
sampler: DistributedSampler = None
shared_epoch: SharedEpoch = None
def set_epoch(self, epoch):
if self.shared_epoch is not None:
self.shared_epoch.set_value(epoch)
if self.sampler is not None and isinstance(self.sampler, DistributedSampler):
self.sampler.set_epoch(epoch)
def expand_urls(urls, weights=None):
if weights is None:
expanded_urls = wds.shardlists.expand_urls(urls)
return expanded_urls, None
if isinstance(urls, str):
urllist = urls.split("::")
weights = weights.split('::')
assert len(weights) == len(urllist), \
f"Expected the number of data components ({len(urllist)}) and weights({len(weights)}) to match."
weights = [float(weight) for weight in weights]
all_urls, all_weights = [], []
for url, weight in zip(urllist, weights):
expanded_url = list(braceexpand.braceexpand(url))
expanded_weights = [weight for _ in expanded_url]
all_urls.extend(expanded_url)
all_weights.extend(expanded_weights)
return all_urls, all_weights
else:
all_urls = list(urls)
return all_urls, weights
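# Example (illustrative): expand_urls("a-{00..01}.tar::b-{00..00}.tar", weights="1::2")
# returns (['a-00.tar', 'a-01.tar', 'b-00.tar'], [1.0, 1.0, 2.0]); a brace-notation
# string without weights is expanded via wds.shardlists.expand_urls instead.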
def get_dataset_size(shards):
shards_list, _ = expand_urls(shards)
dir_path = os.path.dirname(shards_list[0])
sizes_filename = os.path.join(dir_path, 'sizes.json')
len_filename = os.path.join(dir_path, '__len__')
if os.path.exists(sizes_filename):
sizes = json.load(open(sizes_filename, 'r'))
total_size = sum([int(sizes[os.path.basename(shard)])
for shard in shards_list])
elif os.path.exists(len_filename):
# FIXME this used to be eval(open(...)) but that seemed rather unsafe
total_size = ast.literal_eval(open(len_filename, 'r').read())
else:
total_size = None # num samples undefined
# some common dataset sizes (at time of authors last download)
# CC3M (train): 2905954
# CC12M: 10968539
# LAION-400M: 407332084
# LAION-2B (english): 2170337258
num_shards = len(shards_list)
return total_size, num_shards
def get_imagenet(args, preprocess_fns, split):
assert split in ["train", "val", "v2"]
is_train = split == "train"
preprocess_train, preprocess_val = preprocess_fns
if split == "v2":
from imagenetv2_pytorch import ImageNetV2Dataset
dataset = ImageNetV2Dataset(
location=args.imagenet_v2, transform=preprocess_val)
else:
if is_train:
data_path = args.imagenet_train
preprocess_fn = preprocess_train
else:
data_path = args.imagenet_val
preprocess_fn = preprocess_val
assert data_path
        data_dir = os.path.join(data_path, 'val.tar')
if os.path.exists(data_dir):
dataset = TimmDatasetTar(data_dir, transform=preprocess_fn)
else:
val_data_path = os.path.join(data_path, 'val')
if os.path.exists(val_data_path):
data_path = val_data_path
dataset = datasets.ImageFolder(data_path, transform=preprocess_fn)
if is_train:
idxs = np.zeros(len(dataset.targets))
target_array = np.array(dataset.targets)
k = 50
for c in range(1000):
m = target_array == c
n = len(idxs[m])
arr = np.zeros(n)
arr[:k] = 1
np.random.shuffle(arr)
idxs[m] = arr
idxs = idxs.astype('int')
sampler = SubsetRandomSampler(np.where(idxs)[0])
else:
indices = np.arange(args.rank, len(dataset), args.world_size)
sampler = SubsetRandomSampler(indices)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.workers,
sampler=sampler,
)
return DataInfo(dataloader=dataloader, sampler=sampler)
def count_samples(dataloader):
os.environ["WDS_EPOCH"] = "0"
n_elements, n_batches = 0, 0
for images, texts in dataloader:
n_batches += 1
n_elements += len(images)
assert len(images) == len(texts)
return n_elements, n_batches
def filter_no_caption_or_no_image(sample):
has_caption = ('txt' in sample)
has_image = (
'png' in sample or 'jpg' in sample or 'jpeg' in sample or 'webp' in sample)
return has_caption and has_image
def log_and_continue(exn):
"""Call in an exception handler to ignore any exception, issue a warning, and continue."""
logging.warning(f'Handling webdataset error ({repr(exn)}). Ignoring.')
return True
def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None):
"""Return function over iterator that groups key, value pairs into samples.
:param keys: function that splits the key into key and extension (base_plus_ext)
:param lcase: convert suffixes to lower case (Default value = True)
"""
current_sample = None
for filesample in data:
assert isinstance(filesample, dict)
fname, value = filesample["fname"], filesample["data"]
prefix, suffix = keys(fname)
if prefix is None:
continue
if lcase:
suffix = suffix.lower()
# FIXME webdataset version throws if suffix in current_sample, but we have a potential for
# this happening in the current LAION400m dataset if a tar ends with same prefix as the next
        # begins, rare, but can happen since prefixes aren't unique across tar files in that dataset
if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample:
if valid_sample(current_sample):
yield current_sample
current_sample = dict(
__key__=prefix, __url__=filesample["__url__"])
if suffixes is None or suffix in suffixes:
current_sample[suffix] = value
if valid_sample(current_sample):
yield current_sample
def tarfile_to_samples_nothrow(src, handler=log_and_continue):
# NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw
streams = url_opener(src, handler=handler)
files = tar_file_expander(streams, handler=handler)
samples = group_by_keys_nothrow(files, handler=handler)
return samples
def pytorch_worker_seed(increment=0):
"""get dataloader worker seed from pytorch"""
worker_info = get_worker_info()
if worker_info is not None:
# favour using the seed already created for pytorch dataloader workers if it exists
seed = worker_info.seed
if increment:
# space out seed increments so they can't overlap across workers in different iterations
seed += increment * max(1, worker_info.num_workers)
return seed
# fallback to wds rank based seed
return wds.utils.pytorch_worker_seed()
_SHARD_SHUFFLE_SIZE = 2000
_SHARD_SHUFFLE_INITIAL = 500
_SAMPLE_SHUFFLE_SIZE = 5000
_SAMPLE_SHUFFLE_INITIAL = 1000
class detshuffle2(wds.PipelineStage):
def __init__(
self,
bufsize=1000,
initial=100,
seed=0,
epoch=-1,
):
self.bufsize = bufsize
self.initial = initial
self.seed = seed
self.epoch = epoch
def run(self, src):
if isinstance(self.epoch, SharedEpoch):
epoch = self.epoch.get_value()
else:
            # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
# situation as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
rng = random.Random()
if self.seed < 0:
            # If seed is negative, we use the worker's seed; this will be different across all nodes/workers
seed = pytorch_worker_seed(epoch)
else:
            # This seed is deterministic AND the same across all nodes/workers in each epoch
seed = self.seed + epoch
rng.seed(seed)
return _shuffle(src, self.bufsize, self.initial, rng)
class ResampledShards2(IterableDataset):
"""An iterable dataset yielding a list of urls."""
def __init__(
self,
urls,
weights=None,
nshards=sys.maxsize,
worker_seed=None,
deterministic=False,
epoch=-1,
):
"""Sample shards from the shard list with replacement.
:param urls: a list of URLs as a Python list or brace notation string
"""
super().__init__()
urls, weights = expand_urls(urls, weights)
self.urls = urls
self.weights = weights
if self.weights is not None:
assert len(self.urls) == len(self.weights), \
f"Number of urls {len(self.urls)} and weights {len(self.weights)} should match."
assert isinstance(self.urls[0], str)
self.nshards = nshards
self.rng = random.Random()
self.worker_seed = worker_seed
self.deterministic = deterministic
self.epoch = epoch
def __iter__(self):
"""Return an iterator over the shards."""
if isinstance(self.epoch, SharedEpoch):
epoch = self.epoch.get_value()
else:
            # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
# situation as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
if self.deterministic:
# reset seed w/ epoch if deterministic
if self.worker_seed is None:
# pytorch worker seed should be deterministic due to being init by arg.seed + rank + worker id
seed = pytorch_worker_seed(epoch)
else:
seed = self.worker_seed() + epoch
self.rng.seed(seed)
for _ in range(self.nshards):
if self.weights is None:
yield dict(url=self.rng.choice(self.urls))
else:
yield dict(url=self.rng.choices(self.urls, weights=self.weights, k=1)[0])
def get_wds_dataset(args, preprocess_img, is_train, epoch=0, floor=False, tokenizer=None):
input_shards = args.train_data if is_train else args.val_data
assert input_shards is not None
resampled = getattr(args, 'dataset_resampled', False) and is_train
num_shards = None
if is_train:
if args.train_num_samples is not None:
num_samples = args.train_num_samples
else:
num_samples, num_shards = get_dataset_size(input_shards)
if not num_samples:
raise RuntimeError(
'Currently, the number of dataset samples must be specified for the training dataset. '
'Please specify it via `--train-num-samples` if no dataset length info is present.')
else:
# Eval will just exhaust the iterator if the size is not specified.
num_samples = args.val_num_samples or 0
# create a shared epoch store to sync epoch to dataloader worker proc
shared_epoch = SharedEpoch(epoch=epoch)
if is_train and args.train_data_upsampling_factors is not None:
assert resampled, "--train_data_upsampling_factors is only supported when sampling with replacement (with --dataset-resampled)."
if resampled:
pipeline = [ResampledShards2(
input_shards,
weights=args.train_data_upsampling_factors,
deterministic=True,
epoch=shared_epoch,
)]
else:
pipeline = [wds.SimpleShardList(input_shards)]
# at this point we have an iterator over all the shards
if is_train:
if not resampled:
pipeline.extend([
detshuffle2(
bufsize=_SHARD_SHUFFLE_SIZE,
initial=_SHARD_SHUFFLE_INITIAL,
seed=args.seed,
epoch=shared_epoch,
),
wds.split_by_node,
wds.split_by_worker,
])
pipeline.extend([
# at this point, we have an iterator over the shards assigned to each worker at each node
# wds.tarfile_to_samples(handler=log_and_continue),
tarfile_to_samples_nothrow,
wds.shuffle(
bufsize=_SAMPLE_SHUFFLE_SIZE,
initial=_SAMPLE_SHUFFLE_INITIAL,
),
])
else:
pipeline.extend([
wds.split_by_worker,
# at this point, we have an iterator over the shards assigned to each worker
wds.tarfile_to_samples(handler=log_and_continue),
])
pipeline.extend([
wds.select(filter_no_caption_or_no_image),
wds.decode("pilrgb", handler=log_and_continue),
wds.rename(image="jpg;png;jpeg;webp", text="txt"),
wds.map_dict(image=preprocess_img,
text=lambda text: tokenizer(text)[0]),
wds.to_tuple("image", "text"),
wds.batched(args.batch_size, partial=not is_train)
])
dataset = wds.DataPipeline(*pipeline)
if is_train:
if not resampled:
num_shards = num_shards or len(expand_urls(input_shards)[0])
assert num_shards >= args.workers * \
args.world_size, 'number of shards must be >= total workers'
# roll over and repeat a few samples to get same number of full batches on each node
round_fn = math.floor if floor else math.ceil
global_batch_size = args.batch_size * args.world_size
num_batches = round_fn(num_samples / global_batch_size)
num_workers = max(1, args.workers)
num_worker_batches = round_fn(
num_batches / num_workers) # per dataloader worker
num_batches = num_worker_batches * num_workers
num_samples = num_batches * global_batch_size
# each worker is iterating over this
dataset = dataset.with_epoch(num_worker_batches)
else:
# last batches are partial, eval is done on single (master) node
num_batches = math.ceil(num_samples / args.batch_size)
dataloader = wds.WebLoader(
dataset,
batch_size=None,
shuffle=False,
num_workers=args.workers,
persistent_workers=args.workers > 0,
)
# FIXME not clear which approach is better, with_epoch before vs after dataloader?
# hoping to resolve via https://github.com/webdataset/webdataset/issues/169
# if is_train:
# # roll over and repeat a few samples to get same number of full batches on each node
# global_batch_size = args.batch_size * args.world_size
# num_batches = math.ceil(num_samples / global_batch_size)
# num_workers = max(1, args.workers)
# num_batches = math.ceil(num_batches / num_workers) * num_workers
# num_samples = num_batches * global_batch_size
# dataloader = dataloader.with_epoch(num_batches)
# else:
# # last batches are partial, eval is done on single (master) node
# num_batches = math.ceil(num_samples / args.batch_size)
# add meta-data to dataloader instance for convenience
dataloader.num_batches = num_batches
dataloader.num_samples = num_samples
return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch)
def get_csv_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None):
input_filename = args.train_data if is_train else args.val_data
assert input_filename
dataset = CsvDataset(
input_filename,
preprocess_fn,
img_key=args.csv_img_key,
caption_key=args.csv_caption_key,
sep=args.csv_separator,
tokenizer=tokenizer
)
num_samples = len(dataset)
sampler = DistributedSampler(
dataset) if args.distributed and is_train else None
shuffle = is_train and sampler is None
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=shuffle,
num_workers=args.workers,
pin_memory=True,
sampler=sampler,
drop_last=is_train,
)
dataloader.num_samples = num_samples
dataloader.num_batches = len(dataloader)
return DataInfo(dataloader, sampler)
class SyntheticDataset(Dataset):
def __init__(
self,
transform=None,
image_size=(224, 224),
caption="Dummy caption",
dataset_size=100,
tokenizer=None,
):
self.transform = transform
self.image_size = image_size
self.caption = caption
self.image = Image.new('RGB', image_size)
self.dataset_size = dataset_size
self.preprocess_txt = lambda text: tokenizer(text)[0]
def __len__(self):
return self.dataset_size
def __getitem__(self, idx):
if self.transform is not None:
image = self.transform(self.image)
return image, self.preprocess_txt(self.caption)
def get_synthetic_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None):
image_size = preprocess_fn.transforms[0].size
dataset = SyntheticDataset(
transform=preprocess_fn, image_size=image_size, dataset_size=args.train_num_samples, tokenizer=tokenizer)
num_samples = len(dataset)
sampler = DistributedSampler(
dataset) if args.distributed and is_train else None
shuffle = is_train and sampler is None
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=shuffle,
num_workers=args.workers,
pin_memory=True,
sampler=sampler,
drop_last=is_train,
)
dataloader.num_samples = num_samples
dataloader.num_batches = len(dataloader)
return DataInfo(dataloader, sampler)
def get_dataset_fn(data_path, dataset_type):
if dataset_type == "webdataset":
return get_wds_dataset
elif dataset_type == "csv":
return get_csv_dataset
elif dataset_type == "synthetic":
return get_synthetic_dataset
elif dataset_type == "auto":
ext = data_path.split('.')[-1]
if ext in ['csv', 'tsv']:
return get_csv_dataset
elif ext in ['tar']:
return get_wds_dataset
else:
raise ValueError(
f"Tried to figure out dataset type, but failed for extension {ext}.")
else:
raise ValueError(f"Unsupported dataset type: {dataset_type}")
def get_data(args, preprocess_fns, epoch=0, tokenizer=None):
preprocess_train, preprocess_val = preprocess_fns
data = {}
if args.train_data or args.dataset_type == "synthetic":
data["train"] = get_dataset_fn(args.train_data, args.dataset_type)(
args, preprocess_train, is_train=True, epoch=epoch, tokenizer=tokenizer)
if args.val_data:
data["val"] = get_dataset_fn(args.val_data, args.dataset_type)(
args, preprocess_val, is_train=False, tokenizer=tokenizer)
if args.imagenet_val is not None:
data["imagenet-val"] = get_imagenet(args, preprocess_fns, "val")
if args.imagenet_v2 is not None:
data["imagenet-v2"] = get_imagenet(args, preprocess_fns, "v2")
return data
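if __name__ == "__main__":
    # Minimal sketch using the synthetic dataset path above; the argument namespace is
    # hypothetical and only mirrors the attribute names read by these helpers.
    from argparse import Namespace
    from torchvision import transforms

    args = Namespace(train_data=None, val_data=None, dataset_type="synthetic",
                     train_num_samples=16, batch_size=4, workers=0, distributed=False,
                     imagenet_val=None, imagenet_v2=None)
    preprocess = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])

    def dummy_tokenizer(text):
        # stand-in for a real CLIP tokenizer: returns a single row of token ids
        return torch.zeros(1, 77, dtype=torch.long)

    data = get_data(args, (preprocess, preprocess), tokenizer=dummy_tokenizer)
    images, texts = next(iter(data["train"].dataloader))
    print(images.shape, texts.shape)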
|
Cream/TinyCLIP/src/training/data.py/0
|
{
"file_path": "Cream/TinyCLIP/src/training/data.py",
"repo_id": "Cream",
"token_count": 9288
}
| 306 |
MODEL:
TYPE: clip_vit_large14_224
TRAIN:
EPOCHS: 90
DATA:
MEAN_AND_STD_TYPE: clip
DATASET: imagenet22k
AUG:
MIXUP: 0.0
CUTMIX: 0.0
|
Cream/TinyViT/configs/teacher/clip_vit_large_patch14_22k.yaml/0
|
{
"file_path": "Cream/TinyViT/configs/teacher/clip_vit_large_patch14_22k.yaml",
"repo_id": "Cream",
"token_count": 83
}
| 307 |
from .parser_factory import create_parser
|
Cream/TinyViT/data/augmentation/parsers/__init__.py/0
|
{
"file_path": "Cream/TinyViT/data/augmentation/parsers/__init__.py",
"repo_id": "Cream",
"token_count": 11
}
| 308 |
# --------------------------------------------------------
# TinyViT Utils
# Copyright (c) 2022 Microsoft
# --------------------------------------------------------
import torch
import torch.distributed as dist
def get_dist_backend():
if not dist.is_available():
return None
if not dist.is_initialized():
return None
return dist.get_backend()
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self):
self._use_gpu = get_dist_backend() == 'nccl'
self.reset()
def reset(self):
# local
self._val = 0
self._sum = 0
self._count = 0
# global
self._history_avg = 0
self._history_count = 0
self._avg = None
def update(self, val, n=1):
self._val = val
self._sum += val * n
self._count += n
self._avg = None
@property
def val(self):
return self._val
@property
def count(self):
return self._count + self._history_count
@property
def avg(self):
if self._avg is None:
# compute avg
r = self._history_count / max(1, self._history_count + self._count)
_avg = self._sum / max(1, self._count)
self._avg = r * self._history_avg + (1.0 - r) * _avg
return self._avg
def sync(self):
buf = torch.tensor([self._sum, self._count],
dtype=torch.float32)
if self._use_gpu:
buf = buf.cuda()
dist.all_reduce(buf, op=dist.ReduceOp.SUM)
_sum, _count = buf.tolist()
_avg = _sum / max(1, _count)
r = self._history_count / max(1, self._history_count + _count)
self._history_avg = r * self._history_avg + (1.0 - r) * _avg
self._history_count += _count
self._sum = 0
self._count = 0
self._avg = None
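if __name__ == '__main__':
    # Minimal usage sketch (single process, no distributed backend initialized);
    # illustrative only.
    meter = AverageMeter()
    for step, loss in enumerate([0.9, 0.7, 0.5], start=1):
        meter.update(loss, n=8)  # pretend each step processed a batch of 8 samples
        print(f'step {step}: val={meter.val:.2f} avg={meter.avg:.2f}')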
|
Cream/TinyViT/my_meter.py/0
|
{
"file_path": "Cream/TinyViT/my_meter.py",
"repo_id": "Cream",
"token_count": 866
}
| 309 |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Train and eval functions used in main.py
"""
import math
import os
import sys
from typing import Iterable
import torch
import util.misc as utils
from datasets.coco_eval import CocoEvaluator
from datasets.panoptic_eval import PanopticEvaluator
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, max_norm: float = 0):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss_value = losses_reduced_scaled.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
metric_logger.update(class_error=loss_dict_reduced['class_error'])
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
header = 'Test:'
iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys())
coco_evaluator = CocoEvaluator(base_ds, iou_types)
# coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
panoptic_evaluator = None
if 'panoptic' in postprocessors.keys():
panoptic_evaluator = PanopticEvaluator(
data_loader.dataset.ann_file,
data_loader.dataset.ann_folder,
output_dir=os.path.join(output_dir, "panoptic_eval"),
)
for samples, targets in metric_logger.log_every(data_loader, 10, header):
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
outputs = model(samples)
loss_dict = criterion(outputs, targets)
weight_dict = criterion.weight_dict
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_scaled = {k: v * weight_dict[k]
for k, v in loss_dict_reduced.items() if k in weight_dict}
loss_dict_reduced_unscaled = {f'{k}_unscaled': v
for k, v in loss_dict_reduced.items()}
metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()),
**loss_dict_reduced_scaled,
**loss_dict_reduced_unscaled)
metric_logger.update(class_error=loss_dict_reduced['class_error'])
orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0)
results = postprocessors['bbox'](outputs, orig_target_sizes)
if 'segm' in postprocessors.keys():
target_sizes = torch.stack([t["size"] for t in targets], dim=0)
results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes)
res = {target['image_id'].item(): output for target, output in zip(targets, results)}
if coco_evaluator is not None:
coco_evaluator.update(res)
if panoptic_evaluator is not None:
res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes)
for i, target in enumerate(targets):
image_id = target["image_id"].item()
file_name = f"{image_id:012d}.png"
res_pano[i]["image_id"] = image_id
res_pano[i]["file_name"] = file_name
panoptic_evaluator.update(res_pano)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
if coco_evaluator is not None:
coco_evaluator.synchronize_between_processes()
if panoptic_evaluator is not None:
panoptic_evaluator.synchronize_between_processes()
# accumulate predictions from all images
if coco_evaluator is not None:
coco_evaluator.accumulate()
coco_evaluator.summarize()
panoptic_res = None
if panoptic_evaluator is not None:
panoptic_res = panoptic_evaluator.summarize()
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
if coco_evaluator is not None:
if 'bbox' in postprocessors.keys():
stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist()
if 'segm' in postprocessors.keys():
stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist()
if panoptic_res is not None:
stats['PQ_all'] = panoptic_res["All"]
stats['PQ_th'] = panoptic_res["Things"]
stats['PQ_st'] = panoptic_res["Stuff"]
return stats, coco_evaluator
|
Cream/iRPE/DETR-with-iRPE/engine.py/0
|
{
"file_path": "Cream/iRPE/DETR-with-iRPE/engine.py",
"repo_id": "Cream",
"token_count": 2985
}
| 310 |
OUTPUT_DIR: 'OUTPUT/'
WORKERS: 6
PRINT_FREQ: 500
AMP:
ENABLED: true
MODEL:
NAME: cls_cvt
SPEC:
INIT: 'trunc_norm'
NUM_STAGES: 3
PATCH_SIZE: [7, 3, 3]
PATCH_STRIDE: [4, 2, 2]
PATCH_PADDING: [2, 1, 1]
DIM_EMBED: [64, 192, 384]
NUM_HEADS: [1, 3, 6]
DEPTH: [1, 2, 10]
MLP_RATIO: [4.0, 4.0, 4.0]
ATTN_DROP_RATE: [0.0, 0.0, 0.0]
DROP_RATE: [0.0, 0.0, 0.0]
DROP_PATH_RATE: [0.0, 0.0, 0.1]
QKV_BIAS: [True, True, True]
CLS_TOKEN: [False, False, True]
POS_EMBED: [False, False, False]
QKV_PROJ_METHOD: ['dw_bn', 'dw_bn', 'dw_bn']
KERNEL_QKV: [3, 3, 3]
PADDING_KV: [1, 1, 1]
STRIDE_KV: [2, 2, 2]
PADDING_Q: [1, 1, 1]
STRIDE_Q: [1, 1, 1]
AUG:
MIXUP_PROB: 1.0
MIXUP: 0.8
MIXCUT: 1.0
TIMM_AUG:
USE_LOADER: true
RE_COUNT: 1
RE_MODE: pixel
RE_SPLIT: false
RE_PROB: 0.25
AUTO_AUGMENT: rand-m9-mstd0.5-inc1
HFLIP: 0.5
VFLIP: 0.0
COLOR_JITTER: 0.4
INTERPOLATION: bicubic
LOSS:
LABEL_SMOOTHING: 0.1
CUDNN:
BENCHMARK: true
DETERMINISTIC: false
ENABLED: true
DATASET:
DATASET: 'imagenet'
DATA_FORMAT: 'jpg'
ROOT: 'DATASET/imagenet/'
TEST_SET: 'val'
TRAIN_SET: 'train'
TEST:
BATCH_SIZE_PER_GPU: 32
IMAGE_SIZE: [224, 224]
MODEL_FILE: ''
INTERPOLATION: 3
TRAIN:
BATCH_SIZE_PER_GPU: 256
LR: 0.00025
IMAGE_SIZE: [224, 224]
BEGIN_EPOCH: 0
END_EPOCH: 300
LR_SCHEDULER:
METHOD: 'timm'
ARGS:
sched: 'cosine'
warmup_epochs: 5
warmup_lr: 0.000001
min_lr: 0.00001
cooldown_epochs: 10
decay_rate: 0.1
OPTIMIZER: adamW
WD: 0.05
WITHOUT_WD_LIST: ['bn', 'bias', 'ln']
SHUFFLE: true
DEBUG:
DEBUG: false
|
CvT/experiments/imagenet/cvt/cvt-13-224x224.yaml/0
|
{
"file_path": "CvT/experiments/imagenet/cvt/cvt-13-224x224.yaml",
"repo_id": "CvT",
"token_count": 981
}
| 311 |
from .build import build_transforms
|
CvT/lib/dataset/transformas/__init__.py/0
|
{
"file_path": "CvT/lib/dataset/transformas/__init__.py",
"repo_id": "CvT",
"token_count": 9
}
| 312 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
import os
import pprint
import time
import torch
import torch.nn.parallel
import torch.optim
from torch.utils.collect_env import get_pretty_env_info
from tensorboardX import SummaryWriter
import _init_paths
from config import config
from config import update_config
from config import save_config
from core.loss import build_criterion
from core.function import train_one_epoch, test
from dataset import build_dataloader
from models import build_model
from optim import build_optimizer
from scheduler import build_lr_scheduler
from utils.comm import comm
from utils.utils import create_logger
from utils.utils import init_distributed
from utils.utils import setup_cudnn
from utils.utils import summary_model_on_master
from utils.utils import resume_checkpoint
from utils.utils import save_checkpoint_on_master
from utils.utils import save_model_on_master
def parse_args():
parser = argparse.ArgumentParser(
description='Train classification network')
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
# distributed training
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument("--port", type=int, default=9000)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
return args
def main():
args = parse_args()
init_distributed(args)
setup_cudnn(config)
update_config(config, args)
final_output_dir = create_logger(config, args.cfg, 'train')
tb_log_dir = final_output_dir
if comm.is_main_process():
logging.info("=> collecting env info (might take some time)")
logging.info("\n" + get_pretty_env_info())
logging.info(pprint.pformat(args))
logging.info(config)
logging.info("=> using {} GPUs".format(args.num_gpus))
output_config_path = os.path.join(final_output_dir, 'config.yaml')
logging.info("=> saving config into: {}".format(output_config_path))
save_config(config, output_config_path)
model = build_model(config)
model.to(torch.device('cuda'))
# copy model file
summary_model_on_master(model, config, final_output_dir, True)
if config.AMP.ENABLED and config.AMP.MEMORY_FORMAT == 'nhwc':
logging.info('=> convert memory format to nhwc')
model.to(memory_format=torch.channels_last)
writer_dict = {
'writer': SummaryWriter(logdir=tb_log_dir),
'train_global_steps': 0,
'valid_global_steps': 0,
}
best_perf = 0.0
best_model = True
begin_epoch = config.TRAIN.BEGIN_EPOCH
optimizer = build_optimizer(config, model)
best_perf, begin_epoch = resume_checkpoint(
model, optimizer, config, final_output_dir, True
)
train_loader = build_dataloader(config, True, args.distributed)
valid_loader = build_dataloader(config, False, args.distributed)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True
)
criterion = build_criterion(config)
criterion.cuda()
criterion_eval = build_criterion(config, train=False)
criterion_eval.cuda()
lr_scheduler = build_lr_scheduler(config, optimizer, begin_epoch)
scaler = torch.cuda.amp.GradScaler(enabled=config.AMP.ENABLED)
logging.info('=> start training')
for epoch in range(begin_epoch, config.TRAIN.END_EPOCH):
head = 'Epoch[{}]:'.format(epoch)
logging.info('=> {} epoch start'.format(head))
start = time.time()
if args.distributed:
train_loader.sampler.set_epoch(epoch)
# train for one epoch
logging.info('=> {} train start'.format(head))
with torch.autograd.set_detect_anomaly(config.TRAIN.DETECT_ANOMALY):
train_one_epoch(config, train_loader, model, criterion, optimizer,
epoch, final_output_dir, tb_log_dir, writer_dict,
scaler=scaler)
logging.info(
'=> {} train end, duration: {:.2f}s'
.format(head, time.time()-start)
)
# evaluate on validation set
logging.info('=> {} validate start'.format(head))
val_start = time.time()
if epoch >= config.TRAIN.EVAL_BEGIN_EPOCH:
perf = test(
config, valid_loader, model, criterion_eval,
final_output_dir, tb_log_dir, writer_dict,
args.distributed
)
best_model = (perf > best_perf)
best_perf = perf if best_model else best_perf
logging.info(
'=> {} validate end, duration: {:.2f}s'
.format(head, time.time()-val_start)
)
lr_scheduler.step(epoch=epoch+1)
if config.TRAIN.LR_SCHEDULER.METHOD == 'timm':
lr = lr_scheduler.get_epoch_values(epoch+1)[0]
else:
lr = lr_scheduler.get_last_lr()[0]
logging.info(f'=> lr: {lr}')
save_checkpoint_on_master(
model=model,
distributed=args.distributed,
model_name=config.MODEL.NAME,
optimizer=optimizer,
output_dir=final_output_dir,
in_epoch=True,
epoch_or_step=epoch,
best_perf=best_perf,
)
if best_model and comm.is_main_process():
save_model_on_master(
model, args.distributed, final_output_dir, 'model_best.pth'
)
if config.TRAIN.SAVE_ALL_MODELS and comm.is_main_process():
save_model_on_master(
model, args.distributed, final_output_dir, f'model_{epoch}.pth'
)
logging.info(
'=> {} epoch end, duration : {:.2f}s'
.format(head, time.time()-start)
)
save_model_on_master(
model, args.distributed, final_output_dir, 'final_state.pth'
)
if config.SWA.ENABLED and comm.is_main_process():
save_model_on_master(
            model, args.distributed, final_output_dir, 'swa_state.pth'
)
writer_dict['writer'].close()
logging.info('=> finish training')
if __name__ == '__main__':
main()
|
CvT/tools/train.py/0
|
{
"file_path": "CvT/tools/train.py",
"repo_id": "CvT",
"token_count": 2935
}
| 313 |
import numpy as np
cimport numpy as np
import array
import bisect
cpdef float sorted_median(float[:] data, int i, int j):
cdef int n = j - i
cdef int mid
if n == 0:
raise Exception("no median for empty data")
if n % 2 == 1:
return data[i + n // 2]
else:
mid = i + n // 2
return (data[mid - 1] + data[mid])/2
cpdef median_filter(np.ndarray data, int window, bint need_two_end=False):
cdef int w_len = window // 2 * 2 + 1
cdef int t_len = len(data)
cdef float[:] val = array.array('f', [x for x in data])
cdef float[:] ans = array.array('f', [x for x in data])
cdef float[:] cur_windows = array.array('f', [0 for x in range(w_len)])
cdef int delete_id
cdef int add_id
cdef int index
if t_len < w_len:
return ans
for i in range(0, w_len):
index = i
add_id = bisect.bisect_right(cur_windows[:i], val[i])
while index > add_id:
cur_windows[index] = cur_windows[index - 1]
index -= 1
cur_windows[add_id] = data[i]
if i >= w_len // 2 and need_two_end:
ans[i - w_len // 2] = sorted_median(cur_windows, 0, i + 1)
ans[window // 2] = sorted_median(cur_windows, 0, w_len)
for i in range(window // 2 + 1, t_len - window // 2):
delete_id = bisect.bisect_right(cur_windows, val[i - window // 2 - 1]) - 1
index = delete_id
while index < w_len - 1:
cur_windows[index] = cur_windows[index + 1]
index += 1
add_id = bisect.bisect_right(cur_windows[:w_len - 1], val[i + window // 2])
index = w_len - 1
while index > add_id:
cur_windows[index] = cur_windows[index - 1]
index -= 1
cur_windows[add_id] = data[i + window // 2]
ans[i] = sorted_median(cur_windows, 0, w_len)
if need_two_end:
for i in range(t_len - window // 2, t_len):
delete_id = bisect.bisect_right(cur_windows[: w_len], data[i - window // 2 - 1]) - 1
index = delete_id
while index < w_len - 1:
cur_windows[index] = cur_windows[index + 1]
index += 1
w_len -= 1
ans[i] = sorted_median(cur_windows[: w_len], 0, w_len)
return ans
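# Usage sketch (assumes the extension has been built, e.g. via the package setup;
# the module path below is illustrative):
#   from msanomalydetector._anomaly_kernel_cython import median_filter
#   smoothed = median_filter(np.array([5., 1., 3., 9., 2.], dtype=np.float32), 3)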
|
anomalydetector/msanomalydetector/_anomaly_kernel_cython.pyx/0
|
{
"file_path": "anomalydetector/msanomalydetector/_anomaly_kernel_cython.pyx",
"repo_id": "anomalydetector",
"token_count": 1096
}
| 314 |
# -*- coding: utf-8 -*-
"""
Version string and parsed tuple. Keeps it all in one place.
"""
__version__ = '1.1'
VERSION = tuple(int(x) for x in __version__.split('.'))
|
anomalydetector/version.py/0
|
{
"file_path": "anomalydetector/version.py",
"repo_id": "anomalydetector",
"token_count": 64
}
| 315 |
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0.
# https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/distributed.py
import os
from contextlib import contextmanager
from typing import Generator, Optional, Union
import torch
def init_distributed(use_cuda: bool) -> None:
"""Initialize distributed backend for parallel training.
This method sets up the distributed backend for parallel training based on the specified
`use_cuda` flag. If `use_cuda` is `True`, it initializes the distributed mode using the
CUDA/NCCL backend. Otherwise, it uses the Gloo backend.
Args:
use_cuda: Whether to initialize the distributed mode using the CUDA/NCCL backend.
Raises:
AssertionError: If the distributed mode is not initialized successfully.
"""
world_size = int(os.environ.get("WORLD_SIZE", 1))
distributed = world_size > 1
if distributed:
backend = "nccl" if use_cuda else "gloo"
torch.distributed.init_process_group(backend=backend, init_method="env://")
assert torch.distributed.is_initialized()
def barrier() -> None:
"""Synchronize all processes in the distributed backend.
This method calls the `torch.distributed.barrier` function if the distributed mode is
available and initialized. The `barrier` function synchronizes all processes in the
distributed backend by blocking the processes until all processes have reached this point.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_rank() -> int:
"""Get the rank of the current process in the distributed backend.
Returns:
The rank of the current process in the distributed backend. If the distributed mode
is not available or not initialized, the returned rank will be `0`.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
return torch.distributed.get_rank()
return 0
def get_world_size() -> int:
"""Get the total number of processes in the distributed backend.
Returns:
The total number of processes in the distributed backend. If the distributed mode
is not available or not initialized, the returned world size will be `1`.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
return torch.distributed.get_world_size()
return 1
def all_reduce(tensor: Union[int, float, torch.Tensor], op: Optional[str] = "sum") -> Union[int, float]:
"""Reduce the input tensor/value into a scalar using the specified reduction operator.
This method applies the specified reduction operator to the input tensor/value in a distributed
manner. The result is a scalar value that is computed by aggregating the values from all
processes in the distributed backend.
Args:
tensor: Input tensor/value to be reduced.
op: Type of reduction operator. The supported operators are "sum", "mean",
"min", "max", and "product".
Returns:
The scalar value obtained by applying the reduction operator to the input
tensor/value. If the distributed mode is not available or not initialized,
        the input tensor/value is returned as is.
Raises:
RuntimeError: If the specified reduction operator is not supported.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == "sum" or op == "mean":
torch_op = torch.distributed.ReduceOp.SUM
elif op == "min":
torch_op = torch.distributed.ReduceOp.MIN
elif op == "max":
torch_op = torch.distributed.ReduceOp.MAX
elif op == "product":
torch_op = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError(f"Operator: {op} is not supported yet.")
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device("cuda")
elif backend == torch.distributed.Backend.GLOO:
device = torch.device("cpu")
else:
raise RuntimeError(f"Distributed backend: {backend} is not supported yet.")
tensor = torch.tensor(tensor, device=device)
torch.distributed.all_reduce(tensor, torch_op)
if op == "mean":
tensor /= get_world_size()
return tensor.item()
return tensor
@contextmanager
def sync_workers() -> Generator[int, None, None]:
"""Context manager for synchronizing the processes in the distributed backend.
This context manager yields the rank of the current process in the distributed backend and
synchronizes all processes on exit.
Yields:
The rank of the current process in the distributed backend.
Example:
>>> with sync_workers():
>>> # Execute some code that should be synchronized across all processes.
>>> pass
"""
rank = get_rank()
yield rank
barrier()
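if __name__ == "__main__":
    # Minimal sketch: without torch.distributed initialized, these helpers degrade to
    # single-process behaviour (rank 0, world size 1, values passed through unchanged).
    print(get_rank(), get_world_size(), all_reduce(3.0, op="mean"))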
|
archai/archai/common/distributed_utils.py/0
|
{
"file_path": "archai/archai/common/distributed_utils.py",
"repo_id": "archai",
"token_count": 1755
}
| 316 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Optional
from overrides import overrides
from torch.utils.data import Dataset
from torchvision.datasets import FGVCAircraft
from torchvision.transforms import ToTensor
from archai.api.dataset_provider import DatasetProvider
class AircraftDatasetProvider(DatasetProvider):
"""FGVC Aircraft dataset provider."""
def __init__(
self,
root: Optional[str] = "dataroot",
) -> None:
"""Initialize FGVC Aircraft dataset provider.
Args:
            root: Root directory where the dataset is saved.
"""
super().__init__()
self.root = root
@overrides
def get_train_dataset(
self,
annotation_level: Optional[str] = "variant",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return FGVCAircraft(
self.root,
split="train",
annotation_level=annotation_level,
transform=transform or ToTensor(),
target_transform=target_transform,
download=True,
)
@overrides
def get_val_dataset(
self,
annotation_level: Optional[str] = "variant",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return FGVCAircraft(
self.root,
split="val",
annotation_level=annotation_level,
transform=transform or ToTensor(),
target_transform=target_transform,
download=True,
)
@overrides
def get_test_dataset(
self,
annotation_level: Optional[str] = "variant",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return FGVCAircraft(
self.root,
split="test",
annotation_level=annotation_level,
transform=transform or ToTensor(),
target_transform=target_transform,
download=True,
)
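if __name__ == "__main__":
    # Minimal usage sketch (downloads FGVC Aircraft into `root` on first run);
    # illustrative only.
    provider = AircraftDatasetProvider(root="dataroot")
    image, target = provider.get_train_dataset()[0]
    print(image.shape, target)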
|
archai/archai/datasets/cv/aircraft_dataset_provider.py/0
|
{
"file_path": "archai/archai/datasets/cv/aircraft_dataset_provider.py",
"repo_id": "archai",
"token_count": 943
}
| 317 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import random
from typing import Tuple
import torch
class Brightness:
"""Brightness transform."""
def __init__(self, value: float) -> None:
"""Initialize the brightness transform.
Args:
value: Brightness factor, e.g., 0 = no change, 1 = completely white,
-1 = completely black, <0 = darker, >0 = brighter.
"""
self.value = max(min(value, 1.0), -1.0)
def __call__(self, *imgs: Tuple[torch.Tensor, ...]) -> torch.Tensor:
outputs = []
for idx, img in enumerate(imgs):
img = torch.clamp(img.float().add(self.value).type(img.type()), 0, 1)
outputs.append(img)
        return outputs if len(outputs) > 1 else outputs[0]
class RandomBrightness:
"""Random brightness transform."""
def __init__(self, min_val: float, max_val: float) -> None:
"""Initialize the random brightness transform.
Args:
min_val: Minimum brightness factor.
max_val: Maximum brightness factor.
"""
self.values = (min_val, max_val)
def __call__(self, *imgs: Tuple[torch.Tensor, ...]) -> torch.Tensor:
value = random.uniform(self.values[0], self.values[1])
outputs = Brightness(value)(*imgs)
return outputs
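if __name__ == "__main__":
    # Minimal usage sketch on a random image tensor in [0, 1]; illustrative only.
    img = torch.rand(3, 32, 32)
    brighter = Brightness(0.2)(img)
    jittered = RandomBrightness(-0.1, 0.1)(img)
    print(brighter.max().item(), jittered.shape)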
|
archai/archai/datasets/cv/transforms/brightness.py/0
|
{
"file_path": "archai/archai/datasets/cv/transforms/brightness.py",
"repo_id": "archai",
"token_count": 560
}
| 318 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import abstractmethod
from typing import List, Optional
import torch
from overrides import EnforceOverrides
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.datasets.nlp.tokenizer_utils.token_config import SpecialTokenEnum
logger = OrderedDictLogger(source=__name__)
class TokenizerBase(EnforceOverrides):
"""Abstract class for tokenizers.
This class serves as a base for training, encoding and decoding. The class enforces
implementation of nine methods: `__len__`, `train`, `is_trained`, `load`, `encode_text`,
`decode_text`, `special_token_id`, `token_to_id` and `id_to_token`.
Note:
        This class inherits from `EnforceOverrides` and any overridden methods in the
subclass should be decorated with `@overrides` to ensure they are properly overridden.
"""
@abstractmethod
def __len__(self) -> int:
"""Get the length of the vocabulary.
Returns:
The length of the vocabulary.
"""
pass
@abstractmethod
def train(self, filepaths: List[str]) -> None:
"""Train the tokenizer on a list of files.
Args:
filepaths: A list of paths to input files.
"""
pass
@abstractmethod
def is_trained(self) -> bool:
"""Check if the vocabulary has been trained.
Returns:
`True` if the vocabulary has been trained, `False` otherwise.
"""
pass
@abstractmethod
def load(self) -> None:
"""Load a pre-trained tokenizer."""
pass
@abstractmethod
def encode_text(self, text: str) -> List[int]:
"""Encode text into tokens.
Args:
text: The input text to encode.
Returns:
The encoded text (tokens).
"""
pass
@abstractmethod
def decode_text(self, ids: List[int]) -> str:
"""Decode tokens into text.
Args:
ids: The tokens to decode.
Returns:
The decoded tokens (text).
"""
pass
@abstractmethod
def special_token_id(self, sp: SpecialTokenEnum) -> int:
"""Get the identifier of a special token.
Args:
sp: The special token's enumerator.
Returns:
The special token's identifier.
"""
pass
@abstractmethod
def token_to_id(self, t: str) -> int:
"""Convert a string-based token to its identifier.
Args:
t: The string-based token.
Returns:
The token's identifier.
"""
pass
@abstractmethod
def id_to_token(self, id: int) -> str:
"""Convert a token identifier to its string-based representation.
Args:
id: The token's identifier.
Returns:
The string-based token.
"""
pass
def tokens_to_ids(self, ts: List[str]) -> List[int]:
"""Convert a list of string-based tokens to their corresponding identifiers.
Args:
ts: A list of string-based tokens.
Returns:
The identifiers corresponding to the input tokens.
"""
return [self.token_to_id(t) for t in ts]
def ids_to_tokens(self, ids: List[int]) -> List[str]:
"""Convert a list of tokens' identifiers to their string-based representations.
Args:
ids: A list of tokens' identifiers.
Returns:
The string-based representations of the input tokens.
"""
return [self.id_to_token(id) for id in ids]
def encode_file(self, path: str, verbose: Optional[bool] = True) -> torch.Tensor:
"""Encode text from an input file.
This method reads text from the specified file and encodes it using
the `encode_text` method. It also includes options for verbosity and
efficiently handling large datasets by converting the encoded tokens
to a `torch.Tensor` every 500k lines.
Args:
path: The path to the input file.
verbose: Whether to add verbosity to the logger.
Returns:
The encoded tokens.
"""
logger.info(f"Encoding file: {path}")
encoded = []
tensor_encoded = torch.LongTensor()
with open(path, "r", encoding="utf-8") as f:
for idx, line in enumerate(f):
# Converts to tensor.Tensor every 500k lines,
# otherwise Python list uses a lot of RAM
if idx > 0 and idx % 500000 == 0:
tensor_encoded = torch.cat((tensor_encoded, torch.LongTensor(encoded)))
encoded = []
if verbose and idx > 0 and idx % 500000 == 0:
logger.debug(f"Completed line: {format(idx)}")
tokens = self.encode_text(line)
encoded.extend(tokens)
if len(encoded) > 0:
tensor_encoded = torch.cat((tensor_encoded, torch.LongTensor(encoded)))
return tensor_encoded
|
archai/archai/datasets/nlp/tokenizer_utils/tokenizer_base.py/0
|
{
"file_path": "archai/archai/datasets/nlp/tokenizer_utils/tokenizer_base.py",
"repo_id": "archai",
"token_count": 2190
}
| 319 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import abstractmethod
from typing import List
import numpy as np
from overrides import EnforceOverrides
from archai.discrete_search.api.archai_model import ArchaiModel
class DiscreteSearchSpace(EnforceOverrides):
"""Abstract class for discrete search spaces.
This class serves as a base for implementing search spaces. The class enforces
implementation of five methods: `save_arch`, `load_arch`, `save_model_weights`,
`load_model_weights` and `random_sample`.
Note:
        This class inherits from `EnforceOverrides` and any overridden methods in the
subclass should be decorated with `@overrides` to ensure they are properly overridden.
Examples:
>>> class MyDiscreteSearchSpace(DiscreteSearchSpace):
>>> def __init__(self) -> None:
>>> super().__init__()
>>>
>>> @overrides
>>> def save_arch(self, arch, file_path) -> None:
>>> torch.save(arch, file_path)
>>>
>>> @overrides
>>> def load_arch(self, file_path) -> ArchaiModel:
>>> return torch.load(file_path)
>>>
>>> @overrides
>>> def save_model_weights(self, model, file_path) -> None:
>>> torch.save(model.state_dict(), file_path)
>>>
>>> @overrides
>>> def load_model_weights(self, model, file_path) -> None:
>>> model.load_state_dict(torch.load(file_path))
>>>
>>> @overrides
>>> def random_sample(self, config) -> ArchaiModel:
>>> return ArchaiModel(config)
"""
@abstractmethod
def save_arch(self, model: ArchaiModel, file_path: str) -> None:
"""Save an architecture to a file without saving the weights.
Args:
model: Model's architecture to save.
file_path: File path to save the architecture.
"""
pass
@abstractmethod
def load_arch(self, file_path: str) -> ArchaiModel:
"""Load from a file an architecture that was saved using `SearchSpace.save_arch()`.
Args:
file_path: File path to load the architecture.
Returns:
Loaded model.
"""
pass
@abstractmethod
def save_model_weights(self, model: ArchaiModel, file_path: str) -> None:
"""Save the weights of a model.
Args:
model: Model to save the weights.
file_path: File path to save the weights.
"""
pass
@abstractmethod
def load_model_weights(self, model: ArchaiModel, file_path: str) -> None:
"""Load the weights (created with `SearchSpace.save_model_weights()`) into a model
of the same architecture.
Args:
model: Model to load the weights.
file_path: File path to load the weights.
"""
pass
@abstractmethod
def random_sample(self) -> ArchaiModel:
"""Randomly sample an architecture from the search spaces.
Returns:
Sampled architecture.
"""
pass
class EvolutionarySearchSpace(DiscreteSearchSpace, EnforceOverrides):
"""Abstract class for discrete search spaces compatible with evolutionary algorithms.
The class enforces implementation of two methods: `mutate` and `crossover`.
Note:
        This class inherits from `EnforceOverrides` and any overridden methods in the
subclass should be decorated with `@overrides` to ensure they are properly overridden.
"""
@abstractmethod
def mutate(self, arch: ArchaiModel) -> ArchaiModel:
"""Mutate an architecture from the search space.
This method should not alter the base model architecture directly,
only generate a new one.
Args:
arch: Base model.
Returns:
Mutated model.
"""
pass
@abstractmethod
def crossover(self, arch_list: List[ArchaiModel]) -> ArchaiModel:
"""Combine a list of architectures into a new one.
Args:
arch_list: List of architectures.
Returns:
Resulting model.
"""
pass
class BayesOptSearchSpace(DiscreteSearchSpace, EnforceOverrides):
"""Abstract class for discrete search spaces compatible with Bayesian Optimization algorithms.
The class enforces implementation of a single method: `encode`.
Note:
        This class inherits from `EnforceOverrides` and any overridden methods in the
subclass should be decorated with `@overrides` to ensure they are properly overridden.
"""
@abstractmethod
def encode(self, arch: ArchaiModel) -> np.ndarray:
"""Encode an architecture into a fixed-length vector representation.
Args:
arch: Model from the search space.
Returns:
Fixed-length vector representation of `arch`.
"""
pass
|
archai/archai/discrete_search/api/search_space.py/0
|
{
"file_path": "archai/archai/discrete_search/api/search_space.py",
"repo_id": "archai",
"token_count": 1989
}
| 320 |
# Copyright (c) DeepSpeed Team - Microsoft Corporation.
# Licensed under the MIT License.
# https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/profiling/flops_profiler/profiler.py
import time
from functools import partial
from typing import List, Optional
import torch
from archai.discrete_search.evaluators.pt_profiler_utils.pt_profiler_hooks import (
FLOPS,
MACS,
disable_functional_hooks,
disable_tensor_hooks,
enable_functional_hooks,
enable_tensor_hooks,
)
class ProfilerModel:
"""Prepare a model to be used with profilling."""
def __init__(self, model: torch.nn.Module) -> None:
"""Initialize with custom arguments and keyword arguments.
Args:
model: Pre-trained model.
"""
self.model = model
self.is_profiling = False
self.is_patched = False
def start(self, ignore_layers: Optional[List[str]] = None) -> None:
"""Start profiling.
Args:
ignore_layers: Layers to be ignored when profiling.
"""
self.reset()
enable_functional_hooks()
enable_tensor_hooks()
def register_hooks(module: torch.nn.Module, ignore_layers: List[str]) -> None:
if ignore_layers and type(module) in ignore_layers:
return
def pre_hook(module: torch.nn.Module, input: torch.Tensor):
FLOPS.append([])
MACS.append([])
if not hasattr(module, "__pre_hook_handle__"):
module.__pre_hook_handle__ = module.register_forward_pre_hook(pre_hook)
def post_hook(module: torch.nn.Module, input: torch.Tensor, output: torch.Tensor):
if FLOPS:
module.__flops__ += sum([elem[1] for elem in FLOPS[-1]])
FLOPS.pop()
module.__macs__ += sum([elem[1] for elem in MACS[-1]])
MACS.pop()
if not hasattr(module, "__post_hook_handle__"):
module.__post_hook_handle__ = module.register_forward_hook(post_hook)
def start_time_hook(module: torch.nn.Module, input: torch.Tensor):
if torch.cuda.is_available():
torch.cuda.synchronize()
module.__start_time__ = time.time()
if not hasattr(module, "__start_time_hook_handle"):
module.__start_time_hook_handle__ = module.register_forward_pre_hook(start_time_hook)
def end_time_hook(module: torch.nn.Module, input: torch.Tensor, output: torch.Tensor):
if torch.cuda.is_available():
torch.cuda.synchronize()
module.__latency__ += time.time() - module.__start_time__
if not hasattr(module, "__end_time_hook_handle__"):
module.__end_time_hook_handle__ = module.register_forward_hook(end_time_hook)
def peak_memory_hook(module: torch.nn.Module, input: torch.Tensor, output: torch.Tensor):
if torch.cuda.is_available():
module.__peak_memory__ = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
if not hasattr(module, "__peak_memory_hook_handle__"):
module.__peak_memory_hook_handle__ = module.register_forward_hook(peak_memory_hook)
self.model.apply(partial(register_hooks, ignore_layers=ignore_layers))
self.is_profiling = True
self.is_patched = True
def stop(self) -> None:
"""Stop profiling."""
if self.is_profiling and self.is_patched:
disable_functional_hooks()
disable_tensor_hooks()
self.is_patched = False
def remove_hooks(module: torch.nn.Module) -> None:
if hasattr(module, "__pre_hook_handle__"):
module.__pre_hook_handle__.remove()
del module.__pre_hook_handle__
if hasattr(module, "__post_hook_handle__"):
module.__post_hook_handle__.remove()
del module.__post_hook_handle__
if hasattr(module, "__flops_handle__"):
module.__flops_handle__.remove()
del module.__flops_handle__
if hasattr(module, "__start_time_hook_handle__"):
module.__start_time_hook_handle__.remove()
del module.__start_time_hook_handle__
if hasattr(module, "__end_time_hook_handle__"):
module.__end_time_hook_handle__.remove()
del module.__end_time_hook_handle__
if hasattr(module, "__peak_memory_hook_handle__"):
module.__peak_memory_hook_handle__.remove()
del module.__peak_memory_hook_handle__
self.model.apply(remove_hooks)
def reset(self) -> None:
"""Reset the profiler."""
def reset_attrs(module: torch.nn.Module) -> None:
module.__flops__ = 0
module.__macs__ = 0
module.__params__ = sum(p.numel() for p in module.parameters())
module.__start_time__ = 0
module.__latency__ = 0
module.__peak_memory__ = 0
self.model.apply(reset_attrs)
def end(self) -> None:
"""End the profiler."""
if not self.is_profiling:
return
self.stop()
self.is_profiling = False
def remove_attrs(module: torch.nn.Module) -> None:
if hasattr(module, "__flops__"):
del module.__flops__
if hasattr(module, "__macs__"):
del module.__macs__
if hasattr(module, "__params__"):
del module.__params__
if hasattr(module, "__start_time__"):
del module.__start_time__
if hasattr(module, "__latency__"):
del module.__latency__
if hasattr(module, "__peak_memory__"):
del module.__peak_memory__
self.model.apply(remove_attrs)
def get_flops(self) -> int:
"""Get the model's number of FLOPs.
Returns:
Number of floating point operations.
"""
def _get(module: torch.nn.Module) -> int:
flops = module.__flops__
for child in module.children():
flops += _get(child)
return flops
return _get(self.model)
def get_macs(self) -> int:
"""Get the model's number of MACs.
Returns:
Number of multiply-accumulate operations.
"""
def _get(module: torch.nn.Module) -> int:
macs = module.__macs__
for child in module.children():
macs += _get(child)
return macs
return _get(self.model)
def get_params(self) -> int:
"""Get the model's total number of parameters.
Returns:
Number of parameters.
"""
return self.model.__params__
def get_latency(self) -> float:
"""Get the model's latency.
Returns:
Latency (seconds).
"""
def _get(module: torch.nn.Module) -> int:
latency = module.__latency__
if latency == 0:
for child in module.children():
latency += child.__latency__
return latency
return _get(self.model)
def get_peak_memory(self) -> float:
"""Get the model's peak memory.
Returns:
Peak memory (bytes).
"""
def _get(module: torch.nn.Module) -> int:
peak_memory = [module.__peak_memory__]
for child in module.children():
peak_memory += [_get(child)]
return max(peak_memory)
return _get(self.model)
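if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original file).
    # Shows the intended start -> forward -> stop -> read -> end lifecycle on a toy
    # model; the reported numbers depend on the functional/tensor hooks patched above.
    toy_model = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.ReLU(), torch.nn.Linear(32, 8))
    profiler = ProfilerModel(toy_model)
    profiler.start()
    with torch.no_grad():
        toy_model(torch.randn(4, 16))
    profiler.stop()
    print("FLOPs:", profiler.get_flops())
    print("MACs:", profiler.get_macs())
    print("Params:", profiler.get_params())
    print("Latency (s):", profiler.get_latency())
    profiler.end()  # removes the temporary __flops__/__macs__/... attributes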
|
archai/archai/discrete_search/evaluators/pt_profiler_utils/pt_profiler_model.py/0
|
{
"file_path": "archai/archai/discrete_search/evaluators/pt_profiler_utils/pt_profiler_model.py",
"repo_id": "archai",
"token_count": 3755
}
| 321 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
from typing import Dict, Optional
import torch
import torch.nn as nn
from flash_attn.modules.mha import MHA
from flash_attn.modules.mlp import FusedMLP
from transformers.modeling_outputs import CausalLMOutput
from transformers.models.codegen.configuration_codegen import CodeGenConfig
from transformers.models.codegen.modeling_codegen import (
CodeGenAttention,
CodeGenMLP,
CodeGenPreTrainedModel,
apply_rotary_pos_emb,
fixed_pos_embedding,
)
from xformers.ops import LowerTriangularMask, memory_efficient_attention
class CodeGenFlashConfig(CodeGenConfig):
model_type = "codegen-flash"
def __init__(
self,
*args,
pad_vocab_size_multiple: Optional[int] = 1,
attn_type: Optional[str] = "default",
use_fused_mlp: Optional[bool] = False,
**kwargs
) -> None:
super().__init__(*args, **kwargs)
self.vocab_size = int(math.ceil(self.vocab_size / pad_vocab_size_multiple) * pad_vocab_size_multiple)
assert attn_type in [
"default",
"flash",
"xformers",
], "`attn_type` should be one of: `default`, `flash` or `xformers`."
self.attn_type = attn_type
self.use_fused_mlp = use_fused_mlp
class CodeGenFlashEmbedding(nn.Module):
def __init__(self, config: CodeGenFlashConfig) -> None:
super().__init__()
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
def forward(self, input_ids: torch.LongTensor) -> torch.FloatTensor:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
hidden_states = self.wte(input_ids)
hidden_states = self.drop(hidden_states)
return hidden_states
class CodeGenXAttention(CodeGenAttention):
def __init__(self, config):
super().__init__(config)
def _merge_heads(self, tensor, num_attention_heads, attn_head_size):
new_shape = tensor.size()[:-2] + (num_attention_heads * attn_head_size,)
return tensor.view(new_shape)
def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.Tensor:
qkv = self.qkv_proj(hidden_states)
# TODO(enijkamp): factor out number of logical TPU-v4 cores or make forward pass agnostic
mp_num = 4
qkv_split = qkv.reshape(qkv.shape[:-1] + (mp_num, -1))
local_dim = self.head_dim * self.num_attention_heads // mp_num
query, value, key = torch.split(qkv_split, local_dim, dim=-1)
query = self._split_heads(query, self.num_attention_heads, self.head_dim, mp_num=mp_num)
key = self._split_heads(key, self.num_attention_heads, self.head_dim, mp_num=mp_num)
value = self._split_heads(value, self.num_attention_heads, self.head_dim, mp_num=mp_num)
seq_len = key.shape[1]
offset = 0
if self.rotary_dim is not None:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
sincos = fixed_pos_embedding(k_rot, 1, seq_len=seq_len)
k_rot = apply_rotary_pos_emb(k_rot, sincos, offset=offset)
q_rot = apply_rotary_pos_emb(q_rot, sincos, offset=offset)
key = torch.cat([k_rot, k_pass], dim=-1)
query = torch.cat([q_rot, q_pass], dim=-1)
else:
sincos = fixed_pos_embedding(key, 1, seq_len=seq_len)
key = apply_rotary_pos_emb(key, sincos, offset=offset)
query = apply_rotary_pos_emb(query, sincos, offset=offset)
# compute self-attention: V x Softmax(QK^T)
attn_output = memory_efficient_attention(
query.to(torch.float16),
key.to(torch.float16),
value.to(torch.float16),
attn_bias=LowerTriangularMask(),
)
attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_dim)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
return attn_output
class CodeGenFlashBlock(nn.Module):
def __init__(self, config: CodeGenFlashConfig) -> None:
super().__init__()
inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
rotary_dim = min(config.rotary_dim, config.n_ctx // config.num_attention_heads)
self.attn_type = config.attn_type
self.use_fused_mlp = config.use_fused_mlp
self.resid_pdrop = config.resid_pdrop
self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
if self.attn_type == "default":
self.attn = CodeGenAttention(config)
elif self.attn_type == "flash":
head_dim = config.n_embd // config.n_head
self.attn = MHA(
embed_dim=config.n_embd,
num_heads=config.n_head,
cross_attn=False,
bias=True,
dropout=config.attn_pdrop,
softmax_scale=head_dim ** (-0.5),
causal=True,
rotary_emb_dim=rotary_dim,
fused_bias_fc=True,
use_flash_attn=True,
return_residual=False,
)
elif self.attn_type == "xformers":
self.attn = CodeGenXAttention(config)
if not self.use_fused_mlp:
self.mlp = CodeGenMLP(inner_dim, config)
else:
activation = (
"gelu_approx" if config.activation_function in ["gelu_new", "gelu_fast", "gelu_approx"] else "relu"
)
self.mlp = FusedMLP(in_features=config.n_embd, hidden_features=inner_dim, activation=activation)
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(hidden_states)
if isinstance(attn_outputs, tuple):
attn_outputs = attn_outputs[0]
feed_forward_hidden_states = self.mlp(hidden_states)
if self.attn_type == "flash":
attn_outputs = nn.Dropout(self.resid_pdrop)(attn_outputs)
feed_forward_hidden_states = nn.Dropout(self.resid_pdrop)(feed_forward_hidden_states)
hidden_states = attn_outputs + feed_forward_hidden_states + residual
return hidden_states
class CodeGenFlashLMHead(nn.Module):
def __init__(self, config: CodeGenFlashConfig) -> None:
super().__init__()
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size)
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
hidden_states = self.ln_f(hidden_states)
lm_logits = self.lm_head(hidden_states).to(torch.float32)
return lm_logits
class LMHeadLoss(nn.Module):
def __init__(self, shift_labels: Optional[bool] = False) -> None:
super().__init__()
self.shift_labels = shift_labels
self.loss_fct = nn.CrossEntropyLoss()
def forward(self, lm_logits: torch.FloatTensor, labels: torch.LongTensor) -> torch.FloatTensor:
if self.shift_labels:
lm_logits = lm_logits[..., :-1, :].contiguous()
labels = labels[..., 1:].contiguous()
loss = self.loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
return loss
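# Hedged usage sketch (added comment; not part of the original file): with
# `shift_labels=True`, position t of the logits is scored against token t+1 of the
# labels, the standard causal-LM objective.
#   loss_fn = LMHeadLoss(shift_labels=True)
#   logits = torch.randn(2, 8, 100)
#   labels = torch.randint(0, 100, (2, 8))
#   loss = loss_fn(logits, labels)  # scalar tensor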
class CodeGenFlashSequential(CodeGenPreTrainedModel):
def __init__(self, config: CodeGenFlashConfig) -> None:
super().__init__(config)
modules = [CodeGenFlashEmbedding(config)]
for _ in range(config.n_layer):
modules.append(CodeGenFlashBlock(config))
modules.append(CodeGenFlashLMHead(config))
self.layers = nn.Sequential(*modules)
self.loss = LMHeadLoss()
self.post_init()
def forward(
self, input_ids: torch.LongTensor, labels: Optional[torch.LongTensor] = None, **kwargs
) -> CausalLMOutput:
lm_logits = self.layers(input_ids)
loss = None
if labels is not None:
loss = self.loss(lm_logits, labels)
return CausalLMOutput(loss=loss, logits=lm_logits)
def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, torch.Tensor]:
return {"input_ids": input_ids}
def get_input_embeddings(self) -> torch.Tensor:
return self.layers[0].wte
def set_input_embeddings(self, new_embeddings: torch.Tensor) -> None:
self.layers[0].wte = new_embeddings
def get_output_embeddings(self) -> torch.Tensor:
return self.layers[-1].lm_head
def set_output_embeddings(self, new_embeddings: torch.Tensor) -> None:
self.layers[-1].lm_head = new_embeddings
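# Hedged usage sketch (added comment; not part of the original file). Assumes the
# pinned transformers/flash-attn/xformers versions this module was written against;
# the tiny hyperparameters below are arbitrary and only meant to show the API.
#   config = CodeGenFlashConfig(vocab_size=1024, n_positions=128, n_ctx=128, n_embd=64,
#                               n_layer=2, n_head=4, rotary_dim=16, attn_type="default")
#   model = CodeGenFlashSequential(config)
#   input_ids = torch.randint(0, config.vocab_size, (1, 16))
#   out = model(input_ids, labels=input_ids)  # CausalLMOutput with .loss and .logits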
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/modeling_codegen_flash.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/modeling_codegen_flash.py",
"repo_id": "archai",
"token_count": 4153
}
| 322 |
# TD: [2023-01-05]: Extracted the SSKernelDiag class from
# https://github.com/HazyResearch/state-spaces/blob/06dbbdfd0876501a7f12bf3262121badbc7658af/src/models/sequence/ss/kernel.py
# We make a small change to use the log_vandermonde CUDA code.
"""SSKernelDiag is the S4D kernel, a simpler algorithm for computing the kernel for the case of diagonal state matrices A.
"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, repeat
from opt_einsum import contract
from .utils import OptimModule
# This could be None if the CUDA import fails
from .ssm_ops.vandermonde import log_vandermonde_fast
try:
import pykeops
from .ssm_ops.vandermonde import log_vandermonde, log_vandermonde_transpose
has_pykeops = True
print("Pykeops installation found.")
except ImportError:
has_pykeops = False
from .ssm_ops.vandermonde import log_vandermonde_naive as log_vandermonde
from .ssm_ops.vandermonde import log_vandermonde_transpose_naive as log_vandermonde_transpose
print(
"Falling back on slow Vandermonde kernel. Install pykeops for improved memory efficiency."
)
_c2r = torch.view_as_real
_r2c = torch.view_as_complex
if tuple(map(int, torch.__version__.split('.')[:2])) >= (1, 10):
_resolve_conj = lambda x: x.conj().resolve_conj()
else:
_resolve_conj = lambda x: x.conj()
class SSKernelDiag(OptimModule):
"""Version using (complex) diagonal state matrix (S4D)"""
def __init__(
self,
A, B, C, log_dt,
L=None,
disc='bilinear',
real_type='exp',
lr=None,
bandlimit=None,
force_real=False,
):
super().__init__()
self.L = L
self.disc = disc
self.bandlimit = bandlimit
self.real_type = real_type
self.force_real = force_real
# Rank of low-rank correction
assert A.size(-1) == C.size(-1)
self.H = log_dt.size(-1)
self.N = A.size(-1)
assert A.size(-2) == B.size(-2) # Number of independent SSMs trained
assert self.H % A.size(-2) == 0
self.n_ssm = A.size(-2)
self.repeat = self.H // A.size(0)
self.channels = C.shape[0]
self.C = nn.Parameter(_c2r(_resolve_conj(C)))
# Register parameters
if lr is None or isinstance(lr, float): lr_dict = {}
else: lr_dict, lr = lr, None
self.register("log_dt", log_dt, lr_dict.get('dt', lr))
self.register("B", _c2r(B), lr_dict.get('B', lr))
self.register("inv_A_real", self._A_init(A.real), lr_dict.get('A', lr))
self.register("A_imag", A.imag, lr_dict.get('A', lr))
def _A_init(self, A_real):
A_real = torch.clamp(A_real, max=-1e-4)
if self.real_type == 'none':
return -A_real
elif self.real_type == 'exp':
return torch.log(-A_real) # Some of the HiPPO methods have real part 0
elif self.real_type == 'relu':
return -A_real
elif self.real_type == 'sigmoid':
return torch.logit(-A_real)
elif self.real_type == 'softplus':
return torch.log(torch.exp(-A_real)-1)
else: raise NotImplementedError
def _A(self):
# Get the internal A (diagonal) parameter
if self.real_type == 'none':
A_real = -self.inv_A_real
elif self.real_type == 'exp':
A_real = -torch.exp(self.inv_A_real)
elif self.real_type == 'relu':
            # JAX version seems to NaN if you allow 0's, although this code was fine without it
A_real = -F.relu(self.inv_A_real)-1e-4
elif self.real_type == 'sigmoid':
A_real = -F.sigmoid(self.inv_A_real)
elif self.real_type == 'softplus':
A_real = -F.softplus(self.inv_A_real)
else: raise NotImplementedError
A = A_real + 1j * self.A_imag
return A
def forward(self, L, state=None, rate=1.0, u=None):
"""
state: (B, H, N) initial state
rate: sampling rate factor
L: target length
returns:
(C, H, L) convolution kernel (generally C=1)
(B, H, L) output from initial state
"""
dt = torch.exp(self.log_dt) * rate # (H)
C = _r2c(self.C) # (C H N)
A = self._A() # (H N)
B = _r2c(self.B)
B = repeat(B, 't n -> 1 (v t) n', v=self.repeat)
# Force A to be real valued, so the whole kernel can be interpreted as a "multi-head EMA"
if self.force_real:
A = A.real + 0j
if self.bandlimit is not None:
freqs = dt[:, None] / rate * A.imag.abs() / (2*math.pi) # (H, N)
mask = torch.where(freqs < self.bandlimit * .5, 1, 0)
C = C * mask
# Incorporate dt into A
A = repeat(A, 't n -> (v t) n', v=self.repeat)
dtA = A * dt.unsqueeze(-1) # (H N)
# Augment B with state
if state is not None:
s = state / dt.unsqueeze(-1)
if self.disc == 'bilinear':
s = s * (1. + dtA/2)
elif self.disc == 'zoh':
s = s * dtA * dtA.exp() / (dtA.exp() - 1.)
B = torch.cat([s, B], dim=-3) # (1+B H N)
C = (B[:, None, :, :] * C).view(-1, self.H, self.N)
if self.disc == 'zoh':
# Power up
C = C * (torch.exp(dtA)-1.) / A
# TODO (TD): make it work for C.shape[0] > 1
if log_vandermonde_fast is not None and C.shape[0] == 1:
K = log_vandermonde_fast(C.squeeze(0), dtA, L).unsqueeze(0) # (H L)
else:
K = log_vandermonde(C, dtA, L) # (H L)
elif self.disc == 'bilinear':
C = C * (1. - dtA/2).reciprocal() * dt.unsqueeze(-1) # or * dtA / A
dA = (1. + dtA/2) / (1. - dtA/2)
if log_vandermonde_fast is not None:
dA_log = repeat(dA.log(), 'h d -> (c h) d', c=C.shape[0])
K = rearrange(log_vandermonde_fast(rearrange(C, 'c h d -> (c h) d'), dA_log, L),
'(c h) d -> c h d', c=C.shape[0])
else:
K = log_vandermonde(C, dA.log(), L)
elif self.disc == 'dss':
# Implementation from DSS meant for case when real eigenvalues can be positive
P = dtA.unsqueeze(-1) * torch.arange(L, device=C.device) # [H N L]
A_gt_0 = A.real > 0 # [N]
if A_gt_0.any():
with torch.no_grad():
P_max = dtA * (A_gt_0 * (L-1)) # [H N]
P = P - P_max.unsqueeze(-1) # [H N L]
S = P.exp() # [H N L]
dtA_neg = dtA * (1 - 2*A_gt_0) # [H N]
num = dtA_neg.exp() - 1 # [H N]
den = (dtA_neg * L).exp() - 1 # [H N]
# Inline reciprocal function for DSS logic
x = den * A
x_conj = _resolve_conj(x)
r = x_conj / (x*x_conj + 1e-7)
C = C * num * r # [C H N]
K = contract('chn,hnl->chl', C, S).float()
else: assert False, f"{self.disc} not supported"
K = K.view(-1, self.channels, self.H, L) # (1+B C H L)
if state is not None:
K_state = K[:-1, :, :, :] # (B C H L)
else:
K_state = None
K = K[-1, :, :, :] # (C H L)
return K, K_state
def _setup_step(self):
# These methods are organized like this to be compatible with the NPLR kernel interface
dt = torch.exp(self.log_dt) # (H)
B = _r2c(self.B) # (H N)
C = _r2c(self.C) # (C H N)
self.dC = C
A = self._A() # (H N)
A = repeat(A, 't n -> (v t) n', v=self.repeat)
B = repeat(B, 't n -> (v t) n', v=self.repeat)
# Incorporate dt into A
dtA = A * dt.unsqueeze(-1) # (H N)
if self.disc == 'zoh':
self.dA = torch.exp(dtA) # (H N)
self.dB = B * (torch.exp(dtA)-1.) / A # (C H N)
elif self.disc == 'bilinear':
self.dA = (1. + dtA/2) / (1. - dtA/2)
self.dB = B * (1. - dtA/2).reciprocal() * dt.unsqueeze(-1) # or * dtA / A
def default_state(self, *batch_shape):
C = _r2c(self.C)
state = torch.zeros(*batch_shape, self.H, self.N, dtype=C.dtype, device=C.device)
return state
def step(self, u, state):
next_state = contract("h n, b h n -> b h n", self.dA, state) \
+ contract("h n, b h -> b h n", self.dB, u)
y = contract("c h n, b h n -> b c h", self.dC, next_state)
return 2*y.real, next_state
def forward_state(self, u, state):
self._setup_step()
AL = self.dA ** u.size(-1)
u = u.flip(-1).to(self.dA).contiguous() # (B H L)
v = log_vandermonde_transpose(u, self.dB, self.dA.log(), u.size(-1))
next_state = AL * state + v
return next_state
class EMAKernel(OptimModule):
"""Translation of Mega's MultiHeadEMA.
This is a minimal implementation of the convolution kernel part of the module.
This module, together with the main S4 block in src.models.sequence.ss.s4
    (which is really just an FFT-conv wrapper around any convolution kernel,
such as this one), should be exactly equivalent to using the original Mega
EMA module in src.models.sequence.ss.ema.
    Two additional flags have been provided to resolve discrepancies in parameter
count between S4(D) and EMA
- `dt_tie` makes the shape of the step size \Delta (H, 1) instead of (H, N)
- `efficient_bidirectional` ties the A/B/dt parameters for the conv kernels
in both forwards and backwards directions. This should have exactly the same
speed, slightly more parameter efficiency, and unchanged performance.
"""
def __init__(
self,
H,
N=2,
channels=1,
l_max=None,
dt_tie=False,
efficient_bidirectional=False,
):
super().__init__()
self.H = H
self.N = N
self.channels = channels
self.l_max = l_max
self.scale = math.sqrt(1.0 / self.N)
        # Exactly match the parameter count of S4(D) when bidirectional is on
self.efficient_bidirectional = efficient_bidirectional
if self.efficient_bidirectional:
H_C = H * channels
else:
H *= channels
H_C = H
self.delta = nn.Parameter(torch.Tensor(H, 1 if dt_tie else N, 1))
self.alpha = nn.Parameter(torch.Tensor(H, N, 1))
self.beta = nn.Parameter(torch.Tensor(H, N, 1))
self.gamma = nn.Parameter(torch.Tensor(H_C, N))
# self.omega = nn.Parameter(torch.Tensor(H)) # D skip connection handled by outside class
self.reset_parameters()
def reset_parameters(self):
with torch.no_grad():
nn.init.normal_(self.delta, mean=0.0, std=0.2)
nn.init.normal_(self.alpha, mean=0.0, std=0.2)
# Mega comment: beta [1, -1, 1, -1, ...] seems more stable.
val = torch.ones(self.N, 1)
if self.N > 1:
idx = torch.tensor(list(range(1, self.N, 2)))
val.index_fill_(0, idx, -1.0)
self.beta.normal_(mean=0.0, std=0.02).add_(val)
nn.init.normal_(self.gamma, mean=0.0, std=1.0)
# nn.init.normal_(self.omega, mean=0.0, std=1.0)
def coeffs(self): # Same as discretize
p = torch.sigmoid(self.delta) # (H N 1)
alpha = torch.sigmoid(self.alpha)
q = 1.0 - p * alpha
return p, q
def forward(self, L=None, state=None, rate=1.0):
L = L if self.l_max is None else min(self.l_max, L)
p, q = self.coeffs() # (H N 1)
vander = torch.arange(L).to(p).view(1, 1, L) * torch.log(q) # (H N L)
kernel = (p * self.beta) * torch.exp(vander)
if self.efficient_bidirectional:
C = rearrange(self.gamma * self.scale, '(c h) n -> c h n', c=self.channels)
kernel = torch.einsum('dnl,cdn->cdl', kernel, C)
# kernel = rearrange(kernel, 'c d l -> (c d) l')
else:
kernel = torch.einsum('dnl,dn->dl', kernel, self.gamma * self.scale)
kernel = rearrange(kernel, '(c h) l -> c h l', c=self.channels)
kernel = kernel[..., :L]
# kernel = rearrange(kernel, '(c h) l -> c h l', c=self.channels)
return kernel, None # k_state
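if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original file).
    # Run as a module (python -m ...) since this file uses relative imports.
    # Builds the EMA kernel on its own: H is the model width, N the per-dim state size.
    ema = EMAKernel(H=8, N=2, channels=1)
    k, _ = ema(L=16)
    print(k.shape)  # torch.Size([1, 8, 16]) == (channels, H, L)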
|
archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ss_kernel_diag.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ss_kernel_diag.py",
"repo_id": "archai",
"token_count": 6457
}
| 323 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# Copyright (c) 2018, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0.
from typing import Optional
import torch
import torch.nn as nn
class PositionWiseFF(nn.Module):
def __init__(
self,
d_model: int,
d_inner: int,
dropout: float,
pre_lnorm: Optional[bool] = False,
layer_norm_epsilon: Optional[float] = 1e-5,
) -> None:
super().__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.pre_lnorm = pre_lnorm
self.ff = nn.Sequential(
nn.Linear(d_model, d_inner),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
def forward(self, inputs: torch.FloatTensor) -> torch.FloatTensor:
if self.pre_lnorm:
output = self.ff(self.layer_norm(inputs))
output += inputs
else:
output = self.ff(inputs)
output = self.layer_norm(inputs + output)
return output
class PositionWiseFFPrimerEZ(nn.Module):
def __init__(
self,
d_model: int,
d_inner: int,
dropout: float,
pre_lnorm: Optional[bool] = False,
layer_norm_epsilon: Optional[float] = 1e-5,
) -> None:
super().__init__()
# Squared ReLU: https://arxiv.org/abs/2109.08668
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.pre_lnorm = pre_lnorm
self.ff1 = nn.Sequential(nn.Linear(d_model, d_inner), nn.ReLU(inplace=True))
self.ff2 = nn.Sequential(nn.Dropout(dropout), nn.Linear(d_inner, d_model), nn.Dropout(dropout))
self.layer_norm = nn.LayerNorm(d_model, eps=layer_norm_epsilon)
def forward(self, inputs: torch.FloatTensor) -> torch.FloatTensor:
if self.pre_lnorm:
output = self.ff2(self.ff1(self.layer_norm(inputs)) ** 2)
output += inputs
else:
output = self.ff2(self.ff1(inputs) ** 2)
output = self.layer_norm(inputs + output)
return output
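if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original file).
    # Both blocks map a (batch, seq_len, d_model) tensor back to the same shape;
    # the PrimerEZ variant only differs by squaring the ReLU activation.
    x = torch.randn(2, 5, 16)
    ff = PositionWiseFF(d_model=16, d_inner=64, dropout=0.1, pre_lnorm=True)
    ff_primer = PositionWiseFFPrimerEZ(d_model=16, d_inner=64, dropout=0.1)
    print(ff(x).shape, ff_primer(x).shape)  # torch.Size([2, 5, 16]) twice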
|
archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/mem_transformer_utils/position_wise_ff.py/0
|
{
"file_path": "archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/mem_transformer_utils/position_wise_ff.py",
"repo_id": "archai",
"token_count": 1133
}
| 324 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
def gpt2_onnx_forward(
self,
input_ids: torch.LongTensor,
past_key_values: Optional[Tuple[torch.FloatTensor, ...]] = None,
) -> Dict[str, torch.FloatTensor]:
"""Forward pass through the GPT-2 model with ONNX exportability.
This method overrides the default GPT-2 forward method and returns
both output probabilities and past key/values.
Args:
input_ids: Input tensor.
past_key_values: Past pre-computed key/values tensor.
Returns:
Output probabilities and past key/values.
"""
outputs_dict = {}
outputs = self.transformer(input_ids, past_key_values=past_key_values)
last_hidden_state = outputs.last_hidden_state
past_key_values = outputs.past_key_values
logits = F.softmax(self.lm_head(last_hidden_state[:, -1, :]), dim=-1)
outputs_dict["logits"] = logits
if past_key_values:
past_key_values = tuple([torch.stack(p) for p in past_key_values])
outputs_dict["past_key_values"] = past_key_values
return outputs_dict
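if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original file):
    # binds this forward to a small randomly initialized GPT-2 LM head model so that
    # calling the model yields the ONNX-friendly outputs above. Assumes a transformers
    # version whose GPT-2 returns past key/values in the legacy tuple format.
    import types
    from transformers import GPT2Config, GPT2LMHeadModel
    model = GPT2LMHeadModel(GPT2Config(n_layer=2, n_head=2, n_embd=64, vocab_size=1000)).eval()
    model.forward = types.MethodType(gpt2_onnx_forward, model)
    with torch.no_grad():
        outputs = model(torch.tensor([[1, 2, 3]]))
    print(outputs["logits"].shape)          # (1, vocab_size) probabilities
    print(len(outputs["past_key_values"]))  # n_layer stacked key/value tensors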
|
archai/archai/onnx/onnx_forward.py/0
|
{
"file_path": "archai/archai/onnx/onnx_forward.py",
"repo_id": "archai",
"token_count": 437
}
| 325 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Dict, List
import torch
from overrides import overrides
from torch import nn
from archai.common.common import get_conf
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.algos.divnas.analyse_activations import compute_brute_force_sol
from archai.supergraph.algos.divnas.divnas_cell import Divnas_Cell
from archai.supergraph.algos.divnas.divop import DivOp
from archai.supergraph.datasets.data import get_data
from archai.supergraph.nas.cell import Cell
from archai.supergraph.nas.finalizers import Finalizers
from archai.supergraph.nas.model import Model
from archai.supergraph.nas.model_desc import CellDesc, EdgeDesc, ModelDesc, NodeDesc
logger = get_global_logger()
class DivnasFinalizers(Finalizers):
@overrides
def finalize_model(self, model: Model, to_cpu=True, restore_device=True) -> ModelDesc:
logger.pushd('finalize')
# get config and train data loader
# TODO: confirm this is correct in case you get silent bugs
conf = get_conf()
conf_loader = conf['nas']['search']['loader']
data_loaders = get_data(conf_loader)
assert data_loaders.train_dl is not None
# wrap all cells in the model
self._divnas_cells:Dict[int, Divnas_Cell] = {}
for _, cell in enumerate(model.cells):
divnas_cell = Divnas_Cell(cell)
self._divnas_cells[id(cell)] = divnas_cell
# go through all edges in the DAG and if they are of divop
# type then set them to collect activations
sigma = conf['nas']['search']['divnas']['sigma']
for _, dcell in enumerate(self._divnas_cells.values()):
dcell.collect_activations(DivOp, sigma)
# now we need to run one evaluation epoch to collect activations
# we do it on cpu otherwise we might run into memory issues
# later we can redo the whole logic in pytorch itself
# at the end of this each node in a cell will have the covariance
# matrix of all incoming edges' ops
model = model.cpu()
model.eval()
with torch.no_grad():
for _ in range(1):
for _, (x, _) in enumerate(data_loaders.train_dl):
_, _ = model(x), None
# now you can go through and update the
# node covariances in every cell
for dcell in self._divnas_cells.values():
dcell.update_covs()
logger.popd()
return super().finalize_model(model, to_cpu, restore_device)
@overrides
def finalize_cell(self, cell:Cell, cell_index:int,
model_desc:ModelDesc, *args, **kwargs)->CellDesc:
# first finalize each node, we will need to recreate node desc with final version
max_final_edges = model_desc.max_final_edges
node_descs:List[NodeDesc] = []
dcell = self._divnas_cells[id(cell)]
assert len(cell.dag) == len(list(dcell.node_covs.values()))
for i,node in enumerate(cell.dag):
node_cov = dcell.node_covs[id(node)]
node_desc = self.finalize_node(node, i, cell.desc.nodes()[i],
max_final_edges, node_cov)
node_descs.append(node_desc)
# (optional) clear out all activation collection information
dcell.clear_collect_activations()
desc = cell.desc
finalized = CellDesc(
id = desc.id, cell_type=desc.cell_type, conf_cell=desc.conf_cell,
stems=[cell.s0_op.finalize()[0], cell.s1_op.finalize()[0]],
stem_shapes=desc.stem_shapes,
nodes = node_descs, node_shapes=desc.node_shapes,
post_op=cell.post_op.finalize()[0],
out_shape=desc.out_shape,
trainables_from = desc.trainables_from
)
return finalized
@overrides
def finalize_node(self, node:nn.ModuleList, node_index:int,
node_desc:NodeDesc, max_final_edges:int,
cov, *args, **kwargs)->NodeDesc:
# node is a list of edges
assert len(node) >= max_final_edges
# covariance matrix shape must be square 2-D
assert len(cov.shape) == 2
assert cov.shape[0] == cov.shape[1]
        # the number of primitive operators has to be greater than
        # or equal to the maximum number of final edges allowed
assert cov.shape[0] >= max_final_edges
# get total number of ops incoming to this node
num_ops = sum([edge._op.num_valid_div_ops for edge in node])
# and collect some bookkeeping indices
edge_num_and_op_ind = []
for j, edge in enumerate(node):
if type(edge._op) == DivOp:
for k in range(edge._op.num_valid_div_ops):
edge_num_and_op_ind.append((j, k))
assert len(edge_num_and_op_ind) == num_ops
# run brute force set selection algorithm
max_subset, max_mi = compute_brute_force_sol(cov, max_final_edges)
# convert the cov indices to edge descs
selected_edges = []
for ind in max_subset:
edge_ind, op_ind = edge_num_and_op_ind[ind]
op_desc = node[edge_ind]._op.get_valid_op_desc(op_ind)
new_edge = EdgeDesc(op_desc, node[edge_ind].input_ids)
selected_edges.append(new_edge)
# for edge in selected_edges:
# self.finalize_edge(edge)
return NodeDesc(selected_edges, node_desc.conv_params)
|
archai/archai/supergraph/algos/divnas/divnas_finalizers.py/0
|
{
"file_path": "archai/archai/supergraph/algos/divnas/divnas_finalizers.py",
"repo_id": "archai",
"token_count": 2476
}
| 326 |
# NASBench 101 Implementation
##Credits
Code in this directory is from https://github.com/romulus0914/NASBench-PyTorch authored by Romulus Hong.
|
archai/archai/supergraph/algos/nasbench101/README.md/0
|
{
"file_path": "archai/archai/supergraph/algos/nasbench101/README.md",
"repo_id": "archai",
"token_count": 40
}
| 327 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import glob
import math
import os
import pathlib
from typing import Optional
# only works on linux
import ray
import yaml
from overrides import overrides
from archai.common import common, ml_utils, utils
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.algos.petridish.petridish_utils import (
ConvexHullPoint,
ExperimentStage,
JobStage,
plot_pool,
save_hull,
)
from archai.supergraph.nas import nas_utils
from archai.supergraph.nas.evaluater import EvalResult, Evaluater
from archai.supergraph.nas.model_desc import CellType, ModelDesc
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
logger = get_global_logger()
def filepath_ext(filepath:str)->str:
"""Returns '.f' for '/a/b/c/d.e.f' """
return pathlib.Path(filepath).suffix
def filepath_name_only(filepath:str)->str:
"""Returns 'd.e' for '/a/b/c/d.e.f' """
return pathlib.Path(filepath).stem
def append_to_filename(filepath:str, name_suffix:str, new_ext:Optional[str]=None)->str:
"""Returns '/a/b/c/h.f' for filepath='/a/b/c/d.e.f', new_name='h' """
ext = new_ext or filepath_ext(filepath)
name = filepath_name_only(filepath)
return str(pathlib.Path(filepath).with_name(name+name_suffix).with_suffix(ext))
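# Worked example (added comment; not part of the original file):
#   append_to_filename('/a/b/c/model_desc_5.yaml', '_model', '.pt')
#   -> '/a/b/c/model_desc_5_model.pt'
# Note this assumes the stem has no extra dots; `with_suffix` would otherwise treat
# the text after the last remaining dot as an extension and replace it.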
class EvaluaterPetridish(Evaluater):
@overrides
def evaluate(self, conf_eval:Config, model_desc_builder:ModelDescBuilder)->EvalResult:
"""Takes a folder of model descriptions output by search process and
trains them in a distributed manner using ray with 1 gpu"""
logger.pushd('evaluate')
final_desc_foldername:str = conf_eval['final_desc_foldername']
source_desc_foldername:str = conf_eval.get('source_desc_foldername', final_desc_foldername)
# get list of model descs in the gallery folder
source_desc_folderpath = utils.full_path(source_desc_foldername)
final_desc_folderpath = utils.full_path(final_desc_foldername, True)
files = [os.path.join(final_desc_folderpath, utils.filepath_name_ext(f)) \
for f in glob.glob(os.path.join(source_desc_folderpath, 'model_desc_*.yaml')) \
if os.path.isfile(os.path.join(source_desc_folderpath, f))]
logger.info({'model_desc_files':len(files)})
        # to avoid all workers downloading datasets individually, let's do it beforehand
self._ensure_dataset_download(conf_eval)
future_ids = []
for model_desc_filename in files:
future_id = EvaluaterPetridish._train_dist.remote(self, conf_eval, model_desc_builder, model_desc_filename, common.get_state(), source_desc_folderpath)
future_ids.append(future_id)
# wait for all eval jobs to be finished
ready_refs, remaining_refs = ray.wait(future_ids, num_returns=len(future_ids))
# plot pareto curve of gallery of models
hull_points = [ray.get(ready_ref) for ready_ref in ready_refs]
save_hull(hull_points, common.get_expdir())
plot_pool(hull_points, common.get_expdir(), ExperimentStage.EVAL)
best_point = max(hull_points, key=lambda p:p.metrics.best_val_top1())
logger.info({'best_val_top1':best_point.metrics.best_val_top1(),
'best_MAdd': best_point.model_stats.MAdd})
logger.popd()
return EvalResult(best_point.metrics)
@staticmethod
@ray.remote(num_gpus=1)
def _train_dist(evaluater:Evaluater, conf_eval:Config, model_desc_builder:ModelDescBuilder,
model_desc_filename:str, common_state, source_folder:Optional[str])->ConvexHullPoint:
"""Train given a model"""
common.init_from(common_state)
# region config vars
conf_model_desc = conf_eval['model_desc']
max_cells = conf_model_desc['n_cells']
conf_checkpoint = conf_eval['checkpoint']
resume = conf_eval['resume']
conf_petridish = conf_eval['petridish']
cell_count_scale = conf_petridish['cell_count_scale']
#endregion
        # register ops as we are in a different process now
model_desc_builder.pre_build(conf_model_desc)
model_filename = utils.append_to_filename(model_desc_filename, '_model', '.pt')
full_desc_filename = utils.append_to_filename(model_desc_filename, '_full', '.yaml')
metrics_filename = utils.append_to_filename(model_desc_filename, '_metrics', '.yaml')
model_stats_filename = utils.append_to_filename(model_desc_filename, '_model_stats', '.yaml')
# create checkpoint for this specific model desc by changing the config
checkpoint = None
if conf_checkpoint is not None:
conf_checkpoint['filename'] = utils.append_to_filename(model_filename, '_checkpoint', '.pth')
checkpoint = nas_utils.create_checkpoint(conf_checkpoint, resume)
if checkpoint is not None and resume:
if 'metrics_stats' in checkpoint:
# return the output we had recorded in the checkpoint
convex_hull_point = checkpoint['metrics_stats']
return convex_hull_point
# template model is what we used during the search
if source_folder:
template_model_desc = ModelDesc.load(os.path.join(source_folder, utils.filepath_name_ext(model_desc_filename)))
else:
template_model_desc = ModelDesc.load(model_desc_filename)
# we first scale this model by number of cells, keeping reductions same as in search
n_cells = math.ceil(len(template_model_desc.cell_descs())*cell_count_scale)
n_cells = min(n_cells, max_cells)
conf_model_desc = copy.deepcopy(conf_model_desc)
conf_model_desc['n_cells'] = n_cells
conf_model_desc['n_reductions'] = n_reductions = template_model_desc.cell_type_count(CellType.Reduction)
model_desc = model_desc_builder.build(conf_model_desc,
template=template_model_desc)
# save desc for reference
model_desc.save(full_desc_filename)
model = evaluater.model_from_desc(model_desc)
train_metrics = evaluater.train_model(conf_eval, model, checkpoint)
train_metrics.save(metrics_filename)
# get metrics_stats
model_stats = nas_utils.get_model_stats(model)
# save metrics_stats
with open(model_stats_filename, 'w') as f:
yaml.dump(model_stats, f)
# save model
if model_filename:
model_filename = utils.full_path(model_filename)
ml_utils.save_model(model, model_filename)
# TODO: Causes logging error at random times. Commenting out as stop-gap fix.
# logger.info({'model_save_path': model_filename})
hull_point = ConvexHullPoint(JobStage.EVAL_TRAINED, 0, 0, model_desc,
(n_cells, n_reductions, len(model_desc.cell_descs()[0].nodes())),
metrics=train_metrics, model_stats=model_stats)
if checkpoint:
checkpoint.new()
checkpoint['metrics_stats'] = hull_point
checkpoint.commit()
return hull_point
def _ensure_dataset_download(self, conf_search:Config)->None:
conf_loader = conf_search['loader']
self.get_data(conf_loader)
|
archai/archai/supergraph/algos/petridish/evaluater_petridish.py/0
|
{
"file_path": "archai/archai/supergraph/algos/petridish/evaluater_petridish.py",
"repo_id": "archai",
"token_count": 3092
}
| 328 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# code in this file is adapted from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
from collections import defaultdict
from typing import List, Union
import numpy as np
import PIL
import PIL.ImageDraw
import PIL.ImageEnhance
import PIL.ImageOps
from archai.common.ordered_dict_logger import get_global_logger
from archai.datasets.cv.transforms.custom_cutout import CustomCutout
from archai.supergraph.datasets.aug_policies import (
fa_reduced_cifar10,
fa_reduced_svhn,
fa_resnet50_rimagenet,
)
logger = get_global_logger()
_random_mirror = True
class Augmentation:
def __init__(self, policies):
self.policies = policies
def __call__(self, img):
for _ in range(1):
policy = random.choice(self.policies)
for name, pr, level in policy:
if random.random() > pr:
continue
img = apply_augment(img, name, level)
return img
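# Hedged usage sketch (added comment; not part of the original file): `Augmentation`
# wraps a list of (name, probability, level) sub-policies into a PIL-image transform:
#   aug = Augmentation(fa_reduced_cifar10())
#   img = aug(img)  # applies one randomly chosen sub-policy to a PIL.Image
# which is what `add_named_augs` below inserts at the front of `transform_train`.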
def add_named_augs(transform_train, aug:Union[List, str], cutout:int):
# TODO: recheck: total_aug remains None in original fastaug code
total_aug = augs = None
logger.info({'augmentation': aug})
if isinstance(aug, list):
transform_train.transforms.insert(0, Augmentation(aug))
elif aug:
if aug == 'fa_reduced_cifar10':
transform_train.transforms.insert(0, Augmentation(fa_reduced_cifar10()))
elif aug == 'fa_reduced_imagenet':
transform_train.transforms.insert(0, Augmentation(fa_resnet50_rimagenet()))
elif aug == 'fa_reduced_svhn':
transform_train.transforms.insert(0, Augmentation(fa_reduced_svhn()))
elif aug == 'arsaug':
transform_train.transforms.insert(0, Augmentation(arsaug_policy()))
elif aug == 'autoaug_cifar10':
transform_train.transforms.insert(0, Augmentation(autoaug_paper_cifar10()))
elif aug == 'autoaug_extend':
transform_train.transforms.insert(0, Augmentation(autoaug_policy()))
elif aug in ['default', 'inception', 'inception320']:
pass
else:
raise ValueError('Augmentations not found: %s' % aug)
# add cutout transform
# TODO: use PyTorch built-in cutout
logger.info({'cutout': cutout})
if cutout > 0:
transform_train.transforms.append(CustomCutout(cutout))
return total_aug, augs
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if _random_mirror and random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if _random_mirror and random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if _random_mirror and random.random() > 0.5:
v = -v
v = v * img.size[0]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if _random_mirror and random.random() > 0.5:
v = -v
v = v * img.size[1]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateXAbs(img, v): # [0, 10] pixels, absolute translation
assert 0 <= v <= 10
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateYAbs(img, v): # [0, 10] pixels, absolute translation
assert 0 <= v <= 10
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v): # [-30, 30]
assert -30 <= v <= 30
if _random_mirror and random.random() > 0.5:
v = -v
return img.rotate(v)
def AutoContrast(img, _):
return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
return PIL.ImageOps.invert(img)
def Equalize(img, _):
return PIL.ImageOps.equalize(img)
def Flip(img, _): # not from the paper
return PIL.ImageOps.mirror(img)
def Solarize(img, v): # [0, 256]
assert 0 <= v <= 256
return PIL.ImageOps.solarize(img, v)
def Posterize(img, v): # [4, 8]
assert 4 <= v <= 8
v = int(v)
return PIL.ImageOps.posterize(img, v)
def Posterize2(img, v): # [0, 4]
assert 0 <= v <= 4
v = int(v)
return PIL.ImageOps.posterize(img, v)
def Contrast(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v): # [0, 60] => percentage: [0, 0.2]
assert 0.0 <= v <= 0.2
if v <= 0.:
return img
v = v * img.size[0]
return CutoutAbs(img, v)
def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
# assert 0 <= v <= 20
if v < 0:
return img
w, h = img.size
x0 = np.random.uniform(w)
y0 = np.random.uniform(h)
x0 = int(max(0, x0 - v / 2.))
y0 = int(max(0, y0 - v / 2.))
x1 = min(w, x0 + v)
y1 = min(h, y0 + v)
xy = (x0, y0, x1, y1)
color = (125, 123, 114)
# color = (0, 0, 0)
img = img.copy()
PIL.ImageDraw.Draw(img).rectangle(xy, color)
return img
def SamplePairing(imgs): # [0, 0.4]
def f(img1, v):
i = np.random.choice(len(imgs))
img2 = PIL.Image.fromarray(imgs[i])
return PIL.Image.blend(img1, img2, v)
return f
def augment_list(for_autoaug=True): # 16 operations and their ranges
l = [
(ShearX, -0.3, 0.3), # 0
(ShearY, -0.3, 0.3), # 1
(TranslateX, -0.45, 0.45), # 2
(TranslateY, -0.45, 0.45), # 3
(Rotate, -30, 30), # 4
(AutoContrast, 0, 1), # 5
(Invert, 0, 1), # 6
(Equalize, 0, 1), # 7
(Solarize, 0, 256), # 8
(Posterize, 4, 8), # 9
(Contrast, 0.1, 1.9), # 10
(Color, 0.1, 1.9), # 11
(Brightness, 0.1, 1.9), # 12
(Sharpness, 0.1, 1.9), # 13
(Cutout, 0, 0.2), # 14
# (SamplePairing(imgs), 0, 0.4), # 15
]
if for_autoaug:
l += [
(CutoutAbs, 0, 20), # compatible with auto-augment
(Posterize2, 0, 4), # 9
(TranslateXAbs, 0, 10), # 9
(TranslateYAbs, 0, 10), # 9
]
return l
_augment_dict = {fn.__name__: (fn, v1, v2) for fn, v1, v2 in augment_list()}
def get_augment(name):
global _augment_dict
return _augment_dict[name]
def apply_augment(img, name, level):
augment_fn, low, high = get_augment(name)
return augment_fn(img.copy(), level * (high - low) + low)
def arsaug_policy():
exp0_0 = [
[('Solarize', 0.66, 0.34), ('Equalize', 0.56, 0.61)],
[('Equalize', 0.43, 0.06), ('AutoContrast', 0.66, 0.08)],
[('Color', 0.72, 0.47), ('Contrast', 0.88, 0.86)],
[('Brightness', 0.84, 0.71), ('Color', 0.31, 0.74)],
[('Rotate', 0.68, 0.26), ('TranslateX', 0.38, 0.88)]]
exp0_1 = [
[('TranslateY', 0.88, 0.96), ('TranslateY', 0.53, 0.79)],
[('AutoContrast', 0.44, 0.36), ('Solarize', 0.22, 0.48)],
[('AutoContrast', 0.93, 0.32), ('Solarize', 0.85, 0.26)],
[('Solarize', 0.55, 0.38), ('Equalize', 0.43, 0.48)],
[('TranslateY', 0.72, 0.93), ('AutoContrast', 0.83, 0.95)]]
exp0_2 = [
[('Solarize', 0.43, 0.58), ('AutoContrast', 0.82, 0.26)],
[('TranslateY', 0.71, 0.79), ('AutoContrast', 0.81, 0.94)],
[('AutoContrast', 0.92, 0.18), ('TranslateY', 0.77, 0.85)],
[('Equalize', 0.71, 0.69), ('Color', 0.23, 0.33)],
[('Sharpness', 0.36, 0.98), ('Brightness', 0.72, 0.78)]]
exp0_3 = [
[('Equalize', 0.74, 0.49), ('TranslateY', 0.86, 0.91)],
[('TranslateY', 0.82, 0.91), ('TranslateY', 0.96, 0.79)],
[('AutoContrast', 0.53, 0.37), ('Solarize', 0.39, 0.47)],
[('TranslateY', 0.22, 0.78), ('Color', 0.91, 0.65)],
[('Brightness', 0.82, 0.46), ('Color', 0.23, 0.91)]]
exp0_4 = [
[('Cutout', 0.27, 0.45), ('Equalize', 0.37, 0.21)],
[('Color', 0.43, 0.23), ('Brightness', 0.65, 0.71)],
[('ShearX', 0.49, 0.31), ('AutoContrast', 0.92, 0.28)],
[('Equalize', 0.62, 0.59), ('Equalize', 0.38, 0.91)],
[('Solarize', 0.57, 0.31), ('Equalize', 0.61, 0.51)]]
exp0_5 = [
[('TranslateY', 0.29, 0.35), ('Sharpness', 0.31, 0.64)],
[('Color', 0.73, 0.77), ('TranslateX', 0.65, 0.76)],
[('ShearY', 0.29, 0.74), ('Posterize', 0.42, 0.58)],
[('Color', 0.92, 0.79), ('Equalize', 0.68, 0.54)],
[('Sharpness', 0.87, 0.91), ('Sharpness', 0.93, 0.41)]]
exp0_6 = [
[('Solarize', 0.39, 0.35), ('Color', 0.31, 0.44)],
[('Color', 0.33, 0.77), ('Color', 0.25, 0.46)],
[('ShearY', 0.29, 0.74), ('Posterize', 0.42, 0.58)],
[('AutoContrast', 0.32, 0.79), ('Cutout', 0.68, 0.34)],
[('AutoContrast', 0.67, 0.91), ('AutoContrast', 0.73, 0.83)]]
return exp0_0 + exp0_1 + exp0_2 + exp0_3 + exp0_4 + exp0_5 + exp0_6
def autoaug2arsaug(f):
def autoaug():
mapper = defaultdict(lambda: lambda x: x)
mapper.update({
'ShearX': lambda x: float_parameter(x, 0.3),
'ShearY': lambda x: float_parameter(x, 0.3),
'TranslateX': lambda x: int_parameter(x, 10),
'TranslateY': lambda x: int_parameter(x, 10),
'Rotate': lambda x: int_parameter(x, 30),
'Solarize': lambda x: 256 - int_parameter(x, 256),
'Posterize2': lambda x: 4 - int_parameter(x, 4),
'Contrast': lambda x: float_parameter(x, 1.8) + .1,
'Color': lambda x: float_parameter(x, 1.8) + .1,
'Brightness': lambda x: float_parameter(x, 1.8) + .1,
'Sharpness': lambda x: float_parameter(x, 1.8) + .1,
'CutoutAbs': lambda x: int_parameter(x, 20)
})
def low_high(name, prev_value):
_, low, high = get_augment(name)
return float(prev_value - low) / (high - low)
policies = f()
new_policies = []
for policy in policies:
new_policies.append([(name, pr, low_high(name, mapper[name](level))) for name, pr, level in policy])
return new_policies
return autoaug
@autoaug2arsaug
def autoaug_paper_cifar10():
return [
[('Invert', 0.1, 7), ('Contrast', 0.2, 6)],
[('Rotate', 0.7, 2), ('TranslateXAbs', 0.3, 9)],
[('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],
[('ShearY', 0.5, 8), ('TranslateYAbs', 0.7, 9)],
[('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)],
[('ShearY', 0.2, 7), ('Posterize2', 0.3, 7)],
[('Color', 0.4, 3), ('Brightness', 0.6, 7)],
[('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],
[('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],
[('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)],
[('Color', 0.7, 7), ('TranslateXAbs', 0.5, 8)],
[('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],
[('TranslateYAbs', 0.4, 3), ('Sharpness', 0.2, 6)],
[('Brightness', 0.9, 6), ('Color', 0.2, 6)],
[('Solarize', 0.5, 2), ('Invert', 0.0, 3)],
[('Equalize', 0.2, 0), ('AutoContrast', 0.6, 0)],
[('Equalize', 0.2, 8), ('Equalize', 0.6, 4)],
[('Color', 0.9, 9), ('Equalize', 0.6, 6)],
[('AutoContrast', 0.8, 4), ('Solarize', 0.2, 8)],
[('Brightness', 0.1, 3), ('Color', 0.7, 0)],
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],
[('TranslateYAbs', 0.9, 9), ('TranslateYAbs', 0.7, 9)],
[('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],
[('Equalize', 0.8, 8), ('Invert', 0.1, 3)],
[('TranslateYAbs', 0.7, 9), ('AutoContrast', 0.9, 1)],
]
@autoaug2arsaug
def autoaug_policy():
"""AutoAugment policies found on Cifar."""
exp0_0 = [
[('Invert', 0.1, 7), ('Contrast', 0.2, 6)],
[('Rotate', 0.7, 2), ('TranslateXAbs', 0.3, 9)],
[('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],
[('ShearY', 0.5, 8), ('TranslateYAbs', 0.7, 9)],
[('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)]]
exp0_1 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],
[('TranslateYAbs', 0.9, 9), ('TranslateYAbs', 0.7, 9)],
[('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],
[('Equalize', 0.8, 8), ('Invert', 0.1, 3)],
[('TranslateYAbs', 0.7, 9), ('AutoContrast', 0.9, 1)]]
exp0_2 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.0, 2)],
[('TranslateYAbs', 0.7, 9), ('TranslateYAbs', 0.7, 9)],
[('AutoContrast', 0.9, 0), ('Solarize', 0.4, 3)],
[('Equalize', 0.7, 5), ('Invert', 0.1, 3)],
[('TranslateYAbs', 0.7, 9), ('TranslateYAbs', 0.7, 9)]]
exp0_3 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 1)],
[('TranslateYAbs', 0.8, 9), ('TranslateYAbs', 0.9, 9)],
[('AutoContrast', 0.8, 0), ('TranslateYAbs', 0.7, 9)],
[('TranslateYAbs', 0.2, 7), ('Color', 0.9, 6)],
[('Equalize', 0.7, 6), ('Color', 0.4, 9)]]
exp1_0 = [
[('ShearY', 0.2, 7), ('Posterize2', 0.3, 7)],
[('Color', 0.4, 3), ('Brightness', 0.6, 7)],
[('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],
[('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],
[('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)]]
exp1_1 = [
[('Brightness', 0.3, 7), ('AutoContrast', 0.5, 8)],
[('AutoContrast', 0.9, 4), ('AutoContrast', 0.5, 6)],
[('Solarize', 0.3, 5), ('Equalize', 0.6, 5)],
[('TranslateYAbs', 0.2, 4), ('Sharpness', 0.3, 3)],
[('Brightness', 0.0, 8), ('Color', 0.8, 8)]]
exp1_2 = [
[('Solarize', 0.2, 6), ('Color', 0.8, 6)],
[('Solarize', 0.2, 6), ('AutoContrast', 0.8, 1)],
[('Solarize', 0.4, 1), ('Equalize', 0.6, 5)],
[('Brightness', 0.0, 0), ('Solarize', 0.5, 2)],
[('AutoContrast', 0.9, 5), ('Brightness', 0.5, 3)]]
exp1_3 = [
[('Contrast', 0.7, 5), ('Brightness', 0.0, 2)],
[('Solarize', 0.2, 8), ('Solarize', 0.1, 5)],
[('Contrast', 0.5, 1), ('TranslateYAbs', 0.2, 9)],
[('AutoContrast', 0.6, 5), ('TranslateYAbs', 0.0, 9)],
[('AutoContrast', 0.9, 4), ('Equalize', 0.8, 4)]]
exp1_4 = [
[('Brightness', 0.0, 7), ('Equalize', 0.4, 7)],
[('Solarize', 0.2, 5), ('Equalize', 0.7, 5)],
[('Equalize', 0.6, 8), ('Color', 0.6, 2)],
[('Color', 0.3, 7), ('Color', 0.2, 4)],
[('AutoContrast', 0.5, 2), ('Solarize', 0.7, 2)]]
exp1_5 = [
[('AutoContrast', 0.2, 0), ('Equalize', 0.1, 0)],
[('ShearY', 0.6, 5), ('Equalize', 0.6, 5)],
[('Brightness', 0.9, 3), ('AutoContrast', 0.4, 1)],
[('Equalize', 0.8, 8), ('Equalize', 0.7, 7)],
[('Equalize', 0.7, 7), ('Solarize', 0.5, 0)]]
exp1_6 = [
[('Equalize', 0.8, 4), ('TranslateYAbs', 0.8, 9)],
[('TranslateYAbs', 0.8, 9), ('TranslateYAbs', 0.6, 9)],
[('TranslateYAbs', 0.9, 0), ('TranslateYAbs', 0.5, 9)],
[('AutoContrast', 0.5, 3), ('Solarize', 0.3, 4)],
[('Solarize', 0.5, 3), ('Equalize', 0.4, 4)]]
exp2_0 = [
[('Color', 0.7, 7), ('TranslateXAbs', 0.5, 8)],
[('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],
[('TranslateYAbs', 0.4, 3), ('Sharpness', 0.2, 6)],
[('Brightness', 0.9, 6), ('Color', 0.2, 8)],
[('Solarize', 0.5, 2), ('Invert', 0.0, 3)]]
exp2_1 = [
[('AutoContrast', 0.1, 5), ('Brightness', 0.0, 0)],
[('CutoutAbs', 0.2, 4), ('Equalize', 0.1, 1)],
[('Equalize', 0.7, 7), ('AutoContrast', 0.6, 4)],
[('Color', 0.1, 8), ('ShearY', 0.2, 3)],
[('ShearY', 0.4, 2), ('Rotate', 0.7, 0)]]
exp2_2 = [
[('ShearY', 0.1, 3), ('AutoContrast', 0.9, 5)],
[('TranslateYAbs', 0.3, 6), ('CutoutAbs', 0.3, 3)],
[('Equalize', 0.5, 0), ('Solarize', 0.6, 6)],
[('AutoContrast', 0.3, 5), ('Rotate', 0.2, 7)],
[('Equalize', 0.8, 2), ('Invert', 0.4, 0)]]
exp2_3 = [
[('Equalize', 0.9, 5), ('Color', 0.7, 0)],
[('Equalize', 0.1, 1), ('ShearY', 0.1, 3)],
[('AutoContrast', 0.7, 3), ('Equalize', 0.7, 0)],
[('Brightness', 0.5, 1), ('Contrast', 0.1, 7)],
[('Contrast', 0.1, 4), ('Solarize', 0.6, 5)]]
exp2_4 = [
[('Solarize', 0.2, 3), ('ShearX', 0.0, 0)],
[('TranslateXAbs', 0.3, 0), ('TranslateXAbs', 0.6, 0)],
[('Equalize', 0.5, 9), ('TranslateYAbs', 0.6, 7)],
[('ShearX', 0.1, 0), ('Sharpness', 0.5, 1)],
[('Equalize', 0.8, 6), ('Invert', 0.3, 6)]]
exp2_5 = [
[('AutoContrast', 0.3, 9), ('CutoutAbs', 0.5, 3)],
[('ShearX', 0.4, 4), ('AutoContrast', 0.9, 2)],
[('ShearX', 0.0, 3), ('Posterize2', 0.0, 3)],
[('Solarize', 0.4, 3), ('Color', 0.2, 4)],
[('Equalize', 0.1, 4), ('Equalize', 0.7, 6)]]
exp2_6 = [
[('Equalize', 0.3, 8), ('AutoContrast', 0.4, 3)],
[('Solarize', 0.6, 4), ('AutoContrast', 0.7, 6)],
[('AutoContrast', 0.2, 9), ('Brightness', 0.4, 8)],
[('Equalize', 0.1, 0), ('Equalize', 0.0, 6)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 4)]]
exp2_7 = [
[('Equalize', 0.5, 5), ('AutoContrast', 0.1, 2)],
[('Solarize', 0.5, 5), ('AutoContrast', 0.9, 5)],
[('AutoContrast', 0.6, 1), ('AutoContrast', 0.7, 8)],
[('Equalize', 0.2, 0), ('AutoContrast', 0.1, 2)],
[('Equalize', 0.6, 9), ('Equalize', 0.4, 4)]]
exp0s = exp0_0 + exp0_1 + exp0_2 + exp0_3
exp1s = exp1_0 + exp1_1 + exp1_2 + exp1_3 + exp1_4 + exp1_5 + exp1_6
exp2s = exp2_0 + exp2_1 + exp2_2 + exp2_3 + exp2_4 + exp2_5 + exp2_6 + exp2_7
return exp0s + exp1s + exp2s
_PARAMETER_MAX = 10
def float_parameter(level, maxval):
return float(level) * maxval / _PARAMETER_MAX
def int_parameter(level, maxval):
return int(float_parameter(level, maxval))
def no_duplicates(f):
def wrap_remove_duplicates():
policies = f()
return remove_deplicates(policies)
return wrap_remove_duplicates
def remove_deplicates(policies):
s = set()
new_policies = []
for ops in policies:
key = []
for op in ops:
key.append(op[0])
key = '_'.join(key)
if key in s:
continue
else:
s.add(key)
new_policies.append(ops)
return new_policies
def policy_decoder(augment, num_policy, num_op):
op_list = augment_list(False)
policies = []
for i in range(num_policy):
ops = []
for j in range(num_op):
op_idx = augment['policy_%d_%d' % (i, j)]
op_prob = augment['prob_%d_%d' % (i, j)]
op_level = augment['level_%d_%d' % (i, j)]
ops.append((op_list[op_idx][0].__name__, op_prob, op_level))
policies.append(ops)
return policies
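# Worked example (added comment; not part of the original file): `augment` is a flat
# dict keyed by 'policy_i_j', 'prob_i_j' and 'level_i_j'. For num_policy=1, num_op=2:
#   augment = {'policy_0_0': 4, 'prob_0_0': 0.7, 'level_0_0': 0.3,
#              'policy_0_1': 7, 'prob_0_1': 0.5, 'level_0_1': 0.9}
#   policy_decoder(augment, 1, 2)
#   -> [[('Rotate', 0.7, 0.3), ('Equalize', 0.5, 0.9)]]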
|
archai/archai/supergraph/datasets/augmentation.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/augmentation.py",
"repo_id": "archai",
"token_count": 10051
}
| 329 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torchvision
from overrides import overrides
from torchvision.transforms import transforms
from archai.common import utils
from archai.common.config import Config
from archai.supergraph.datasets.dataset_provider import (
DatasetProvider,
ImgSize,
TrainTestDatasets,
register_dataset_provider,
)
class MnistProvider(DatasetProvider):
def __init__(self, conf_dataset:Config):
super().__init__(conf_dataset)
self._dataroot = utils.full_path(conf_dataset['dataroot'])
@overrides
def get_datasets(self, load_train:bool, load_test:bool,
transform_train, transform_test)->TrainTestDatasets:
trainset, testset = None, None
if load_train:
trainset = torchvision.datasets.MNIST(root=self._dataroot, train=True,
download=True, transform=transform_train)
if load_test:
testset = torchvision.datasets.MNIST(root=self._dataroot, train=False,
download=True, transform=transform_test)
return trainset, testset
@overrides
def get_transforms(self, img_size:ImgSize)->tuple:
MEAN = [0.13066051707548254]
STD = [0.30810780244715075]
transf = [
transforms.RandomAffine(degrees=15, translate=(0.1, 0.1),
scale=(0.9, 1.1), shear=0.1)
]
normalize = [
transforms.ToTensor(),
transforms.Normalize(MEAN, STD)
]
train_transform = transforms.Compose(transf + normalize)
test_transform = transforms.Compose(normalize)
return train_transform, test_transform
register_dataset_provider('mnist', MnistProvider)
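# Hedged usage note (added comment; not part of the original file): with the
# registration above, a dataset config whose name resolves to 'mnist' is expected to
# be served by this provider; the exact lookup lives in
# archai.supergraph.datasets.dataset_provider / data.get_data and is not shown here.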
|
archai/archai/supergraph/datasets/providers/mnist_provider.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/providers/mnist_provider.py",
"repo_id": "archai",
"token_count": 753
}
| 330 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Note: All classes in this file need to be deepcopy-compatible because
descs are used as templates to create copies by the macro builder.
"""
import copy
import os
import pathlib
from enum import Enum
from typing import List, Mapping, Optional, Union
import torch
import yaml
from archai.common import utils
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
logger = get_global_logger()
# Each tensor shape is a list
# A layer can output multiple tensors, so its shapes are a TensorShapes
# The list of all layer outputs is a TensorShapesList
TensorShape=List[Union[int, float]]
TensorShapes=List[TensorShape]
TensorShapesList=List[TensorShapes]
class ConvMacroParams:
"""Holds parameters that may be altered by macro architecture"""
def __init__(self, ch_in:int, ch_out:int) -> None:
self.ch_in, self.ch_out = ch_in, ch_out
def clone(self)->'ConvMacroParams':
return copy.deepcopy(self)
class OpDesc:
"""Op description that is in each edge"""
def __init__(self, name:str, params:dict, in_len:int,
trainables:Optional[Mapping],
children:Optional[List['OpDesc']]=None,
children_ins:Optional[List[int]]=None)->None:
self.name = name
self.in_len = in_len
self.params = params # parameters specific to op needed to construct it
self.trainables = trainables # TODO: make this private due to clear_trainable
# If op is keeping any child op then it should save it in children.
# This way we can control state_dict of children.
self.children = children
self.children_ins = children_ins
def clone(self, clone_trainables=True)->'OpDesc':
cloned = copy.deepcopy(self)
if not clone_trainables:
cloned.clear_trainables()
return cloned
def clear_trainables(self)->None:
self.trainables = None
if self.children is not None:
for child in self.children:
child.clear_trainables()
def state_dict(self)->dict:
return {
'trainables': self.trainables,
'children': [child.state_dict() if child is not None else None
for child in self.children] \
if self.children is not None else None
}
def load_state_dict(self, state_dict)->None:
self.trainables = state_dict['trainables']
c, cs = self.children, state_dict['children']
assert (c is None and cs is None) or \
(c is not None and cs is not None and len(c) == len(cs))
# TODO: when c and cs are both none, zip throws an error that the
# first argument should be iterable
if (c is None and cs is None):
return
for cx, csx in utils.zip_eq(c, cs):
if cx is not None and csx is not None:
cx.load_state_dict(csx)
class EdgeDesc:
"""Edge description between two nodes in the cell
"""
def __init__(self, op_desc:OpDesc, input_ids:List[int])->None:
assert op_desc.in_len == len(input_ids)
self.op_desc = op_desc
self.input_ids = input_ids
def clone(self, conv_params:Optional[ConvMacroParams], clear_trainables:bool)\
->'EdgeDesc':
        # Edge cloning is the same as a deep copy, except that we go through the
        # constructor to future-proof against additional rules and to allow
        # overriding conv_params and clearing weights.
e = EdgeDesc(self.op_desc.clone(), self.input_ids)
# op_desc should have params set from cloning. If no override supplied
# then don't change it
if conv_params is not None:
e.op_desc.params['conv'] = conv_params
if clear_trainables:
e.op_desc.clear_trainables()
return e
def clear_trainables(self)->None:
self.op_desc.clear_trainables()
def state_dict(self)->dict:
return {'op_desc': self.op_desc.state_dict()}
def load_state_dict(self, state_dict)->None:
self.op_desc.load_state_dict(state_dict['op_desc'])
class NodeDesc:
def __init__(self, edges:List[EdgeDesc], conv_params:ConvMacroParams) -> None:
self.edges = edges
self.conv_params = conv_params
def clone(self):
# don't override conv_params or reset learned weights
# node cloning is currently equivalent to deep copy
return NodeDesc(edges=[e.clone(conv_params=None, clear_trainables=False)
for e in self.edges], conv_params=self.conv_params)
def clear_trainables(self)->None:
for edge in self.edges:
edge.clear_trainables()
def state_dict(self)->dict:
return { 'edges': [e.state_dict() for e in self.edges] }
def load_state_dict(self, state_dict)->None:
for e, es in zip(self.edges, state_dict['edges']):
e.load_state_dict(es)
class AuxTowerDesc:
def __init__(self, ch_in:int, n_classes:int, stride:int) -> None:
self.ch_in = ch_in
self.n_classes = n_classes
self.stride = stride
class CellType(Enum):
Regular = 'regular'
Reduction = 'reduction'
class CellDesc:
def __init__(self, id:int, cell_type:CellType, conf_cell:Config,
stems:List[OpDesc], stem_shapes:TensorShapes,
nodes:List[NodeDesc], node_shapes: TensorShapes,
post_op:OpDesc, out_shape:TensorShape, trainables_from:int)->None:
self.cell_type = cell_type
self.id = id
self.conf_cell = conf_cell
self.stems = stems
self.stem_shapes = stem_shapes
self.out_shape = out_shape
self.trainables_from = trainables_from
self.reset_nodes(nodes, node_shapes, post_op, out_shape)
def clone(self, id:int)->'CellDesc':
c = copy.deepcopy(self) # note that trainables_from is also cloned
c.id = id
return c
def clear_trainables(self)->None:
for stem in self.stems:
stem.clear_trainables()
for node in self._nodes:
node.clear_trainables()
self.post_op.clear_trainables()
def state_dict(self)->dict:
return {
'id': self.id,
'cell_type': self.cell_type,
'stems': [s.state_dict() for s in self.stems],
'stem_shapes': self.stem_shapes,
'nodes': [n.state_dict() for n in self.nodes()],
'node_shapes': self.node_shapes,
'post_op': self.post_op.state_dict(),
'out_shape': self.out_shape
}
def load_state_dict(self, state_dict)->None:
assert self.id == state_dict['id']
assert self.cell_type == state_dict['cell_type']
for s, ss in utils.zip_eq(self.stems, state_dict['stems']):
s.load_state_dict(ss)
self.stem_shapes = state_dict['stem_shapes']
for n, ns in utils.zip_eq(self.nodes(), state_dict['nodes']):
n.load_state_dict(ns)
self.node_shapes = state_dict['node_shapes']
self.post_op.load_state_dict(state_dict['post_op'])
self.out_shape = state_dict['out_shape']
def reset_nodes(self, nodes:List[NodeDesc], node_shapes:TensorShapes,
post_op:OpDesc, out_shape:TensorShape)->None:
self._nodes = nodes
self.node_shapes = node_shapes
self.post_op = post_op
self.out_shape = out_shape
def nodes(self)->List[NodeDesc]:
return self._nodes
def all_empty(self)->bool:
return len(self._nodes)==0 or all((len(n.edges)==0 for n in self._nodes))
def all_full(self)->bool:
return len(self._nodes)>0 and all((len(n.edges)>0 for n in self._nodes))
class ModelDesc:
def __init__(self, conf_model_desc:Config, model_stems:List[OpDesc], pool_op:OpDesc,
cell_descs:List[CellDesc], aux_tower_descs:List[Optional[AuxTowerDesc]],
logits_op:OpDesc)->None:
self.conf_model_desc = conf_model_desc
conf_dataset = conf_model_desc['dataset']
self.ds_ch:int = conf_dataset['channels']
self.n_classes:int = conf_dataset['n_classes']
self.params = conf_model_desc['params'].to_dict()
self.max_final_edges:int = conf_model_desc['max_final_edges']
self.model_stems, self.pool_op = model_stems, pool_op
self.logits_op = logits_op
self.reset_cells(cell_descs, aux_tower_descs)
def reset_cells(self, cell_descs:List[CellDesc],
aux_tower_descs:List[Optional[AuxTowerDesc]])->None:
assert len(cell_descs) == len(aux_tower_descs)
        # every cell should have a unique ID so we can tell where arch params are shared
assert len(set(c.id for c in cell_descs)) == len(cell_descs)
self._cell_descs = cell_descs
self.aux_tower_descs = aux_tower_descs
def clear_trainables(self)->None:
for stem in self.model_stems:
stem.clear_trainables()
for attr in ['pool_op', 'logits_op']:
op_desc:OpDesc = getattr(self, attr)
op_desc.clear_trainables()
for cell_desc in self._cell_descs:
cell_desc.clear_trainables()
def cell_descs(self)->List[CellDesc]:
return self._cell_descs
def cell_type_count(self, cell_type:CellType)->int:
return sum(1 for c in self._cell_descs if c.cell_type==cell_type)
def clone(self)->'ModelDesc':
return copy.deepcopy(self)
def has_aux_tower(self)->bool:
return any(self.aux_tower_descs)
def all_empty(self)->bool:
return len(self._cell_descs)==0 or \
all((c.all_empty() for c in self._cell_descs))
def all_full(self)->bool:
return len(self._cell_descs)>0 and \
all((c.all_full() for c in self._cell_descs))
def state_dict(self)->dict:
return {
'cell_descs': [c.state_dict() for c in self.cell_descs()],
'model_stems': [stem.state_dict() for stem in self.model_stems],
'pool_op': self.pool_op.state_dict(),
'logits_op': self.logits_op.state_dict()
}
def load_state_dict(self, state_dict)->None:
for c, cs in utils.zip_eq(self.cell_descs(), state_dict['cell_descs']):
c.load_state_dict(cs)
for stem, state in utils.zip_eq(self.model_stems, state_dict['model_stems']):
stem.load_state_dict(state)
self.pool_op.load_state_dict(state_dict['pool_op'])
self.logits_op.load_state_dict(state_dict['logits_op'])
def save(self, filename:str, save_trainables=False)->Optional[str]:
if filename:
filename = utils.full_path(filename)
if save_trainables:
state_dict = self.state_dict()
pt_filepath = ModelDesc._pt_filepath(filename)
torch.save(state_dict, pt_filepath)
# save yaml
cloned = self.clone()
cloned.clear_trainables()
utils.write_string(filename, yaml.dump(cloned))
return filename
@staticmethod
def _pt_filepath(desc_filepath:str)->str:
# change file extension
return str(pathlib.Path(desc_filepath).with_suffix('.pth'))
@staticmethod
def load(filename:str, load_trainables=False)->'ModelDesc':
filename = utils.full_path(filename)
if not filename or not os.path.exists(filename):
raise RuntimeError("Model description file is not found."
"Typically this file should be generated from the search."
"Please copy this file to '{}'".format(filename))
logger.info({'final_desc_filename': filename})
with open(filename, 'r') as f:
model_desc = yaml.load(f, Loader=yaml.Loader)
if load_trainables:
# look for pth file that should have pytorch parameters state_dict
pt_filepath = ModelDesc._pt_filepath(filename)
if os.path.exists(pt_filepath):
state_dict = torch.load(pt_filepath, map_location=torch.device('cpu'))
model_desc.load_state_dict(state_dict)
# else no need to restore weights
return model_desc
|
archai/archai/supergraph/nas/model_desc.py/0
|
{
"file_path": "archai/archai/supergraph/nas/model_desc.py",
"repo_id": "archai",
"token_count": 5656
}
| 331 |
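ModelDesc.save writes the architecture description as YAML and, when save_trainables=True, a sibling .pth file holding the state_dict of trainables; ModelDesc.load reverses the process. A minimal usage sketch, assuming a model_desc instance already produced by the search or macro builder (the file name is arbitrary):

from archai.supergraph.nas.model_desc import ModelDesc

# `model_desc` is assumed to already exist, e.g. built by the macro builder.
saved_path = model_desc.save('final_model_desc.yaml', save_trainables=True)
# save() also writes 'final_model_desc.pth' with the trainables' state_dict.

# Restore the YAML description and re-attach the saved trainables.
restored = ModelDesc.load(saved_path, load_trainables=True)
print(len(restored.cell_descs()), restored.has_aux_tower())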
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Optional, Tuple
import torch
from overrides import EnforceOverrides
from torch import Tensor, nn
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.optimizer import Optimizer
from torch.utils.data import DataLoader
from archai.common import ml_utils
from archai.common.apex_utils import ApexUtils
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.datasets import data
from archai.supergraph.utils.checkpoint import CheckPoint
from archai.supergraph.utils.metrics import Metrics
from archai.supergraph.utils.multi_optim import MultiOptim, OptimSched
from archai.supergraph.utils.tester import Tester
logger = get_global_logger()
class Trainer(EnforceOverrides):
def __init__(self, conf_train:Config, model:nn.Module,
checkpoint:Optional[CheckPoint]=None)->None:
# region config vars
self.conf_train = conf_train
conf_lossfn = conf_train['lossfn']
self._aux_weight = conf_train['aux_weight']
self._grad_clip = conf_train['grad_clip']
self._drop_path_prob = conf_train['drop_path_prob']
self._logger_freq = conf_train['logger_freq']
self._title = conf_train['title']
self._epochs = conf_train['epochs']
self.conf_optim = conf_train['optimizer']
self.conf_sched = conf_train['lr_schedule']
self.batch_chunks = conf_train['batch_chunks']
conf_validation = conf_train['validation']
conf_apex = conf_train['apex']
self._validation_freq = 0 if conf_validation is None else conf_validation['freq']
# endregion
logger.pushd(self._title + '__init__')
self._apex = ApexUtils(conf_apex)
self._checkpoint = checkpoint
self.model = model
self._lossfn = ml_utils.get_lossfn(conf_lossfn)
# using separate apex for Tester is not possible because we must use
# same distributed model as Trainer and hence they must share apex
self._tester = Tester(conf_validation, model, self._apex) \
if conf_validation else None
self._metrics:Optional[Metrics] = None
self._droppath_module = self._get_droppath_module()
if self._droppath_module is None and self._drop_path_prob > 0.0:
logger.warn({'droppath_module': None})
self._start_epoch = -1 # nothing is started yet
logger.popd()
def fit(self, data_loaders:data.DataLoaders)->Metrics:
logger.pushd(self._title)
assert data_loaders.train_dl is not None
self._metrics = Metrics(self._title, self._apex, logger_freq=self._logger_freq)
# create optimizers and schedulers
self._multi_optim = self.create_multi_optim(len(data_loaders.train_dl))
# before checkpoint restore, convert to amp
self.model = self._apex.to_amp(self.model, self._multi_optim,
batch_size=data_loaders.train_dl.batch_size) # pyright: ignore[reportGeneralTypeIssues]
self._lossfn = self._lossfn.to(self.get_device())
self.pre_fit(data_loaders)
# we need to restore checkpoint after all objects are created because
# restoring checkpoint requires load_state_dict calls on these objects
self._start_epoch = 0
# do we have a checkpoint
checkpoint_avail = self._checkpoint is not None
checkpoint_val = checkpoint_avail and 'trainer' in self._checkpoint # pyright: ignore[reportGeneralTypeIssues]
resumed = False
if checkpoint_val:
# restore checkpoint
resumed = True
self.restore_checkpoint()
elif checkpoint_avail: # TODO: bad checkpoint?
self._checkpoint.clear()
logger.warn({'resumed': resumed, 'checkpoint_avail': checkpoint_avail,
'checkpoint_val': checkpoint_val,
'start_epoch': self._start_epoch,
'total_epochs': self._epochs})
logger.info({'aux_weight': self._aux_weight,
'grad_clip': self._grad_clip,
'drop_path_prob': self._drop_path_prob,
'validation_freq': self._validation_freq,
'batch_chunks': self.batch_chunks})
if self._start_epoch >= self._epochs:
logger.warn(f'fit done because start_epoch {self._start_epoch}>={self._epochs}')
return self.get_metrics() # we already finished the run, we might be checkpointed
logger.pushd('epochs')
for epoch in range(self._start_epoch, self._epochs):
logger.pushd(epoch)
self._set_epoch(epoch, data_loaders)
self.pre_epoch(data_loaders)
self._train_epoch(data_loaders.train_dl)
self.post_epoch(data_loaders)
logger.popd()
logger.popd()
self.post_fit(data_loaders)
# make sure we don't keep references to the graph
del self._multi_optim
logger.popd()
return self.get_metrics()
def create_multi_optim(self, train_len:int)->MultiOptim:
logger.info({'steps_per_epoch': train_len,
'conf_sched': self.conf_sched.to_dict()})
logger.info({'conf_optim': self.conf_optim.to_dict()})
# optimizers, schedulers needs to be recreated for each fit call
# as they have state specific to each run
optim = self.create_optimizer(self.conf_optim, self.model.parameters())
# create scheduler for optim before applying amp
sched, sched_on_epoch = self.create_scheduler(self.conf_sched, optim, train_len)
multi_optim = MultiOptim()
multi_optim.append(OptimSched(optim, sched, sched_on_epoch))
logger.info({'multi_optim_len': len(multi_optim)})
return multi_optim
def create_optimizer(self, conf_optim:Config, params)->Optimizer:
optim = ml_utils.create_optimizer(conf_optim, params)
return optim
def create_scheduler(self, conf_sched:Config, optim:Optimizer, steps_per_epoch:int) \
->Tuple[Optional[_LRScheduler],bool]:
return ml_utils.create_lr_scheduler(conf_sched, self._epochs,
optim, steps_per_epoch)
def get_optimizer(self, index=0)->Optimizer:
return self._multi_optim[index].optim
def get_scheduler(self, index=0)->Optional[_LRScheduler]:
return self._multi_optim[index].sched
def get_metrics(self)->Metrics:
return self._metrics # pyright: ignore[reportGeneralTypeIssues]
def _set_epoch(self, epoch:int, data_loaders:data.DataLoaders)->None:
# optimizers such as bi-level may use val set for its own use
# which causes reshuffling due to automatic epoch counting
# here we make sure that val_dl has same epoch as train_dl
if hasattr(data_loaders.train_dl.sampler, 'set_epoch'):
data_loaders.train_dl.sampler.set_epoch(epoch) # pyright: ignore[reportGeneralTypeIssues,reportOptionalMemberAccess]
if data_loaders.val_dl is not None and hasattr(data_loaders.val_dl.sampler, 'set_epoch'):
data_loaders.val_dl.sampler.set_epoch(epoch) # pyright: ignore[reportGeneralTypeIssues]
# apply droppath
self._set_drop_path(epoch, self._epochs)
assert self._metrics.epochs() == epoch
######################### hooks #########################
def pre_fit(self, data_loaders:data.DataLoaders)->None:
self._metrics.pre_run()
def post_fit(self, data_loaders:data.DataLoaders)->None:
test_metrics = None
# first run test before checkpointing, otherwise we won't have val metrics
if data_loaders.test_dl and self._tester:
test_metrics = self._tester.test(data_loaders.test_dl)
self._metrics.post_run(test_metrics=test_metrics)
def pre_epoch(self, data_loaders:data.DataLoaders)->None:
self._metrics.pre_epoch(lr=self._multi_optim.get_lr(0, 0))
def post_epoch(self, data_loaders:data.DataLoaders)->None:
val_metrics = None
# first run test before checkpointing, otherwise we won't have val metrics
if data_loaders.val_dl and self._tester and self._validation_freq > 0:
if self._metrics.epochs() % self._validation_freq == 0 or \
self._metrics.epochs() >= self._epochs: # last epoch
                # these asserts make sure train and val are not overlapping
# assert train_dl.sampler.epoch == val_dl.sampler.epoch
# tidx = list(train_dl.sampler)
# vidx = list(val_dl.sampler)
# assert all(ti not in vidx for ti in tidx)
val_metrics = self._tester.test(data_loaders.val_dl)
# update val metrics
self._metrics.post_epoch(lr=self._multi_optim.get_lr(0, 0), val_metrics=val_metrics)
# checkpoint if enabled with given freq or if this is the last epoch
if self._checkpoint is not None and self._apex.is_master() and \
self._checkpoint.freq > 0 and (self._metrics.epochs() % self._checkpoint.freq == 0 or \
self._metrics.epochs() >= self._epochs):
self._checkpoint.new()
self.update_checkpoint(self._checkpoint)
self._checkpoint.commit()
def pre_step(self, x:Tensor, y:Tensor)->None:
self._metrics.pre_step(x, y)
def post_step(self, x:Tensor, y:Tensor, logits:Tensor, loss:Tensor,
steps:int)->None:
self._metrics.post_step(x, y, logits, loss, steps)
######################### hooks #########################
def get_device(self):
return self._apex.device
def restore_checkpoint(self)->None:
state = self._checkpoint['trainer']
last_epoch = state['last_epoch']
assert last_epoch >= 0 and last_epoch < self._epochs
self._metrics.load_state_dict(state['metrics'])
assert self._metrics.epochs() == last_epoch+1
self._apex.load_state_dict(state['amp'])
self.model.load_state_dict(state['model'])
self._multi_optim.load_state_dict(state['multi_optim'])
self._start_epoch = last_epoch + 1
def epoch(self)->int:
return self._metrics.epochs()
def update_checkpoint(self, checkpoint:CheckPoint)->None:
# TODO: Don't need to pass checkpoint
        # save all necessary state
state = {
'last_epoch': self._metrics.epochs()-1,
'metrics': self._metrics.state_dict(),
'model': self.model.state_dict(),
'multi_optim': self._multi_optim.state_dict(),
'amp': self._apex.state_dict()
}
self._checkpoint['trainer'] = state
def _train_epoch(self, train_dl: DataLoader)->None:
steps = len(train_dl)
self.model.train()
logger.pushd('steps')
for step, (x, y) in enumerate(train_dl):
logger.pushd(step)
assert self.model.training # derived class might alter the mode
# TODO: please check that no algorithm is invalidated by swapping prestep with zero grad
self._multi_optim.zero_grad()
self.pre_step(x, y)
            # divide batch into chunks if needed so it fits in GPU RAM
if self.batch_chunks > 1:
x_chunks, y_chunks = torch.chunk(x, self.batch_chunks), torch.chunk(y, self.batch_chunks)
else:
x_chunks, y_chunks = (x,), (y,)
logits_chunks = []
loss_sum, loss_count = 0.0, 0
for xc, yc in zip(x_chunks, y_chunks):
xc, yc = xc.to(self.get_device(), non_blocking=True), yc.to(self.get_device(), non_blocking=True)
with self._apex.autocast():
logits_c, aux_logits = self.model(xc), None
                    tupled_out = isinstance(logits_c, tuple) and len(logits_c) >= 2
# if self._aux_weight: # TODO: some other way to validate?
# assert tupled_out, "aux_logits cannot be None unless aux tower is disabled"
if tupled_out: # then we are using model created by desc
logits_c, aux_logits = logits_c[0], logits_c[1]
loss_c = self.compute_loss(self._lossfn, yc, logits_c, # pyright: ignore[reportGeneralTypeIssues]
self._aux_weight, aux_logits)
self._apex.backward(loss_c)
loss_sum += loss_c.item() * len(logits_c)
loss_count += len(logits_c)
# TODO: cannot place on CPU if it was half precision but should we somehow?
logits_chunks.append(logits_c.detach()) # pyright: ignore[reportGeneralTypeIssues]
# TODO: original darts clips alphas as well but pt.darts doesn't
self._apex.clip_grad(self._grad_clip, self.model, self._multi_optim)
self._apex.step(self._multi_optim)
            # TODO: we possibly need to sync so all replicas are up to date
self._apex.sync_devices()
# TODO: we need to put y on GPU because logits are on GPU. Is this good idea from GPU mem perspective?
self.post_step(x, y.to(self.get_device(), non_blocking=True),
ml_utils.join_chunks(logits_chunks),
torch.tensor(loss_sum/loss_count),
steps)
logger.popd()
# end of step
self._multi_optim.epoch()
logger.popd()
def compute_loss(self, lossfn:Callable, y:Tensor, logits:Tensor,
aux_weight:float, aux_logits:Optional[Tensor])->Tensor:
loss = lossfn(logits, y)
if aux_weight > 0.0 and aux_logits is not None:
loss += aux_weight * lossfn(aux_logits, y)
return loss
def _get_droppath_module(self)->Optional[nn.Module]:
m = self.model
if hasattr(self.model, 'module'): # for data parallel model
m = self.model.module
if hasattr(m, 'drop_path_prob'):
return m # pyright: ignore[reportGeneralTypeIssues]
return None
def _set_drop_path(self, epoch:int, epochs:int)->None:
if self._drop_path_prob and self._droppath_module is not None:
drop_prob = self._drop_path_prob * epoch / epochs
# set value as property in model (it will be used by forward())
            # this is necessary when using DataParallel(model)
# https://github.com/pytorch/pytorch/issues/16885
m = self.model
if hasattr(self.model, 'module'): # for data parallel model
m = self.model.module
if hasattr(m, 'drop_path_prob'):
m.drop_path_prob(drop_prob) # pyright: ignore[reportGeneralTypeIssues]
else:
raise RuntimeError('Drop path value {} was specified but model'
' does not have drop_path_prob() method'\
.format(self._drop_path_prob))
|
archai/archai/supergraph/utils/trainer.py/0
|
{
"file_path": "archai/archai/supergraph/utils/trainer.py",
"repo_id": "archai",
"token_count": 6886
}
| 332 |
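Trainer exposes pre/post hooks around the fit, epoch, and step boundaries, so custom behavior is usually added by subclassing rather than by editing the training loop. A minimal sketch is shown below; MyTrainer and its print statement are illustrative only.

from overrides import overrides
from torch import Tensor
from archai.supergraph.datasets import data
from archai.supergraph.utils.trainer import Trainer

class MyTrainer(Trainer):
    @overrides
    def post_epoch(self, data_loaders:data.DataLoaders)->None:
        # keep the built-in validation and checkpointing behavior
        super().post_epoch(data_loaders)
        print(f'finished epoch {self.epoch()}')

    @overrides
    def post_step(self, x:Tensor, y:Tensor, logits:Tensor, loss:Tensor, steps:int)->None:
        super().post_step(x, y, logits, loss, steps)
        # per-step logging or custom metrics could go here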
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from dataclasses import asdict, dataclass, field
from typing import Any, Dict, Optional, Tuple
import numpy as np
import torch
from archai.common.distributed_utils import (
get_world_size,
init_distributed,
sync_workers,
)
from archai.common.file_utils import get_full_path
def _get_amlt_dirs() -> Tuple[str, str]:
data_dir = os.environ.get("AMLT_DATA_DIR", "")
output_dir = os.environ.get("AMLT_OUTPUT_DIR", "")
return data_dir, output_dir
def _get_default_dataroot() -> str:
is_amlt_available = os.environ.get("AMLT_OUTPUT_DIR", None)
return "/var/tmp/dataroot" if is_amlt_available else "~/dataroot"
def _create_dirs(
dataroot: str,
dataset_name: str,
experiment_name: Optional[str] = "tmp",
output_dir: Optional[str] = "~/logdir",
pretrained_path: Optional[str] = "",
cache_dir: Optional[str] = "",
) -> Tuple[str, str, str, str]:
def _get_dataset_dir_name(dataset_name: str) -> str:
if dataset_name == "wt2":
return "wikitext-2"
if dataset_name == "wt103":
return "wikitext-103"
if dataset_name == "lm1b":
return "one-billion-words"
if dataset_name.startswith("olx_"):
return dataset_name
raise RuntimeError(f"Dataset: {dataset_name} is not supported yet.")
pt_data_dir, pt_output_dir = _get_amlt_dirs()
if pt_output_dir:
pt_output_dir = os.path.join(pt_output_dir, experiment_name)
dataroot = dataroot or pt_data_dir or _get_default_dataroot()
dataroot = get_full_path(dataroot)
dataset_dir = get_full_path(os.path.join(dataroot, "textpred", _get_dataset_dir_name(dataset_name)))
output_dir = get_full_path(pt_output_dir or os.path.join(output_dir, experiment_name), create_folder=True)
if not os.path.isabs(cache_dir):
cache_dir = os.path.join(dataset_dir, cache_dir)
cache_dir = get_full_path(cache_dir, create_folder=True)
if not os.path.isabs(pretrained_path) and pretrained_path:
pretrained_path = os.path.join(os.path.dirname(output_dir), pretrained_path)
return dataset_dir, output_dir, pretrained_path, cache_dir
@dataclass
class NvidiaTrainingArguments:
"""Define arguments used in the NVIDIA training pipeline.
Args:
experiment_name: Name of the experiment.
checkpoint_file_path: Path to the checkpoint file.
output_dir: Output folder.
seed: Random seed.
no_cuda: Whether CUDA should not be used.
logging_steps: Number of steps between logs.
do_eval: Whether to enable evaluation.
eval_steps: Number of steps between evaluations.
save_all_checkpoints: Whether to save all checkpoints from `eval_steps` steps.
dataset_name: Name of the dataset.
dataset_dir: Dataset folder.
dataset_cache_dir: Dataset cache folder.
dataset_refresh_cache: Whether cache should be refreshed.
vocab_type: Name of the vocabulary/tokenizer.
vocab_size: Size of the vocabulary.
iterator_roll: Whether iterator should be rolled.
global_batch_size: Global batch size.
per_device_global_batch_size: Individual GPU batch size.
seq_len: Sequence length.
strategy: Distributed training strategy.
local_rank: Local rank of process.
find_unused_parameters: Whether unused parameters should be found.
max_steps: Maximum number of training steps.
gradient_accumulation_steps: Number of gradient accumulation steps.
fp16: Whether FP16 precision should be used.
optim: Name of the optimizer.
learning_rate: Optimizer learning rate.
weight_decay: Optimizer weight decay.
momentum: Optimizer momentum.
max_grad_norm: Optimizer gradients clipping value.
lr_scheduler_type: Name of the scheduler.
lr_qat_scheduler_type: Name of the QAT-based scheduler.
lr_scheduler_max_steps: Maximum number of scheduler steps.
lr_scheduler_warmup_steps: Number of warmup steps for the scheduler.
lr_scheduler_patience: Scheduler patience.
lr_scheduler_min_lr: Scheduler minimum learning rate.
lr_scheduler_decay_rate: Scheduler decay rate.
qat: Whether QAT should be used during training.
mixed_qat: Whether MixedQAT should be used during training.
"""
experiment_name: str = field(metadata={"help": "Name of the experiment."})
checkpoint_file_path: str = field(default="", metadata={"help": "Path to the checkpoint file."})
output_dir: str = field(default="~/logdir", metadata={"help": "Output folder."})
seed: int = field(default=42, metadata={"help": "Random seed."})
no_cuda: bool = field(default=False, metadata={"help": "Whether CUDA should not be used."})
logging_steps: int = field(default=10, metadata={"help": "Number of steps between logs."})
do_eval: bool = field(default=True, metadata={"help": "Whether to enable evaluation."})
eval_steps: int = field(default=100, metadata={"help": "Number of steps between evaluations."})
save_all_checkpoints: bool = field(
default=False, metadata={"help": "Whether to save all checkpoints from `eval_steps` steps."}
)
dataset_name: str = field(default="wt103", metadata={"help": "Name of the dataset."})
dataset_dir: str = field(default="", metadata={"help": "Dataset folder."})
dataset_cache_dir: str = field(default="cache", metadata={"help": "Dataset cache folder."})
dataset_refresh_cache: bool = field(default=False, metadata={"help": "Whether cache should be refreshed."})
vocab_type: str = field(default="gpt2", metadata={"help": "Type of the tokenizer."})
    vocab_size: int = field(default=10000, metadata={"help": "Size of the vocabulary."})
iterator_roll: bool = field(default=True, metadata={"help": "Whether iterator should be rolled."})
global_batch_size: int = field(default=256, metadata={"help": "Global batch size."})
    per_device_global_batch_size: Optional[int] = field(default=None, metadata={"help": "Individual GPU batch size."})
seq_len: int = field(default=192, metadata={"help": "Sequence length."})
strategy: str = field(default="ddp", metadata={"help": "Multi-GPU strategy."})
local_rank: int = field(default=os.getenv("LOCAL_RANK", 0), metadata={"help": "Local rank of process."})
find_unused_parameters: bool = field(default=False, metadata={"help": "Whether unused parameters should be found."})
max_steps: int = field(default=40000, metadata={"help": "Maximum number of training steps."})
gradient_accumulation_steps: int = field(default=1, metadata={"help": "Number of gradient accumulation steps."})
fp16: bool = field(default=False, metadata={"help": "Whether FP16 precision should be used."})
optim: str = field(default="jitlamb", metadata={"help": "Name of the optimizer."})
learning_rate: float = field(default=0.01, metadata={"help": "Optimizer learning rate."})
weight_decay: float = field(default=0.0, metadata={"help": "Optimizer weight decay."})
momentum: float = field(default=0.0, metadata={"help": "Optimizer momentum."})
max_grad_norm: float = field(default=0.25, metadata={"help": "Optimizer gradients clipping value."})
lr_scheduler_type: str = field(default="cosine", metadata={"help": "Name of the scheduler."})
lr_qat_scheduler_type: str = field(default="cosine", metadata={"help": "Name of the QAT-based scheduler."})
    lr_scheduler_max_steps: Optional[int] = field(default=None, metadata={"help": "Maximum number of scheduler steps."})
lr_scheduler_warmup_steps: int = field(default=1000, metadata={"help": "Number of scheduler warmup steps."})
lr_scheduler_patience: float = field(default=0, metadata={"help": "Scheduler patience."})
lr_scheduler_min_lr: float = field(default=0.001, metadata={"help": "Scheduler minimum learning rate."})
lr_scheduler_decay_rate: float = field(default=0.5, metadata={"help": "Scheduler decay rate."})
qat: bool = field(default=False, metadata={"help": "Whether QAT should be used during training."})
mixed_qat: bool = field(default=False, metadata={"help": "Whether MixedQAT should be used during training."})
@property
def device(self) -> torch.device:
"""Return a PyTorch device instance."""
return torch.device("cuda" if not self.no_cuda else "cpu")
def __post_init__(self) -> None:
"""Override post-initialization with custom instructions.
Ensure that `qat` and `mixed_qat` are not used together, set the random seed,
initialize distributed training, create necessary directories,
and set the global batch size.
"""
assert not (self.qat and self.mixed_qat), "`qat` and `mixed_qat` should not be used together."
np.random.seed(self.seed)
torch.manual_seed(self.seed)
self.local_rank = int(self.local_rank)
if not self.no_cuda:
torch.cuda.set_device(self.local_rank)
init_distributed(True)
(self.dataset_dir, self.output_dir, self.checkpoint_file_path, self.dataset_cache_dir,) = _create_dirs(
self.dataset_dir,
self.dataset_name,
self.experiment_name,
self.output_dir,
self.checkpoint_file_path,
self.dataset_cache_dir,
)
with sync_workers() as rank:
if rank == 0:
os.makedirs(self.output_dir, exist_ok=True)
if self.per_device_global_batch_size is not None:
world_size = get_world_size()
self.global_batch_size = world_size * self.per_device_global_batch_size
def to_dict(self) -> Dict[str, Any]:
"""Convert attributes into a dictionary representation.
Returns:
Attributes encoded as a dictionary.
"""
return asdict(self)
|
archai/archai/trainers/nlp/nvidia_training_args.py/0
|
{
"file_path": "archai/archai/trainers/nlp/nvidia_training_args.py",
"repo_id": "archai",
"token_count": 3759
}
| 333 |
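NvidiaTrainingArguments is a plain dataclass, so configuring a run amounts to instantiating it; __post_init__ then seeds the RNGs, initializes distributed training, and resolves the dataset/output directories. A minimal sketch follows (the experiment name and overrides are arbitrary, and construction assumes a properly configured CUDA/distributed environment unless no_cuda is set).

from archai.trainers.nlp.nvidia_training_args import NvidiaTrainingArguments

args = NvidiaTrainingArguments(
    experiment_name='wt103_baseline',  # arbitrary example name
    dataset_name='wt103',
    max_steps=1000,
)
print(args.device)     # cuda unless no_cuda=True
print(args.to_dict())  # full configuration as a dictionary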