# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .sampler import Sampler
from .sample_methods import sample_multinomial, epsilon_greedy
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import sys
def uniform_multinomial(batchsize, num_action, use_cuda=True):
''' Sample with uniform probability.
Args:
batchsize(int): batch size
        num_action(int): size of the action space to sample from
use_cuda(bool): indicates if tensor is put on cuda
'''
# [TODO] Make the type more friendly
if use_cuda:
uniform_p = torch.cuda.FloatTensor(num_action).fill_(1.0 / num_action)
else:
uniform_p = torch.FloatTensor(num_action).fill_(1.0 / num_action)
return uniform_p.multinomial(batchsize, replacement=True)
def sample_with_check(probs, greedy=False):
"""multinomial sampling with out of bound check
Args:
probs(tensor): probability to sample from
greedy(bool): if ``True``, pick the action with maximum probability,
otherwise sample from it.
"""
num_action = probs.size(1)
if greedy:
_, actions = probs.max(1)
return actions
while True:
actions = probs.multinomial(1)[:, 0]
cond1 = (actions < 0).sum()
cond2 = (actions >= num_action).sum()
if cond1 == 0 and cond2 == 0:
return actions
print(
"Warning! sampling out of bound! cond1 = %d, cond2 = %d" %
(cond1, cond2))
print("prob = ")
print(probs)
print("action = ")
print(actions)
print("condition1 = ")
print(actions < 0)
print("condition2 = ")
print(actions >= num_action)
print("#actions = ")
print(num_action)
sys.stdout.flush()
def sample_eps_with_check(probs, epsilon, greedy=False):
"""multinomial sampling with out of bound check,
with at least ``epsilon`` probability
Args:
probs(tensor): probability to sample from
epsilon(float): Minimum probability in sampling
greedy(bool): if ``True``, pick the action with maximum probability,
otherwise sample from it.
"""
# actions = self.sample_policy(state_curr[self.sample_node].data, args)
actions = sample_with_check(probs, greedy=greedy)
if epsilon > 1e-10:
num_action = probs.size(1)
batchsize = probs.size(0)
probs = probs.data if isinstance(
probs, torch.autograd.Variable) else probs
rej_p = probs.new().resize_(2)
rej_p[0] = 1 - epsilon
rej_p[1] = epsilon
rej = rej_p.multinomial(batchsize, replacement=True).byte()
uniform_p = probs.new().resize_(num_action).fill_(1.0 / num_action)
uniform_sampling = uniform_p.multinomial(batchsize, replacement=True)
actions[rej] = uniform_sampling[rej]
return actions
def sample_multinomial(state_curr, args, node="pi", greedy=False):
''' multinomial sampling
Args:
state_curr(dict): current state containing all data
args(dict): customized arguments for sampling. `epsilon` is used
node(str): name string for policy, default is "pi"
greedy(bool): if ``True``, pick the action with maximum probability,
otherwise sample from it.
Returns:
A list of actions using multinomial sampling.
'''
if isinstance(state_curr[node], list):
# Action map
probs = state_curr[node]
rx = len(probs)
ry = len(probs[0])
batchsize = probs[0][0].size(0)
actions = [np.zeros((rx, ry), dtype='int32') for i in range(batchsize)]
for i, actionx_prob in enumerate(probs):
for j, action_prob in enumerate(actionx_prob):
this_action = sample_eps_with_check(
action_prob.data, args.epsilon, greedy=greedy)
for k in range(batchsize):
actions[k][i, j] = this_action[k]
return actions
else:
probs = state_curr[node].data
return sample_eps_with_check(probs, args.epsilon, greedy=greedy)
def epsilon_greedy(state_curr, args, node="pi"):
''' epsilon greedy sampling
Args:
state_curr(dict): current state containing all data
args(dict): customized arguments for sampling. `epsilon` is used
node(str): name string for policy, default is "pi"
Returns:
A list of actions using epsilon greedy sampling.
'''
return sample_multinomial(state_curr, args, node=node, greedy=True)
def original_distribution(state_curr, args, node="pi"):
''' Send original probability as it is.
Args:
state_curr(dict): current state containing all data
args(dict): customized arguments for sampling. `epsilon` is used
node(str): name string for policy, default is "pi"
Returns:
A list of original probabilities.
'''
probs = state_curr[node].data
batchsize = probs.size(0)
# Return a list of list.
return [list(probs[i]) for i in range(batchsize)]
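# A minimal usage sketch for the samplers above, assuming CPU tensors, an
# arbitrary 4x6 policy and epsilon=0.1; the argparse Namespace stands in for
# the parsed option object that normally carries `epsilon`.
def _example_sampling():
    from argparse import Namespace
    probs = torch.softmax(torch.randn(4, 6), dim=1)   # 4 samples, 6 actions
    state_curr = {"pi": probs}
    args = Namespace(epsilon=0.1)
    sampled = sample_multinomial(state_curr, args, node="pi")
    greedy = epsilon_greedy(state_curr, args, node="pi")
    print(sampled, greedy)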
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
from .sample_methods import sample_multinomial, epsilon_greedy
class Sampler(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'sample_policy',
'choices of epsilon-greedy, multinomial, or uniform',
'epsilon-greedy')
spec.addBoolOption(
'store_greedy',
('if enabled, picks maximum-probability action; '
'otherwise, sample from distribution'),
False)
spec.addFloatOption(
'epsilon',
'used in epsilon-greedy',
0.0)
spec.addStrListOption(
'sample_nodes',
'nodes to be sampled and saved',
['pi,a'])
return spec
@auto_import_options
def __init__(self, option_map):
"""Initialization for Sampler."""
self.sample_nodes = []
for nodes in self.options.sample_nodes:
policy, action = nodes.split(",")
self.sample_nodes.append((policy, action))
def sample(self, state_curr):
"""Sample an action from distribution using a certain sample method
Args:
state_curr(dict): current state containing all data
"""
# TODO: This only handles epsilon_greedy and multinomial for now. Add
# uniform and original_distribution?
sampler = (epsilon_greedy
if self.options.store_greedy
else sample_multinomial)
actions = {}
for pi_node, a_node in self.sample_nodes:
actions[a_node] = sampler(state_curr, self.options, node=pi_node)
actions[pi_node] = state_curr[pi_node].data
return actions
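# A minimal sketch of constructing a Sampler from its own option spec and
# sampling from a toy policy node; it assumes the compiled _elf extension
# behind PyOptionSpec is available and that defaults are used (no CLI args).
def _example_sampler():
    import torch
    option_map = Sampler.get_option_spec().parse(args=[])
    sampler = Sampler(option_map)
    state_curr = {"pi": torch.softmax(torch.randn(4, 6), dim=1)}
    reply = sampler.sample(state_curr)
    print(reply["a"], reply["pi"].shape)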
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .stats import EvalCount, RewardCount, WinRate, Stats
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import import_options, PyOptionSpec
class EvalCount(object):
''' Eval Count. Run games and record required stats.'''
def __init__(self):
# All previous ids.
self.ids = {}
# id for old models.
# If this variable is set, then do not count win_rate of ids_exclude.
self.ids_exclude = set()
self.summary_count = 0
self.num_terminal = 0
def reset(self):
pass
def _on_terminal(self, id, record):
pass
def reset_on_new_model(self):
self.reset()
self.ids_exclude.update(self.ids.keys())
self.ids = dict()
def feed(self, id, *args, **kwargs):
# Game is running, not reaching terminal yet.
# Register a game id.
if id not in self.ids:
self.ids[id] = 0
self.ids[id] = self._on_game(id, self.ids[id], *args, **kwargs)
def count_completed(self):
return self.num_terminal
def terminal(self, id):
# If this game id ended and is in the exclude list, skip
# It is not counted as the number of games completed.
if id in self.ids_exclude:
self.ids_exclude.remove(id)
if id in self.ids:
del self.ids[id]
return
if id in self.ids:
self._on_terminal(id, self.ids[id])
# This game is over, remove game id if it is already in ids
del self.ids[id]
self.num_terminal += 1
# else:
# This should only happen when seq=0
# print("id=%s seq=%d, winner=%d" % (id, seq, winner))
def summary(self):
ret = self._summary()
self.reset()
self.num_terminal = 0
self.summary_count += 1
return ret
def print_summary(self):
summary = self.summary()
for k, v in summary.items():
print("%s: %s" % (str(k), str(v)))
def feed_batch(self, batch, hist_idx=0):
ids = batch["id"][hist_idx]
last_terminals = batch["last_terminal"][hist_idx]
last_r = batch["last_r"][hist_idx]
for batch_idx, (id, last_terminal) in enumerate(
zip(ids, last_terminals)):
self.feed(id, last_r[batch_idx])
if last_terminal:
self.terminal(id)
class RewardCount(EvalCount):
''' Class to accumulate rewards achieved'''
def __init__(self):
super(RewardCount, self).__init__()
self.reset()
def reset(self):
self.n = 0
self.sum_reward = 0
def _on_terminal(self, id, record):
self.sum_reward += record
self.n += 1
def _on_game(self, id, record, reward, seq=None):
return record + reward
def _summary(self):
str_reward = "[%d] Reward: %.2f/%d" % (
self.summary_count,
float(self.sum_reward) / (self.n + 1e-10),
self.n
)
return dict(str_reward=str_reward)
class WinRate(EvalCount):
''' Class to accumulate game results to win rate'''
def __init__(self):
super(WinRate, self).__init__()
self.total_win_count = 0
self.total_lose_count = 0
self.summary_count = 0
self.highest_win_rate = -1.0
self.highest_win_rate_idx = -1
def reset(self):
self.win_count = 0
self.lose_count = 0
def _on_game(self, id, record, final_reward, seq=None):
if final_reward > 0.5:
self.win_count += 1
self.total_win_count += 1
elif final_reward < -0.5:
self.lose_count += 1
self.total_lose_count += 1
def _summary(self):
total = self.win_count + self.lose_count
win_rate = self.win_count / (total + 1e-10)
new_record = False
if win_rate > self.highest_win_rate:
self.highest_win_rate = win_rate
self.highest_win_rate_idx = self.summary_count
new_record = True
str_win_rate = (
f'[{self.summary_count}] Win rate: {win_rate:.3f} '
f'[{self.win_count}/{self.lose_count}/{total}], '
f'Best win rate: {self.highest_win_rate:.3f} '
f'[{self.highest_win_rate_idx}]'
)
total = self.total_win_count + self.total_lose_count
str_acc_win_rate = "Accumulated win rate: %.3f [%d/%d/%d]" % (
self.total_win_count / (total + 1e-10),
self.total_win_count, self.total_lose_count, total
)
return dict(
new_record=new_record,
count=self.summary_count,
best_win_rate=self.highest_win_rate,
str_win_rate=str_win_rate,
str_acc_win_rate=str_acc_win_rate,
)
def win_count(self): return self.total_win_count
def lose_count(self): return self.total_lose_count
    def total_winlose_count(self): return self.total_win_count + self.total_lose_count
def winlose_count(self): return self.win_count + self.lose_count
class Stats(EvalCount):
@classmethod
def get_option_spec(cls, stats_name=''):
spec = PyOptionSpec()
spec.addStrOption(
stats_name + '_stats',
'type of stat to report (rewards or winrate)',
'')
return spec
def __init__(self, option_map, stats_name=''):
"""Initialization for Stats."""
import_options(self, option_map, self.get_option_spec(stats_name))
self.name = stats_name + "_stats"
self.collector = None
self.stats_name = getattr(self.options, self.name)
if self.stats_name == "rewards":
self.collector = RewardCount()
elif self.stats_name == "winrate":
self.collector = WinRate()
else:
self.collector = None
print("Stats: Name " + str(self.stats_name) + " is not known!")
# raise ValueError(
# "Name " + str(self.stats_name) + " is not known!")
def is_valid(self):
return self.collector is not None
def feed(self, id, *args, **kwargs):
self.collector.feed(id, *args, **kwargs)
def count_completed(self):
return self.collector.count_completed()
def reset_on_new_model(self):
self.collector.reset_on_new_model()
def terminal(self, id):
return self.collector.terminal(id)
def reset(self):
self.collector.reset()
def summary(self):
return self.collector.summary()
def print_summary(self):
self.collector.print_summary()
def feed_batch(self, batch, hist_idx=0):
return self.collector.feed_batch(batch, hist_idx=hist_idx)
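# A small sketch of driving WinRate directly with hand-fed final rewards.
# The game ids and rewards are made up; reset() is called once up front,
# mirroring how Trainer/Evaluator reset their Stats during setup.
def _example_win_rate():
    wr = WinRate()
    wr.reset()
    wr.feed("game-0", 1.0)    # final reward > 0.5 counts as a win
    wr.terminal("game-0")
    wr.feed("game-1", -1.0)   # final reward < -0.5 counts as a loss
    wr.terminal("game-1")
    print(wr.summary()["str_win_rate"])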
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
import time
from collections import defaultdict
class RLTimer:
''' A customized timer class'''
def __init__(self):
self.overall_counts = defaultdict(int)
self.last_overall_mark = defaultdict(lambda: -1)
self.restart()
def restart(self):
self.start_time = time.time()
self.curr_time = datetime.now()
self.durations = defaultdict(lambda: dict(duration=0, counter=0))
def record(self, name):
curr_time = datetime.now()
self.durations[name]["duration"] += (curr_time -
self.curr_time).total_seconds()
self.durations[name]["counter"] += 1
self.overall_counts[name] += 1
self.curr_time = curr_time
def print(self, nstep):
final_time = time.time()
total_duration = (final_time - self.start_time) * 1000.0 / nstep
s = ", ".join(
"%s: %.3f ms" %
(name,
d["duration"] *
1000.0 /
d["counter"]) for name,
d in self.durations.items())
return "Total: %.3f ms. " % total_duration + s
def printInterval(self, name, nstep, callback):
if self.checkPeriodicCondition(name, nstep):
callback(self)
self.restart()
self.updatePeriodicCondition(name)
def checkPeriodicCondition(self, name, nstep):
curr_count = self.overall_counts[name]
last_count = self.last_overall_mark[name]
return curr_count > last_count and curr_count % nstep == 0
def updatePeriodicCondition(self, name):
self.last_overall_mark[name] = self.overall_counts[name]
def getPeriodicValue(self, name):
return self.overall_counts[name]
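# A minimal sketch of timing two phases of a loop with RLTimer; the phase
# names and sleep durations are arbitrary.
def _example_rl_timer():
    timer = RLTimer()
    for _ in range(5):
        time.sleep(0.001)
        timer.record("forward")
        time.sleep(0.002)
        timer.record("backward")
    print(timer.print(nstep=5))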
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .utils import ModelSaver, MultiCounter, topk_accuracy
from .trainer import Trainer, Evaluator
from .lstm_trainer import LSTMTrainer
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from datetime import datetime
import torch
from torch.autograd import Variable
from elf.options import auto_import_options, PyOptionSpec
from ..stats import Stats
from ..utils import HistState
from .utils import ModelSaver, MultiCounter
class LSTMTrainer(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'freq_update',
'frequency of model update',
1)
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
spec.addIntOption(
'gpu',
'which GPU to use',
-1)
spec.addIntOption(
'T',
            'number of timesteps',
6)
spec.addStrOption(
'parsed_args',
'dummy option',
'')
spec.merge(Stats.get_option_spec('trainer'))
spec.merge(ModelSaver.get_option_spec())
return spec
@auto_import_options
def __init__(self, option_map, verbose=False):
self.stats = Stats(option_map, "trainer")
self.saver = ModelSaver(option_map)
self.counter = MultiCounter()
# [TODO] Hard coded now, need to fix.
num_hiddens = 13 * 25
gpu = self.options.gpu
assert gpu is not None and gpu >= 0
def init_state():
return torch.FloatTensor(num_hiddens).cuda(gpu).zero_()
self.hs = HistState(self.options.T, init_state)
self.stats.reset()
def episode_start(self, i):
pass
def actor(self, batch):
self.counter.inc("actor")
ids = batch["id"][0]
seqs = batch["seq"][0]
self.hs.preprocess(ids, seqs)
hiddens = Variable(self.hs.newest(ids, 0))
m = self.mi["actor"]
m.set_volatile(True)
state_curr = m(batch.hist(0), hiddens)
m.set_volatile(False)
reply_msg = self.sampler.sample(state_curr)
reply_msg["rv"] = self.mi["actor"].step
next_hiddens = m.transition(state_curr["h"], reply_msg["a"])
self.hs.feed(ids, next_hiddens.data)
self.stats.feed_batch(batch)
return reply_msg
def train(self, batch):
self.counter.inc("train")
mi = self.mi
ids = batch["id"][0]
T = batch["s"].size(0)
hiddens = self.hs.newest(ids, T - 1)
mi.zero_grad()
self.rl_method.update(mi, batch, hiddens, self.counter.stats)
mi.update_weights()
if self.counter.counts["train"] % self.options.freq_update == 0:
mi.update_model("actor", mi["model"])
def episode_summary(self, i):
prefix = "[%s][%d] Iter" % (
str(datetime.now()), self.options.batchsize) + "[%d]: " % i
print(prefix)
if self.counter.counts["train"] > 0:
self.saver.feed(self.mi["model"])
print(
"Command arguments:", ' '.join(map(str, self.options.parsed_args)))
self.counter.summary(global_counter=i)
print("")
self.stats.print_summary()
if self.stats.count_completed() > 10000:
self.stats.reset()
def setup(self, rl_method=None, mi=None, sampler=None):
self.rl_method = rl_method
self.mi = mi
self.sampler = sampler
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import defaultdict, deque, Counter
from datetime import datetime
from elf.options import auto_import_options, PyOptionSpec
class SymLink(object):
def __init__(self, sym_prefix, latest_k=5):
self.sym_prefix = sym_prefix
self.latest_k = latest_k
self.latest_files = deque()
def feed(self, filename):
self.latest_files.appendleft(filename)
if len(self.latest_files) > self.latest_k:
self.latest_files.pop()
for k, name in enumerate(self.latest_files):
symlink_file = self.sym_prefix + str(k)
try:
if os.path.exists(symlink_file):
os.unlink(symlink_file)
os.symlink(name, symlink_file)
except BaseException:
print(
"Build symlink %s for %s failed, skipped" %
(symlink_file, name))
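# A small sketch of how SymLink rotates "latest" symlinks; the prefix and
# file names are made up, and the target files need not exist for symlink().
def _example_symlink():
    sym = SymLink("./latest-", latest_k=2)
    sym.feed("save-100.bin")   # ./latest-0 -> save-100.bin
    sym.feed("save-200.bin")   # ./latest-0 -> save-200.bin, ./latest-1 -> save-100.bin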
class ModelSaver(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'record_dir',
'directory to record in',
'./record')
spec.addStrOption(
'save_prefix',
'prefix of savefiles',
'save')
spec.addStrOption(
'save_dir',
'directory for savefiles',
os.environ.get('save', './'))
spec.addStrOption(
'latest_symlink',
'name for latest model symlink',
'latest')
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
return spec
@auto_import_options
def __init__(self, option_map):
self.save = (self.options.num_games == self.options.batchsize)
if self.save and not os.path.exists(self.options.record_dir):
os.mkdir(self.options.record_dir)
if not os.path.exists(self.options.save_dir):
os.mkdir(self.options.save_dir)
self.symlinker = SymLink(
os.path.join(
self.options.save_dir,
self.options.latest_symlink))
def feed(self, model):
basename = self.options.save_prefix + "-%d.bin" % model.step
print("Save to " + self.options.save_dir)
filename = os.path.join(self.options.save_dir, basename)
print("Filename = " + filename)
model.save(filename)
# Create a symlink
self.symlinker.feed(basename)
class ValueStats(object):
def __init__(self, name=None):
self.name = name
self.reset()
def feed(self, v):
self.summation += v
if v > self.max_value:
self.max_value = v
self.max_idx = self.counter
if v < self.min_value:
self.min_value = v
self.min_idx = self.counter
self.counter += 1
def summary(self, info=None):
info = "" if info is None else info
name = "" if self.name is None else self.name
if self.counter > 0:
try:
return "%s%s[%d]: avg: %.5f, min: %.5f[%d], max: %.5f[%d]" % (
info, name, self.counter, self.summation / self.counter,
self.min_value, self.min_idx, self.max_value, self.max_idx
)
except BaseException:
return "%s%s[Err]:" % (info, name)
else:
return "%s%s[0]" % (info, name)
def reset(self):
self.counter = 0
self.summation = 0.0
self.max_value = -1e38
self.min_value = 1e38
self.max_idx = None
self.min_idx = None
def topk_accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
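# A quick sketch of topk_accuracy on random logits; the shapes and class
# count are arbitrary, and torch is imported lazily because this module does
# not import it at the top level.
def _example_topk_accuracy():
    import torch
    output = torch.randn(32, 10)              # logits: 32 samples, 10 classes
    target = torch.randint(0, 10, (32,))
    top1, top5 = topk_accuracy(output, target, topk=(1, 5))
    print("top-1 %.1f%%, top-5 %.1f%%" % (top1.item(), top5.item()))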
class MultiCounter(object):
def __init__(self, verbose=False):
self.last_time = None
self.verbose = verbose
self.counts = Counter()
self.stats = defaultdict(lambda: ValueStats())
self.total_count = 0
def inc(self, key):
if self.verbose:
print("[MultiCounter]: %s" % key)
self.counts[key] += 1
self.total_count += 1
def reset(self):
for k in sorted(self.stats.keys()):
self.stats[k].reset()
self.counts = Counter()
self.total_count = 0
self.last_time = datetime.now()
def summary(self, global_counter=None):
this_time = datetime.now()
if self.last_time is not None:
print(
"[%d] Time spent = %f ms" %
(global_counter,
(this_time - self.last_time).total_seconds() * 1000))
for key, count in self.counts.items():
print("%s: %d/%d" % (key, count, self.total_count))
for k in sorted(self.stats.keys()):
v = self.stats[k]
print(v.summary(info=str(global_counter) + ":" + k))
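# A short sketch of MultiCounter/ValueStats bookkeeping; the key names and
# fed values are arbitrary.
def _example_multi_counter():
    counter = MultiCounter()
    for it in range(3):
        counter.inc("train")
        counter.stats["loss"].feed(1.0 / (it + 1))
    counter.summary(global_counter=0)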
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from datetime import datetime
from elf.options import auto_import_options, import_options, PyOptionSpec
from ..stats import Stats
from .timer import RLTimer
from .utils import ModelSaver, MultiCounter
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'elf'))
# import torch.multiprocessing as _mp
# mp = _mp.get_context('spawn')
class Evaluator(object):
@classmethod
def get_option_spec(cls, name='eval'):
spec = PyOptionSpec()
spec.addStrListOption(
'keys_in_reply',
'keys in reply',
[])
spec.addIntOption(
'num_minibatch',
'number of minibatches',
5000)
spec.addStrListOption(
'parsed_args',
'dummy option',
'')
spec.merge(Stats.get_option_spec(name))
return spec
def __init__(
self,
option_map,
name='eval',
stats=True,
verbose=False,
actor_name="actor"):
"""Initialization for Evaluator."""
import_options(self, option_map, self.get_option_spec(name))
if stats:
self.stats = Stats(option_map, name)
else:
self.stats = None
if self.stats is not None and not self.stats.is_valid():
self.stats = None
self.name = name
self.actor_name = actor_name
self.verbose = verbose
self.keys_in_reply = set(self.options.keys_in_reply)
def episode_start(self, i):
''' Called before each episode. Reset ``actor_count`` to 0.
Args:
i(int): index in the minibatch
'''
self.actor_count = 0
def actor(self, batch):
"""Actor.
Get the model, forward the batch and get a distribution.
Sample from it and act.
Reply the message to game engine.
Args:
batch(dict): batch data
Returns:
reply_msg(dict):
                ``pi``: policy, ``a``: action,
                ``V``: value, ``rv``: reply version,
                stamped with the actor model's step counter
"""
if self.verbose:
print("In Evaluator[%s]::actor" % self.name)
# actor model.
m = self.mi[self.actor_name]
m.set_volatile(True)
state_curr = m.forward(batch)
m.set_volatile(False)
if self.sampler is not None:
reply_msg = self.sampler.sample(state_curr)
else:
reply_msg = dict(pi=state_curr["pi"].data)
if self.stats is not None:
self.stats.feed_batch(batch)
if "rv" in self.keys_in_reply:
reply_msg["rv"] = self.mi[self.actor_name].step
if "V" in self.keys_in_reply:
reply_msg["V"] = state_curr["V"].data
self.actor_count += 1
return reply_msg
def episode_summary(self, i):
''' Called after each episode. Print stats and summary
Args:
i(int): index in the minibatch
'''
print(
"[%s] actor count: %d/%d" %
(self.name,
self.actor_count,
self.options.num_minibatch))
if self.stats is not None:
self.stats.print_summary()
if self.stats.count_completed() > 10000:
self.stats.reset()
def setup(self, mi=None, sampler=None):
        ''' Setup `ModelInterface` and `Sampler`, and reset stats.
        Args:
            mi(`ModelInterface`)
            sampler(`Sampler`)
        '''
self.mi = mi
self.sampler = sampler
if self.stats is not None:
self.stats.reset()
class Trainer(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'freq_update',
'frequency of model update',
1)
spec.addBoolOption(
'save_first',
'save first model',
False)
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
spec.merge(Evaluator.get_option_spec('trainer'))
spec.merge(ModelSaver.get_option_spec())
return spec
@auto_import_options
def __init__(self, option_map, verbose=False, actor_name="actor"):
"""Initialization for Trainer."""
self.timer = RLTimer()
self.verbose = verbose
self.last_time = None
self.evaluator = Evaluator(
option_map,
'trainer',
verbose=verbose,
actor_name=actor_name)
self.saver = ModelSaver(option_map)
self.counter = MultiCounter(verbose=verbose)
        self.just_updated = False
def actor(self, batch):
"""Actor.
Get the model, forward the batch and get a distribution.
Sample from it and act.
Reply the message to game engine.
Args:
batch(dict): batch data
Returns:
reply_msg(dict):
                ``pi``: policy, ``a``: action, ``V``: value,
                ``rv``: reply version, stamped with the actor model's step counter
"""
self.counter.inc("actor")
return self.evaluator.actor(batch)
def train(self, batch, *args, **kwargs):
''' Trainer.
Get the model, forward the batch and update the weights.
Args:
batch(dict): batch data
'''
mi = self.evaluator.mi
self.counter.inc("train")
self.timer.record("batch_train")
mi.zero_grad()
res = self.rl_method.update(mi, batch,
self.counter.stats, *args, **kwargs)
if res["backprop"]:
mi.update_weights()
self.timer.record("compute_train")
if self.counter.counts["train"] % self.options.freq_update == 0:
# Update actor model
# print("Update actor model")
# Save the current model.
if "actor" in mi:
mi.update_model("actor", mi["model"])
self.just_updated = True
self.just_updated = False
def episode_reset(self):
''' Reset stats '''
self.counter.reset()
self.timer.restart()
def episode_start(self, i):
''' Called before each episode.
Args:
i(int): index in the minibatch
'''
self.evaluator.episode_start(i)
def episode_summary(self, i, save=True):
"""Called after each episode. Print stats and summary.
Also print arguments passed in.
Args:
i(int): index in the minibatch
"""
prefix = "[%s][%d] Iter" % (
str(datetime.now()), self.options.batchsize) + "[%d]: " % i
print(prefix)
if self.counter.counts["train"] > 0 and save:
self.saver.feed(self.evaluator.mi["model"])
print(
"Command arguments:", ' '.join(map(str, self.options.parsed_args)))
self.counter.summary(global_counter=i)
print("")
self.evaluator.episode_summary(i)
self.episode_reset()
return self.evaluator.mi["model"].step
def setup(self, rl_method=None, mi=None, sampler=None):
        ''' Setup `RLMethod`, `ModelInterface` and `Sampler`.
        Args:
            rl_method(`RLMethod`)
            mi(`ModelInterface`)
            sampler(`Sampler`)
        '''
self.rl_method = rl_method
self.evaluator.setup(mi=mi, sampler=sampler)
if self.options.save_first:
print("Save first: ")
self.saver.feed(self.evaluator.mi["model"])
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sys
from collections import defaultdict
import numpy as np
import torch
class Allocator(object):
''' A wrapper class for batch data'''
torch_types = {
"int32_t": torch.IntTensor,
"int64_t": torch.LongTensor,
"float": torch.FloatTensor,
"unsigned char": torch.ByteTensor,
"char": torch.ByteTensor
}
numpy_types = {
"int32_t": 'i4',
'int64_t': 'i8',
'float': 'f4',
'unsigned char': 'byte',
'char': 'byte'
}
@staticmethod
def _alloc(p, gpu, use_numpy=True):
name = p.field().name()
type_name = p.field().type_name()
sz = p.field().sz().vec()
print(name, type_name, sz)
if not use_numpy:
v = Allocator.torch_types[type_name](*sz)
if gpu is not None:
with torch.cuda.device(gpu):
v = v.pin_memory()
v.fill_(1)
# Return pointer, size and byte_stride
strides = [i * v.element_size() for i in v.stride()]
p.set(v.data_ptr(), strides)
else:
v = np.zeros(sz, dtype=Allocator.numpy_types[type_name])
v[:] = 1
import pdb
pdb.set_trace()
# Return pointer, size and byte_size
p.set(v.ctypes.data, v.strides)
return name, v
@staticmethod
def spec2batches(ctx, batchsize, spec, gpu, use_numpy=False, num_recv=1):
batch_spec = []
name2idx = defaultdict(lambda: list())
idx2name = dict()
for name, v in spec.items():
print("%s: %s" % (name, v))
            # TODO: this might not be good, since it changes the input.
if "input" not in v or v["input"] is None:
v["input"] = []
if "reply" not in v or v["reply"] is None:
v["reply"] = []
this_batchsize = v.get("batchsize", batchsize)
keys = list(set(v["input"] + v["reply"]))
print("SharedMem: \"%s\", keys: %s" % (name, str(keys)))
smem_opts = ctx.createSharedMemOptions(name, this_batchsize)
smem_opts.setTimeout(v.get("timeout_usec", 0))
for _ in range(num_recv):
smem = ctx.allocateSharedMem(smem_opts, keys)
spec = dict((
Allocator._alloc(smem[field], gpu, use_numpy=use_numpy)
for field in keys
))
# Split spec.
spec_input = {key: spec[key] for key in v["input"]}
spec_reply = {key: spec[key] for key in v["reply"]}
batch_spec.append(dict(input=spec_input, reply=spec_reply))
idx = smem.getSharedMemOptions().idx()
name2idx[name].append(idx)
idx2name[idx] = name
return batch_spec, name2idx, idx2name
def tensor_slice(t, dim, b, e=None):
if e is None:
e = b + 1
if dim == 0:
return t[b:e]
elif dim == 1:
return t[:, b:e]
elif dim == 2:
return t[:, :, b:e]
else:
raise ValueError("unsupported %d in tensor_slice" % dim)
class Batch:
def __init__(self, _GC=None, _batchdim=0, _histdim=None, **kwargs):
'''Initialize `Batch` class.
Pass in a dict and wrap it into ``self.batch``
'''
self.GC = _GC
self.batchdim = _batchdim
self.histdim = _histdim
self.batch = kwargs
def empty_copy(self):
batch = Batch()
batch.GC = self.GC
batch.batchdim = self.batchdim
batch.histdim = self.histdim
return batch
def first_k(self, batchsize):
batch = self.empty_copy()
batch.batch = {
k: tensor_slice(
v,
self.batchdim,
0,
batchsize) for k,
v in self.batch.items()}
return batch
def __getitem__(self, key):
'''Get a key from batch. Can be either ``key`` or ``last_key``
Args:
key(str): key name. e.g. if ``r`` is passed in,
will search for ``r`` or ``last_r``
'''
if key in self.batch:
return self.batch[key]
else:
key_with_last = "last_" + key
if key_with_last in self.batch:
return self.batch[key_with_last][1:]
else:
raise KeyError(
"Batch(): specified key: %s or %s not found!" %
(key, key_with_last))
def add(self, key, value):
'''Add key=value in Batch.
This is used when you want to send additional state to the
learning algorithm, e.g., hidden state collected from the
previous iterations.
'''
self.batch[key] = value
return self
def __contains__(self, key):
return key in self.batch or "last_" + key in self.batch
def setzero(self):
''' Set all tensors in the batch to 0 '''
for _, v in self.batch.items():
v[:] = 0
def copy_from(self, src):
''' copy all keys and values from another dict or `Batch` object
Args:
src(dict or `Batch`): batch data to be copied
'''
this_src = src if isinstance(src, dict) else src.batch
key_assigned = {k: False for k in self.batch.keys()}
keys_extra = []
for k, v in this_src.items():
# Copy it down to cpu.
if k not in self.batch:
keys_extra.append(k)
continue
bk = self.batch[k]
key_assigned[k] = True
if v is None:
continue
if isinstance(v, list) and bk.numel() == len(v):
bk = bk.view(-1)
for i, vv in enumerate(v):
bk[i] = vv
elif isinstance(v, (int, float)):
bk.fill_(v)
else:
try:
bk[:] = v.squeeze_()
except BaseException:
import pdb
pdb.set_trace()
# Check whether there is any key missing.
keys_missing = [
k for k, assigned in key_assigned.items() if not assigned]
return keys_extra, keys_missing
def hist(self, hist_idx, key=None):
'''
return batch history.
Args:
            hist_idx(int): hist_idx=1 means going back in time by one step, etc
key(str): if None, return all key's history,
otherwise just return that key's history
'''
if self.histdim is None:
raise ValueError("No histdim information for the batch")
if key is None:
new_batch = self.empty_copy()
new_batch.batch = {
k: tensor_slice(v, self.histdim, hist_idx)
for k, v in self.batch.items()
}
return new_batch
else:
return tensor_slice(self[key], self.histdim, hist_idx)
def half(self):
'''transfer batch data to fp16'''
new_batch = self.empty_copy()
new_batch.batch = {k: v.half()
for k, v in self.batch.items()}
return new_batch
def cpu2gpu(self, gpu, non_blocking=True):
''' transfer batch data to gpu '''
# For each time step
new_batch = self.empty_copy()
new_batch.batch = {k: v.cuda(gpu, non_blocking=non_blocking)
for k, v in self.batch.items()}
return new_batch
def cpu2cpu(self, gpu, non_blocking=True):
        ''' clone batch data on cpu '''
# For each time step
new_batch = self.empty_copy()
new_batch.batch = {k: v.clone() for k, v in self.batch.items()}
return new_batch
def transfer_cpu2gpu(self, batch_gpu, non_blocking=True):
''' transfer batch data to gpu '''
# For each time step
for k, v in self.batch.items():
batch_gpu[k].copy_(v, non_blocking=non_blocking)
def transfer_cpu2cpu(self, batch_dst, non_blocking=True):
''' transfer batch data to cpu '''
# For each time step
for k, v in self.batch.items():
batch_dst[k].copy_(v)
def pin_clone(self):
''' clone and pin memory for faster transportations to gpu '''
batch = self.empty_copy()
batch.batch = {k: v.clone().pin_memory()
for k, v in self.batch.items()}
return batch
def to_numpy(self):
''' convert batch data to numpy format '''
return {
k: (v.numpy() if not isinstance(v, np.ndarray) else v)
for k, v in self.batch.items()
}
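# A minimal sketch of Batch without a game context: wrap plain tensors,
# slice off the first k samples, and copy a reply-style dict back in. The
# keys, shapes and values are made up for illustration.
def _example_batch():
    b = Batch(_batchdim=0, s=torch.zeros(4, 3), r=torch.zeros(4))
    small = b.first_k(2)                    # first 2 samples along batch dim
    extra, missing = b.copy_from({"s": 1.0, "r": [0.0, 1.0, 0.5, -1.0]})
    print(small["s"].shape, extra, missing)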
class GCWrapper:
def __init__(
self,
GC,
batchsize,
spec,
batchdim=0,
histdim=None,
use_numpy=False,
gpu=None,
params=dict(),
verbose=True,
num_recv=1):
        '''Initialize GCWrapper
        Parameters:
            GC(C++ class): Game Context
            batchsize(int): batch size
            spec(dict): descriptions of input and reply entries for each
                callback key. A detailed explanation can be found in
                :doc:`wrapper-python`, the Python interface of the wrapper.
            batchdim(int): batch dimension in each tensor
            histdim(int): history dimension in each tensor, or ``None``
            use_numpy(boolean): whether to use numpy arrays (or PyTorch tensors)
            gpu(int): gpu to use
            params(dict): additional parameters
            num_recv(int): number of receivers per shared memory block
        '''
# TODO Make a unified argument server and remove ``params``
self.batches, self.name2idx, self.idx2name = Allocator.spec2batches(
GC.ctx(), batchsize, spec,
use_numpy=use_numpy, gpu=gpu, num_recv=num_recv)
self.batchdim = batchdim
self.histdim = histdim
self.gpu = gpu
self.params = params
self.GC = GC
self._cb = {}
def reg_has_callback(self, key):
return key in self.name2idx
def reg_callback_if_exists(self, key, cb):
if self.reg_has_callback(key):
self.reg_callback(key, cb)
return True
else:
return False
def reg_callback(self, key, cb):
'''Set callback function for key
Parameters:
key(str): the key used to register the callback function.
If the key is not present in the descriptions,
return ``False``.
cb(function): the callback function to be called.
The callback function has the signature
``cb(input_batch, input_batch_gpu, reply_batch)``.
'''
if key not in self.name2idx:
raise ValueError("Callback[%s] is not in the specification" % key)
if cb is None:
print("Warning: Callback[%s] is registered to None" % key)
for idx in self.name2idx[key]:
# print("Register " + str(cb) + " at idx: %d" % idx)
self._cb[idx] = cb
return True
def _makebatch(self, key_array):
return Batch(
_GC=self.GC,
_batchdim=self.batchdim,
_histdim=self.histdim,
**key_array)
def _call(self, smem, *args, **kwargs):
idx = smem.getSharedMemOptions().idx()
# print("smem idx: %d, label: %s" % (idx, self.idx2name[idx]))
# print(self.name2idx)
if idx not in self._cb:
raise ValueError("smem.idx[%d] is not in callback functions" % idx)
if self._cb[idx] is None:
return
batchsize = smem.effective_batchsize()
assert batchsize > 0
picked = self._makebatch(self.batches[idx]["input"]).first_k(batchsize)
if self.gpu is not None:
picked = picked.cpu2gpu(self.gpu)
        # Keep a handle to the shared memory block so callers can access
        # the underlying state directly through picked.smem.
picked.smem = smem
picked.batchsize = batchsize
picked.max_batchsize = smem.getSharedMemOptions().batchsize()
# Get the reply array
if self.batches[idx]["reply"] is not None:
sel_reply = self._makebatch(
self.batches[idx]["reply"]).first_k(batchsize)
else:
sel_reply = None
reply = self._cb[idx](picked, *args, **kwargs)
# If reply is meaningful, send them back.
if isinstance(reply, dict) and sel_reply is not None:
if self.gpu is not None:
with torch.cuda.device(self.gpu):
keys_extra, keys_missing = sel_reply.copy_from(reply)
else:
keys_extra, keys_missing = sel_reply.copy_from(reply)
if len(keys_extra) > 0:
raise ValueError(
"Receive extra keys %s from reply!" %
str(keys_extra))
if len(keys_missing) > 0:
raise ValueError(
"Missing keys %s absent in reply!" %
str(keys_missing))
def _check_callbacks(self):
# Check whether all callbacks are assigned properly.
for key, indices in self.name2idx.items():
for idx in indices:
if idx not in self._cb:
raise ValueError(
("GCWrapper.start(): No callback function "
"for key = %s and idx = %d") %
(key, idx))
def run(self, *args, **kwargs):
        '''Wait for a batch from an arbitrary collector key and invoke its callback.
        Samples in a returned batch are always from the same group,
        but the group key of the batch may be arbitrary.
        '''
# print("before wait")
smem = self.GC.ctx().wait()
# print("before calling")
self._call(smem, *args, **kwargs)
# print("before_step")
self.GC.ctx().step()
def start(self):
'''Start all game environments'''
self._check_callbacks()
self.GC.ctx().start()
def stop(self):
'''Stop all game environments.
:func:`start()` cannot be called again after :func:`stop()`
has been called.
'''
self.GC.ctx().stop()
def reg_sig_int(self):
import signal
def signal_handler(s, frame):
print('Detected Ctrl-C!')
self.stop()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
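# A hedged sketch of the GCWrapper run loop, mirroring how the GTP console
# script later in this codebase drives it. It assumes a GCWrapper `GC` built
# elsewhere from a game context whose spec defines an "actor" callback key;
# the reply keys accepted by copy_from depend entirely on that spec.
def _example_gc_loop(GC):
    def actor(batch):
        # Reply with an all-zero action for every sample in the batch.
        return dict(a=torch.zeros(batch.batchsize).long())
    GC.reg_callback_if_exists("actor", actor)
    GC.start()
    for _ in range(100):
        GC.run()
    GC.stop()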
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
class ContextArgs(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addIntOption(
'num_games',
'number of games',
1024)
spec.addIntOption(
'batchsize',
'batch size',
128)
spec.addIntOption(
'T',
'number of timesteps',
6)
spec.addIntOption(
'mcts_threads',
'number of MCTS threads',
0)
spec.addIntOption(
'mcts_rollout_per_batch',
'Batch size for mcts rollout',
1)
spec.addIntOption(
'mcts_rollout_per_thread',
            'number of rollouts per MCTS thread',
1)
spec.addBoolOption(
'mcts_verbose',
'enables mcts verbosity',
False)
spec.addBoolOption(
'mcts_verbose_time',
'enables mcts verbosity for time stats',
False)
spec.addBoolOption(
'mcts_persistent_tree',
'use persistent tree in MCTS',
False)
spec.addBoolOption(
'mcts_use_prior',
'use prior in MCTS',
False)
spec.addIntOption(
'mcts_virtual_loss',
'"virtual" number of losses for MCTS edges',
0)
spec.addStrOption(
'mcts_pick_method',
'criterion for mcts node selection',
'most_visited')
spec.addFloatOption(
'mcts_puct',
'prior weight',
1.0)
spec.addFloatOption(
'mcts_epsilon',
'for exploration enhancement, weight of randomization',
0.0)
spec.addFloatOption(
'mcts_alpha',
'for exploration enhancement, alpha term in gamma distribution',
0.0)
spec.addBoolOption(
"mcts_unexplored_q_zero",
'set all unexplored node to have Q value zero',
False)
spec.addBoolOption(
"mcts_root_unexplored_q_zero",
'set unexplored child of root node to have Q value zero',
False)
return spec
@auto_import_options
def __init__(self, option_map):
pass
def initialize(self, co):
options = self.options
mcts = co.mcts_options
co.num_games = options.num_games
co.batchsize = options.batchsize
co.T = options.T
mcts.num_threads = options.mcts_threads
mcts.num_rollouts_per_thread = options.mcts_rollout_per_thread
mcts.num_rollouts_per_batch = options.mcts_rollout_per_batch
mcts.verbose = options.mcts_verbose
mcts.verbose_time = options.mcts_verbose_time
mcts.virtual_loss = options.mcts_virtual_loss
mcts.pick_method = options.mcts_pick_method
mcts.persistent_tree = options.mcts_persistent_tree
mcts.root_epsilon = options.mcts_epsilon
mcts.root_alpha = options.mcts_alpha
mcts.alg_opt.use_prior = options.mcts_use_prior
mcts.alg_opt.c_puct = options.mcts_puct
mcts.alg_opt.unexplored_q_zero = options.mcts_unexplored_q_zero
mcts.alg_opt.root_unexplored_q_zero = \
options.mcts_root_unexplored_q_zero
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# C++ imports
from _elf import *
# Other imports
from .context_utils import ContextArgs
from .more_labels import MoreLabels
from .utils_elf import GCWrapper, Batch
from .zmq_util import ZMQSender, ZMQReceiver
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import zmq
class ZMQCtx:
def __init__(self):
pass
def __enter__(self):
pass
def __exit__(self, ty, value, tb):
if value is not None:
# print(value)
pass
return True
# print("Send failed for " + self.identity + "..")
class ZMQSender:
def __init__(self, addr, identity, send_timeout=0, recv_timeout=0):
self.ctx = zmq.Context()
self.ctx.setsockopt(zmq.IPV6, 1)
self.sender = self.ctx.socket(zmq.DEALER)
self.sender.identity = identity.encode('ascii')
# self.sender.set_hwm(10000)
if send_timeout > 0:
self.sender.SNDTIMEO = send_timeout
if recv_timeout > 0:
self.sender.RCVTIMEO = recv_timeout
self.sender.connect(addr)
def Send(self, msg, copy=False):
with ZMQCtx():
self.sender.send(msg, copy=copy)
return True
return False
def Receive(self):
with ZMQCtx():
return self.sender.recv()
return None
class ZMQReceiver:
def __init__(self, addr, timeout=0):
self.ctx = zmq.Context()
self.ctx.setsockopt(zmq.IPV6, 1)
self.receiver = self.ctx.socket(zmq.ROUTER)
# self.receiver.set_hwm(10000)
if timeout > 0:
self.receiver.RCVTIMEO = timeout
self.receiver.bind(addr)
def Send(self, identity, msg):
with ZMQCtx():
self.receiver.send_multipart([identity, msg])
return True
return False
def Receive(self):
# return identity, msg
with ZMQCtx():
identity, msg = self.receiver.recv_multipart()
# print(identity)
# print(msg)
return identity, msg
return None, None
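# A small sketch of pairing ZMQSender/ZMQReceiver in one process; the address
# and identity are placeholders. The receiver is constructed (bound) before
# the sender connects and sends.
def _example_zmq_pair():
    receiver = ZMQReceiver("tcp://127.0.0.1:5555")
    sender = ZMQSender("tcp://127.0.0.1:5555", identity="worker-0")
    assert sender.Send(b"hello")
    identity, msg = receiver.Receive()
    print(identity, msg)   # b'worker-0' b'hello'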
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
class MoreLabels(object):
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrListOption(
'additional_labels',
'add additional labels in the batch; e.g. id, seq, last_terminal',
[])
return spec
@auto_import_options
def __init__(self, option_map):
pass
def add_labels(self, desc):
if self.options.additional_labels:
for _, v in desc.items():
v["input"].extend(self.options.additional_labels)
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .import_options import auto_import_options, import_options
from .py_option_map import PyOptionMap
from .py_option_spec import PyOptionSpec
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import sys
import elf
from _elf import _options
# We can get rid of this and just eval() the type name.
# Depends on how safe we want to be.
_typename_to_type = {
'str': str,
'int': int,
'float': float,
'bool': bool,
}
class PyOptionSpec(_options.OptionSpec):
"""Override C++ OptionSpec with additional bells and whistles."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def getArgparseOptions(self):
return json.loads(self.getPythonArgparseOptionsAsJSONString())
def toArgparser(self):
"""Creates an ArgumentParser from a PyOptionSpec."""
parser = argparse.ArgumentParser()
parser_options = self.getArgparseOptions()
for parser_option in parser_options:
if 'type' in parser_option['kwargs']:
parser_option['kwargs']['type'] = \
_typename_to_type[parser_option['kwargs']['type']]
parser.add_argument(
*parser_option["args"],
**parser_option["kwargs"])
return parser
def parse(self, args=None, overrides=None):
"""Given a PyOptionSpec, parses the command line parameters
(``sys.argv```) and returns the resulting PyOptionMap.
``args`` can override ``sys.argv`` and ``overrides`` can override
any parsed items.
"""
parser = self.toArgparser()
arg_namespace = parser.parse_args(args=args)
if overrides:
for k, v in overrides.items():
setattr(arg_namespace, k, v)
arg_namespace.parsed_args = list(sys.argv if args is None else args)
option_map = elf.options.PyOptionMap(self)
option_map.loadOptionDict(vars(arg_namespace))
return option_map
@classmethod
def fromClasses(cls, classes):
option_spec = cls()
for c in classes:
option_spec.merge(c.get_option_spec())
return option_spec
def clone(self):
return PyOptionSpec(self)
def __deepcopy__(self, memo):
return self.clone()
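# A brief sketch of building a spec, parsing with no CLI arguments, and
# reading a value back; it assumes the compiled _elf._options extension is
# importable, and the option name and default are arbitrary.
def _example_option_spec():
    spec = PyOptionSpec()
    spec.addIntOption('batchsize', 'batch size', 128)
    option_map = spec.parse(args=[])
    print(option_map.get('batchsize'))   # -> 128, the default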
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import elf
from _elf import _options
class PyOptionMap(_options.OptionMap):
"""Override C++ OptionMap with additional bells and whistles."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def getPyOptionSpec(self):
return elf.options.PyOptionSpec(super().getOptionSpec())
def loadOptionDict(self, option_dict):
return self.loadJSONString(json.dumps(option_dict))
def getOptionDict(self):
return json.loads(self.getJSONString())
def get(self, option_name):
return json.loads(self.getAsJSONString(option_name))
def storeIntoNamespace(self, namespace, option_spec=None):
"""Stores the parameters from a PyOptionMap into a namespace."""
if option_spec is None:
option_spec = self.getPyOptionSpec()
option_names = option_spec.getOptionNames()
for name in option_names:
setattr(namespace, name, self.get(name))
def clone(self):
return PyOptionMap(self)
def __deepcopy__(self, memo):
return self.clone()
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
def import_options(obj, option_map, option_spec, namespace=None):
"""Stores the parameters from a PyOptionMap into ``obj.options``."""
if namespace is None:
setattr(obj, 'options', argparse.Namespace())
namespace = obj.options
if option_spec is None:
option_spec = option_map.getPyOptionSpec()
option_map.storeIntoNamespace(namespace, option_spec)
def auto_import_options(fn):
"""This decorator applies to __init__ methods where the first argument
is a PyOptionMap.
It copies each required argument (as specified by the class's
``get_option_spec()``) from the PyOptionMap into the object namespace
of ``self.options`` (i.e. ``self.options.blah``).
"""
def call(self, option_map, *args, **kwargs):
import_options(self, option_map, self.get_option_spec())
return fn(self, option_map, *args, **kwargs)
return call
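# A hedged sketch of a class using auto_import_options, in the same style as
# Sampler/Trainer elsewhere in this codebase; PyOptionSpec is imported lazily
# to avoid a circular import, and the option name and default are made up.
def _example_auto_import_options():
    from elf.options import PyOptionSpec
    class MyComponent(object):
        @classmethod
        def get_option_spec(cls):
            spec = PyOptionSpec()
            spec.addIntOption('widget_size', 'illustrative option', 3)
            return spec
        @auto_import_options
        def __init__(self, option_map):
            print(self.options.widget_size)
    option_map = MyComponent.get_option_spec().parse(args=[])
    MyComponent(option_map)   # prints 3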
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from elf.options import auto_import_options, PyOptionSpec
from . import LoggerLevel, set_level
class GlobalLoggingConfigurator(object):
"""Global configurator for logging."""
@classmethod
def get_option_spec(cls):
spec = PyOptionSpec()
spec.addStrOption(
'loglevel',
('Global log level. Choose from '
             'trace, debug, info, warning, error, critical, or off'),
'info')
return spec
@auto_import_options
def __init__(self, option_map):
pass
def configure(self):
loglevel = LoggerLevel.from_str(self.options.loglevel)
assert loglevel != LoggerLevel.invalid
set_level(loglevel)
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Let's import C++ logging functions/classes as-is.
from _elf._logging import *
from .configuration import GlobalLoggingConfigurator
|
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import inspect
import traceback
from collections import Counter
def move2xy(v):
if v.lower() == "pass":
return -1, -1
x = ord(v[0].lower()) - ord('a')
# Skip 'i'
if x >= 9:
x -= 1
y = int(v[1:]) - 1
return x, y
def xy2move(x, y):
if x == -1 and y == -1:
return "pass"
if x >= 8:
x += 1
return chr(x + 65) + str(y + 1)
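# A few concrete conversions; note that the column letter 'I' is skipped in
# Go board coordinates.
def _example_coordinates():
    assert move2xy("D4") == (3, 3)
    assert xy2move(3, 3) == "D4"
    assert move2xy("pass") == (-1, -1)
    assert move2xy("J10") == (8, 9)
    assert xy2move(8, 9) == "J10"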
def plot_plane(v):
s = ""
for j in range(v.size(1)):
for i in range(v.size(0)):
if v[i, v.size(1) - 1 - j] != 0:
s += "o "
else:
s += ". "
s += "\n"
print(s)
def topk_accuracy2(batch, state_curr, topk=(1,)):
pi = state_curr["pi"]
import torch
if isinstance(pi, torch.autograd.Variable):
pi = pi.data
score, indices = pi.sort(dim=1, descending=True)
maxk = max(topk)
topn_count = [0] * maxk
for ind, gt in zip(indices, batch["offline_a"][0]):
for i in range(maxk):
if ind[i] == gt[0]:
topn_count[i] += 1
for i in range(maxk):
topn_count[i] /= indices.size(0)
return [topn_count[i - 1] for i in topk]
class GoConsole:
def __init__(self, GC, evaluator):
self.exit = False
self.GC = GC
self.board_size = GC.params["board_size"]
self.evaluator = evaluator
self.last_move_idx = None
def move2action(self, v):
if v.lower() == "pass":
return self.board_size ** 2
x, y = move2xy(v)
return x * self.board_size + y
def action2move(self, a):
if a == self.board_size ** 2:
return "pass"
x = a // self.board_size
y = a % self.board_size
return xy2move(x, y)
def check(self, batch):
reply = self.evaluator.actor(batch)
topk = topk_accuracy2(batch, reply, topk=(1, 2, 3, 4, 5))
for i, v in enumerate(topk):
self.check_stats[i] += v
if sum(topk) == 0:
self.check_stats[-1] += 1
def actor(self, batch):
reply = self.evaluator.actor(batch)
return reply
def showboard(self, batch):
print(batch.GC.getGame(0).showBoard())
def prompt(self, prompt_str, batch):
if self.last_move_idx is not None:
curr_move_idx = batch["move_idx"][0][0]
if curr_move_idx - self.last_move_idx == 1:
self.check(batch)
self.last_move_idx = curr_move_idx
return
else:
n = sum(self.check_stats.values())
print("#Move: " + str(n))
accu = 0
for i in range(5):
accu += self.check_stats[i]
print("Top %d: %.3f" % (i, accu / n))
self.last_move_idx = None
self.showboard(batch)
# Ask user to choose
while True:
if getattr(self, "repeat", 0) > 0:
self.repeat -= 1
cmd = self.repeat_cmd
else:
cmd = input(prompt_str)
items = cmd.split()
            if len(items) < 1:
                print("Invalid input")
                continue
c = items[0]
reply = dict(pi=None, a=None, V=0)
try:
if c == 'p':
reply["a"] = self.move2action(items[1])
return reply
elif c == 'c':
reply = self.evaluator.actor(batch)
return reply
elif c == "s":
channel_id = int(items[1])
plot_plane(batch["s"][0][0][channel_id])
elif c == "a":
reply = self.evaluator.actor(batch)
if "pi" in reply:
score, indices = reply["pi"].squeeze().sort(
dim=0, descending=True)
first_n = int(items[1])
for i in range(first_n):
print("%s: %.3f" %
(self.action2move(indices[i]), score[i]))
else:
print("No key \"pi\"")
elif c == "check":
print("Top %d" % self.check(batch))
elif c == 'check2end':
self.check_stats = Counter()
self.check(batch)
self.last_move_idx = batch["move_idx"][0][0]
if len(items) == 2:
self.repeat = int(items[1])
self.repeat_cmd = "check2end_cont"
return
elif c == "check2end_cont":
if not hasattr(self, "check_stats"):
self.check_stats = Counter()
self.check(batch)
self.last_move_idx = batch["move_idx"][0][0]
return
elif c == "aug":
print(batch["aug_code"][0][0])
elif c == "show":
self.showboard(batch)
elif c == "dbg":
import pdb
pdb.set_trace()
elif c == 'offline_a':
if "offline_a" in batch:
for i, offline_a in \
enumerate(batch["offline_a"][0][0]):
print(
"[%d]: %s" %
(i, self.action2move(offline_a)))
else:
print("No offline_a available!")
elif c == "exit":
self.exit = True
return reply
else:
print("Invalid input: " + cmd + ". Please try again")
except Exception as e:
print("Something wrong! " + str(e))
'''
elif c == "u":
batch.GC.undoMove(0)
self.showboard(batch)
elif c == "h":
handicap = int(items[1])
batch.GC.applyHandicap(0, handicap)
self.showboard(batch)
'''
class GoConsoleGTP:
def on_protocol_version(self, batch, items, reply):
return True, "2"
def on_clear_board(self, batch, items, reply):
reply["a"] = self.actions["clear"]
return True, reply
def on_name(self, batch, items, reply):
return True, "DF2"
def on_komi(self, batch, items, reply):
# For now we just fix komi number.
if items[1] != "7.5":
return False, "We only support 7.5 komi for now"
return True, None
def on_boardsize(self, batch, items, reply):
if items[1] != str(self.board_size):
return (
False,
"We only support %dx%d board for now" % (
self.board_size, self.board_size)
)
return True, None
def on_genmove(self, batch, items, reply):
ret, msg = self.check_player(batch, items[1][0])
if ret:
reply["a"] = self.actions["skip"]
return True, reply
else:
return False, msg
def on_play(self, batch, items, reply):
ret, msg = self.check_player(batch, items[1][0])
if ret:
reply["a"] = self.move2action(items[2])
return True, reply
else:
return False, msg
def on_showboard(self, batch, items, reply):
self.showboard(batch)
return True, None
def on_final_score(self, batch, items, reply):
final_score = self.get_final_score(batch)
if final_score > 0:
return True, "B+%.1f" % final_score
else:
return True, "W+%.1f" % (-final_score)
def on_version(self, batch, items, reply):
return True, "1.0"
def on_exit(self, batch, items, reply):
self.exit = True
return True, reply
def on_quit(self, batch, items, reply):
return self.on_exit(batch, items, reply)
def on_list_commands(self, batch, items, reply):
msg = "\n".join(self.commands.keys())
return True, msg
def __init__(self, GC, evaluator):
self.exit = False
self.GC = GC
self.board_size = GC.params["board_size"]
self.evaluator = evaluator
self.actions = {
"skip": GC.params["ACTION_SKIP"],
"pass": GC.params["ACTION_PASS"],
"resign": GC.params["ACTION_RESIGN"],
"clear": GC.params["ACTION_CLEAR"]
}
self.last_cmd = ""
self.commands = {
key[3:]: func
for key, func in inspect.getmembers(
self, predicate=inspect.ismethod)
if key.startswith("on_")
}
def move2action(self, v):
if v.lower() in self.actions:
return self.actions[v.lower()]
x, y = move2xy(v)
return x * self.board_size + y
def actor(self, batch):
reply = self.evaluator.actor(batch)
return reply
def action2move(self, a):
x = a // self.board_size
y = a % self.board_size
return xy2move(x, y)
def showboard(self, batch):
print(batch.GC.getGame(0).showBoard())
def get_next_player(self, batch):
return batch.GC.getGame(0).getNextPlayer()
def get_last_move(self, batch):
return batch.GC.getGame(0).getLastMove()
def get_final_score(self, batch):
return batch.GC.getGame(0).getLastScore()
def check_player(self, batch, player):
board_next_player = self.get_next_player(batch)
if player.lower() != board_next_player.lower():
return (
False,
("Specified next player %s is not the same as the "
"next player %s on the board") % (
player, board_next_player
)
)
else:
return True, None
def print_msg(self, ret, msg):
print("\n%s %s\n\n" % (("=" if ret else "?"), msg))
def prompt(self, prompt_str, batch):
# Show last command results.
if self.last_cmd == "play" or self.last_cmd == "clear_board":
self.print_msg(True, "")
elif self.last_cmd == "genmove":
self.print_msg(True, self.get_last_move(batch))
self.last_cmd = ""
while True:
cmd = input(prompt_str)
items = cmd.split()
if len(items) < 1:
self.print_msg(False, "Invalid input")
continue
c = items[0]
reply = dict(pi=None, a=None, V=0)
try:
ret, msg = self.commands[c](batch, items, reply)
self.last_cmd = c
if not ret:
self.print_msg(False, msg)
else:
if isinstance(msg, dict):
return msg
elif isinstance(msg, str):
self.print_msg(True, msg)
else:
self.print_msg(True, "")
except Exception:
print(traceback.format_exc())
self.print_msg(False, "Invalid command")
|
#!/usr/bin/env python
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import torch
from console_lib import GoConsoleGTP
from rlpytorch import Evaluator, load_env
def main():
print('Python version:', sys.version)
print('PyTorch version:', torch.__version__)
print('CUDA version', torch.version.cuda)
print('Conda env:', os.environ.get("CONDA_DEFAULT_ENV", ""))
additional_to_load = {
'evaluator': (
Evaluator.get_option_spec(),
lambda object_map: Evaluator(object_map, stats=None)),
}
# Set game to online model.
env = load_env(
os.environ,
overrides={
'num_games': 1,
'greedy': True,
'T': 1,
'model': 'online',
'additional_labels': ['aug_code', 'move_idx'],
},
additional_to_load=additional_to_load)
evaluator = env['evaluator']
GC = env["game"].initialize()
model_loader = env["model_loaders"][0]
model = model_loader.load_model(GC.params)
mi = env['mi']
mi.add_model("model", model)
mi.add_model("actor", model)
mi["model"].eval()
mi["actor"].eval()
console = GoConsoleGTP(GC, evaluator)
def human_actor(batch):
return console.prompt("", batch)
def actor(batch):
return console.actor(batch)
def train(batch):
console.prompt("DF Train> ", batch)
evaluator.setup(sampler=env["sampler"], mi=mi)
GC.reg_callback_if_exists("actor_black", actor)
GC.reg_callback_if_exists("human_actor", human_actor)
GC.reg_callback_if_exists("train", train)
GC.start()
GC.GC.getClient().setRequest(
mi["actor"].step, -1, env['game'].options.resign_thres, -1)
evaluator.episode_start(0)
while True:
GC.run()
if console.exit:
break
GC.stop()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import time
import re
from datetime import datetime
import torch
from rlpytorch import \
Evaluator, load_env, ModelInterface
class Stats(object):
def __init__(self):
self.total_batchsize = 0
self.total_sel_batchsize = 0
self.actor_count = 0
def feed(self, batch):
self.total_sel_batchsize += batch.batchsize
self.total_batchsize += batch.max_batchsize
self.actor_count += 1
if self.total_sel_batchsize >= 500000:
print(datetime.now())
batch_usage = self.total_sel_batchsize / self.total_batchsize
print(f'Batch usage: '
f'{self.total_sel_batchsize}/{self.total_batchsize} '
f'({100.0 * batch_usage:.2f}%)')
wr = batch.GC.getClient().getGameStats().getWinRateStats()
win_rate = (100.0 * wr.black_wins / wr.total_games
if wr.total_games > 0
else 0.0)
print(f'B/W: {wr.black_wins}/{wr.white_wins}. '
f'Black winrate: {win_rate:.2f} {wr.total_games}')
self.total_sel_batchsize = 0
self.total_batchsize = 0
print('Actor count:', self.actor_count)
name_matcher = re.compile(r"save-(\d+)")
def extract_ver(model_loader):
name = os.path.basename(model_loader.options.load)
m = name_matcher.match(name)
return int(m.group(1))
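# Illustrative example (hypothetical checkpoint name, not from this file): for a
# model loader whose options.load ends in "save-1500.bin", name_matcher captures
# "1500" and extract_ver(model_loader) returns the integer version 1500.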
def reload_model(model_loader, params, mi, actor_name, args):
model = model_loader.load_model(params)
if actor_name not in mi:
mi.add_model(actor_name, model, cuda=(args.gpu >= 0), gpu_id=args.gpu)
else:
mi.update_model(actor_name, model)
mi[actor_name].eval()
def reload(mi, model_loader, params, args, root, ver, actor_name):
if model_loader.options.load is None or model_loader.options.load == "":
print('No previous model loaded, loading from', root)
real_path = os.path.join(root, "save-" + str(ver) + ".bin")
else:
this_root = os.path.dirname(model_loader.options.load)
real_path = os.path.join(this_root, "save-" + str(ver) + ".bin")
if model_loader.options.load != real_path:
model_loader.options.load = real_path
reload_model(model_loader, params, mi, actor_name, args)
else:
print('Warning! Same model, skip loading', real_path)
def main():
print('Python version:', sys.version)
print('PyTorch version:', torch.__version__)
print('CUDA version', torch.version.cuda)
print('Conda env:', os.environ.get("CONDA_DEFAULT_ENV", ""))
# Set game to online model.
actors = ["actor_black", "actor_white"]
additional_to_load = {
("eval_" + actor_name): (
Evaluator.get_option_spec(name="eval_" + actor_name),
lambda object_map, actor_name=actor_name: Evaluator(
object_map, name="eval_" + actor_name,
actor_name=actor_name, stats=None)
)
for i, actor_name in enumerate(actors)
}
additional_to_load.update({
("mi_" + name): (ModelInterface.get_option_spec(), ModelInterface)
for name in actors
})
env = load_env(
os.environ, num_models=2, overrides={'actor_only': True},
additional_to_load=additional_to_load)
GC = env["game"].initialize()
stats = [Stats(), Stats()]
for i in range(len(actors)):
actor_name = actors[i]
stat = stats[i]
e = env["eval_" + actor_name]
print(f'register {actor_name} for e = {e!s}')
e.setup(sampler=env["sampler"], mi=env["mi_" + actor_name])
def actor(batch, e, stat):
reply = e.actor(batch)
stat.feed(batch)
return reply
GC.reg_callback(actor_name,
lambda batch, e=e, stat=stat: actor(batch, e, stat))
root = os.environ.get("root", "./")
print(f'Root: "{root}"')
args = env["game"].options
loop_end = False
def game_start(batch):
print("In game start")
vers = [int(batch["black_ver"][0]), int(batch["white_ver"][0])]
# Use the version number to load models.
for model_loader, ver, actor_name in zip(
env["model_loaders"], vers, actors):
if ver >= 0:
while True:
try:
reload(
env["mi_" + actor_name], model_loader, GC.params,
args, root, ver, actor_name)
break
except BaseException:
import traceback
traceback.print_exc()
time.sleep(10)
def game_end(batch):
nonlocal loop_end
wr = batch.GC.getClient().getGameStats().getWinRateStats()
win_rate = (100.0 * wr.black_wins / wr.total_games
if wr.total_games > 0 else 0.0)
print(f'{datetime.now()!s} B/W: {wr.black_wins}/{wr.white_wins}. '
f'Black winrate: {win_rate:.2f} ({wr.total_games})')
if args.suicide_after_n_games > 0 and \
wr.total_games >= args.suicide_after_n_games:
print(f'#suicide_after_n_games: {args.suicide_after_n_games}, '
f'total_games: {wr.total_games}')
loop_end = True
GC.reg_callback_if_exists("game_start", game_start)
GC.reg_callback_if_exists("game_end", game_end)
GC.start()
if args.eval_model_pair:
if args.eval_model_pair.find(",") >= 0:
black, white = args.eval_model_pair.split(",")
else:
black = extract_ver(env["model_loaders"][0])
white = extract_ver(env["model_loaders"][1])
# Force them to reload in the future.
for model_loader, actor_name in zip(env["model_loaders"], actors):
reload_model(model_loader, GC.params,
env["mi_" + actor_name], actor_name, args)
# We just use one thread to do selfplay.
GC.GC.getClient().setRequest(
int(black), int(white), env['game'].options.resign_thres, 1)
for actor_name in actors:
env["eval_" + actor_name].episode_start(0)
while not loop_end:
GC.run()
GC.stop()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2018-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import re
import time
import torch
from rlpytorch import load_env, SingleProcessRun, Trainer
matcher = re.compile(r"save-(\d+).bin")
def main():
print('Python version:', sys.version)
print('PyTorch version:', torch.__version__)
print('CUDA version', torch.version.cuda)
print('Conda env:', os.environ.get("CONDA_DEFAULT_ENV", ""))
additional_to_load = {
'trainer': (
Trainer.get_option_spec(),
lambda option_map: Trainer(option_map)),
'runner': (
SingleProcessRun.get_option_spec(),
lambda option_map: SingleProcessRun(option_map)),
}
env = load_env(os.environ, additional_to_load=additional_to_load)
trainer = env['trainer']
runner = env['runner']
GC = env["game"].initialize()
model_loader = env["model_loaders"][0]
model = model_loader.load_model(GC.params)
env["mi"].add_model("model", model, opt=True)
keep_prev_selfplay = env["game"].options.keep_prev_selfplay
model_ver = 0
model_filename = model_loader.options.load
if isinstance(model_filename, str) and model_filename != "":
realpath = os.path.realpath(model_filename)
m = matcher.match(os.path.basename(realpath))
if m:
model_ver = int(m.group(1))
eval_old_model = env["game"].options.eval_old_model
if eval_old_model >= 0:
GC.GC.getServer().setEvalMode(model_ver, eval_old_model)
else:
GC.GC.getServer().setInitialVersion(model_ver)
selfplay_ver = model_ver
root = os.environ["save"]
print(f'Root: "{root}"')
print(f'Keep prev_selfplay: {keep_prev_selfplay!s}')
def train(batch, *args, **kwargs):
# Check whether the version match.
if keep_prev_selfplay or \
(batch["selfplay_ver"] != selfplay_ver).sum() == 0:
trainer.train(batch, *args, **kwargs)
else:
print(f'Got a batch whose selfplay_ver differs from '
f'{selfplay_ver}; skipping')
runner.inc_episode_counter(-1)
def train_ctrl(batch, *args, **kwargs):
nonlocal selfplay_ver
old_selfplay_ver = selfplay_ver
selfplay_ver = int(batch["selfplay_ver"][0])
print(
f'Train ctrl: selfplay_ver: {old_selfplay_ver} -> {selfplay_ver}')
GC.GC.getServer().waitForSufficientSelfplay(selfplay_ver)
# Reload old models.
real_path = os.path.join(root, "save-" + str(selfplay_ver) + ".bin")
model_loader.options.load = real_path
while True:
try:
model = model_loader.load_model(GC.params)
break
except BaseException:
time.sleep(10)
env["mi"].remove_model("model")
env["mi"].add_model("model", model, opt=True)
trainer.episode_reset()
runner.set_episode_counter(-1)
GC.reg_callback("train", train)
GC.reg_callback("train_ctrl", train_ctrl)
if GC.reg_has_callback("actor"):
args = env["game"].options
env["mi"].add_model(
"actor",
model,
copy=True,
cuda=(args.gpu >= 0),
gpu_id=args.gpu)
GC.reg_callback("actor", trainer.actor)
trainer.setup(
sampler=env["sampler"],
mi=env["mi"],
rl_method=env["method"])
def episode_summary(i):
nonlocal selfplay_ver
ver = trainer.episode_summary(i)
# This might block (when evaluation does not catch up with training).
GC.GC.getServer().notifyNewVersion(selfplay_ver, ver)
offline_training = (env["game"].options.mode == "offline_train")
def after_start():
nonlocal selfplay_ver
if not offline_training:
print("About to wait for sufficient selfplay")
GC.GC.getServer().waitForSufficientSelfplay(selfplay_ver)
runner.setup(GC, after_start=after_start,
episode_summary=episode_summary,
episode_start=trainer.episode_start)
runner.run()
if __name__ == '__main__':
main()
|
addrs = {
"myserver": "[XXX.XXX.XXX.XXX]",
}
|
import os
import re
import sys
from pathlib import Path
import argparse
import torch
import platform
import importlib
import subprocess
import torch._dynamo
import torch.nn as nn
import torch.nn.functional as F
gpu_arch_ver = os.getenv("MATRIX_GPU_ARCH_VERSION")
gpu_arch_type = os.getenv("MATRIX_GPU_ARCH_TYPE")
channel = os.getenv("MATRIX_CHANNEL")
stable_version = os.getenv("MATRIX_STABLE_VERSION")
package_type = os.getenv("MATRIX_PACKAGE_TYPE")
target_os = os.getenv("TARGET_OS")
is_cuda_system = gpu_arch_type == "cuda"
NIGHTLY_ALLOWED_DELTA = 3
MODULES = [
{
"name": "torchvision",
"repo": "https://github.com/pytorch/vision.git",
"smoke_test": "./vision/test/smoke_test.py",
"extension": "extension",
"repo_name": "vision",
},
{
"name": "torchaudio",
"repo": "https://github.com/pytorch/audio.git",
"smoke_test": "./audio/test/smoke_test/smoke_test.py --no-ffmpeg",
"extension": "_extension",
"repo_name": "audio",
},
]
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.fc1 = nn.Linear(9216, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
output = self.fc1(x)
return output
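# Note on the hard-coded fc1 input size (a derivation, assuming the 1x28x28 input
# that smoke_test_compile below feeds this model): 28x28 -> conv1(3x3) -> 26x26x32
# -> conv2(3x3) -> 24x24x64 -> max_pool2d(2) -> 12x12x64 = 9216 features.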
def check_version(package: str) -> None:
# only makes sense to check nightly package where dates are known
if channel == "nightly":
check_nightly_binaries_date(package)
else:
if not torch.__version__.startswith(stable_version):
raise RuntimeError(
f"Torch version mismatch, expected {stable_version} for channel {channel}. But its {torch.__version__}"
)
def check_nightly_binaries_date(package: str) -> None:
from datetime import datetime, timedelta
format_dt = '%Y%m%d'
torch_str = torch.__version__
date_t_str = re.findall("dev\d+", torch.__version__)
date_t_delta = datetime.now() - datetime.strptime(date_t_str[0][3:], format_dt)
if date_t_delta.days >= NIGHTLY_ALLOWED_DELTA:
raise RuntimeError(
f"the binaries are from {date_t_str} and are more than {NIGHTLY_ALLOWED_DELTA} days old!"
)
if(package == "all"):
for module in MODULES:
imported_module = importlib.import_module(module["name"])
module_version = imported_module.__version__
date_m_str = re.findall("dev\d+", module_version)
date_m_delta = datetime.now() - datetime.strptime(date_m_str[0][3:], format_dt)
print(f"Nightly date check for {module['name']} version {module_version}")
if date_m_delta.days > NIGHTLY_ALLOWED_DELTA:
raise RuntimeError(
f"Expected {module['name']} to be less then {NIGHTLY_ALLOWED_DELTA} days. But its {date_m_delta}"
)
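# Illustrative example (hypothetical version string, not from this file): for
# torch.__version__ == "2.1.0.dev20230815+cu121" the regex captures "dev20230815",
# datetime.strptime("20230815", "%Y%m%d") gives 2023-08-15, and the check fails
# once that date is NIGHTLY_ALLOWED_DELTA (3) or more days behind datetime.now().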
def test_cuda_runtime_errors_captured() -> None:
cuda_exception_missed=True
try:
print("Testing test_cuda_runtime_errors_captured")
torch._assert_async(torch.tensor(0, device="cuda"))
torch._assert_async(torch.tensor(0 + 0j, device="cuda"))
except RuntimeError as e:
if re.search("CUDA", f"{e}"):
print(f"Caught CUDA exception with success: {e}")
cuda_exception_missed = False
else:
raise e
if(cuda_exception_missed):
raise RuntimeError( f"Expected CUDA RuntimeError but have not received!")
def smoke_test_cuda(package: str, runtime_error_check: str) -> None:
if not torch.cuda.is_available() and is_cuda_system:
raise RuntimeError(f"Expected CUDA {gpu_arch_ver}. However CUDA is not loaded.")
if(package == 'all' and is_cuda_system):
for module in MODULES:
imported_module = importlib.import_module(module["name"])
# TBD for vision: move the extension module to private so it will
# be _extension.
version = "N/A"
if module["extension"] == "extension":
version = imported_module.extension._check_cuda_version()
else:
version = imported_module._extension._check_cuda_version()
print(f"{module['name']} CUDA: {version}")
if torch.cuda.is_available():
if torch.version.cuda != gpu_arch_ver:
raise RuntimeError(
f"Wrong CUDA version. Loaded: {torch.version.cuda} Expected: {gpu_arch_ver}"
)
print(f"torch cuda: {torch.version.cuda}")
# todo add cudnn version validation
print(f"torch cudnn: {torch.backends.cudnn.version()}")
print(f"cuDNN enabled? {torch.backends.cudnn.enabled}")
# torch.compile is only supported on Linux; release builds additionally require Python < 3.11
if (sys.platform == "linux" or sys.platform == "linux2") and sys.version_info < (3, 11, 0) and channel == "release":
smoke_test_compile()
elif (sys.platform == "linux" or sys.platform == "linux2") and channel != "release":
smoke_test_compile()
if(runtime_error_check == "enabled"):
test_cuda_runtime_errors_captured()
def smoke_test_conv2d() -> None:
import torch.nn as nn
print("Testing smoke_test_conv2d")
# With square kernels and equal stride
m = nn.Conv2d(16, 33, 3, stride=2)
# non-square kernels and unequal stride and with padding
m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
# non-square kernels and unequal stride and with padding and dilation
basic_conv = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
input = torch.randn(20, 16, 50, 100)
output = basic_conv(input)
if is_cuda_system:
print("Testing smoke_test_conv2d with cuda")
conv = nn.Conv2d(3, 3, 3).cuda()
x = torch.randn(1, 3, 24, 24).cuda()
with torch.cuda.amp.autocast():
out = conv(x)
supported_dtypes = [torch.float16, torch.float32, torch.float64]
for dtype in supported_dtypes:
print(f"Testing smoke_test_conv2d with cuda for {dtype}")
conv = basic_conv.to(dtype).cuda()
input = torch.randn(20, 16, 50, 100, device="cuda").type(dtype)
output = conv(input)
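# Quick shape check for the basic_conv call above (standard Conv2d arithmetic,
# out = floor((in + 2*pad - dilation*(kernel-1) - 1) / stride) + 1):
#   H: floor((50 + 8 - 3*2 - 1) / 2) + 1 = 26
#   W: floor((100 + 4 - 1*4 - 1) / 1) + 1 = 100
# so the output has shape (20, 33, 26, 100).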
def smoke_test_linalg() -> None:
print("Testing smoke_test_linalg")
A = torch.randn(5, 3)
U, S, Vh = torch.linalg.svd(A, full_matrices=False)
U.shape, S.shape, Vh.shape
torch.dist(A, U @ torch.diag(S) @ Vh)
U, S, Vh = torch.linalg.svd(A)
U.shape, S.shape, Vh.shape
torch.dist(A, U[:, :3] @ torch.diag(S) @ Vh)
A = torch.randn(7, 5, 3)
U, S, Vh = torch.linalg.svd(A, full_matrices=False)
torch.dist(A, U @ torch.diag_embed(S) @ Vh)
if is_cuda_system:
supported_dtypes = [torch.float32, torch.float64]
for dtype in supported_dtypes:
print(f"Testing smoke_test_linalg with cuda for {dtype}")
A = torch.randn(20, 16, 50, 100, device="cuda").type(dtype)
torch.linalg.svd(A)
def smoke_test_compile() -> None:
supported_dtypes = [torch.float16, torch.float32, torch.float64]
def foo(x: torch.Tensor) -> torch.Tensor:
return torch.sin(x) + torch.cos(x)
for dtype in supported_dtypes:
print(f"Testing smoke_test_compile for {dtype}")
x = torch.rand(3, 3, device="cuda").type(dtype)
x_eager = foo(x)
x_pt2 = torch.compile(foo)(x)
print(torch.allclose(x_eager, x_pt2))
# Reset torch dynamo since we are changing mode
torch._dynamo.reset()
dtype = torch.float32
torch.set_float32_matmul_precision('high')
print(f"Testing smoke_test_compile with mode 'max-autotune' for {dtype}")
x = torch.rand(64, 1, 28, 28, device="cuda").type(torch.float32)
model = Net().to(device="cuda")
x_pt2 = torch.compile(model, mode="max-autotune")(x)
def smoke_test_modules():
cwd = os.getcwd()
for module in MODULES:
if module["repo"]:
if not os.path.exists(f"{cwd}/{module['repo_name']}"):
print(f"Path does not exist: {cwd}/{module['repo_name']}")
subprocess.check_output(f"git clone --depth 1 {module['repo']}", stderr=subprocess.STDOUT, shell=True)
try:
smoke_test_command = f"python3 {module['smoke_test']}"
if target_os == 'windows':
smoke_test_command = f"python {module['smoke_test']}"
output = subprocess.check_output(
smoke_test_command, stderr=subprocess.STDOUT, shell=True,
universal_newlines=True)
except subprocess.CalledProcessError as exc:
raise RuntimeError(
f"Module {module['name']} FAIL: {exc.returncode} Output: {exc.output}"
)
else:
print("Output: \n{}\n".format(output))
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"--package",
help="Package to include in smoke testing",
type=str,
choices=["all", "torchonly"],
default="all",
)
parser.add_argument(
"--runtime-error-check",
help="No Runtime Error check",
type=str,
choices=["enabled", "disabled"],
default="enabled",
)
options = parser.parse_args()
print(f"torch: {torch.__version__}")
check_version(options.package)
smoke_test_conv2d()
smoke_test_linalg()
if options.package == "all":
smoke_test_modules()
smoke_test_cuda(options.package, options.runtime_error_check)
if __name__ == "__main__":
main()
|
r"""
It's used to check basic CNN features with CUDA.
For example, it will throw an exception if some components are missing
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class SimpleCNN(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 1, 3)
self.pool = nn.MaxPool2d(2, 2)
def forward(self, inputs):
output = self.pool(F.relu(self.conv(inputs)))
output = output.view(1)
return output
# Mock one infer
device = torch.device("cuda:0")
net = SimpleCNN().to(device)
net_inputs = torch.rand((1, 1, 5, 5), device=device)
outputs = net(net_inputs)
print(outputs)
criterion = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.1)
# Mock one step training
label = torch.full((1,), 1.0, dtype=torch.float, device=device)
loss = criterion(outputs, label)
loss.backward()
optimizer.step()
|
r"""
It's used to check basic rnn features with cuda.
For example, it will throw an exception if some components are missing
"""
import torch
import torch.nn as nn
device = torch.device("cuda:0")
rnn = nn.RNN(10, 20, 2).to(device)
inputs = torch.randn(5, 3, 10).to(device)
h0 = torch.randn(2, 3, 20).to(device)
output, hn = rnn(inputs, h0)
|
# Logic copied from PEP 513
def is_manylinux1_compatible():
# Only Linux, and only x86-64 / i686
from distutils.util import get_platform
if get_platform() not in ["linux-x86_64", "linux-i686"]:
return False
# Check for presence of _manylinux module
try:
import _manylinux
return bool(_manylinux.manylinux1_compatible)
except (ImportError, AttributeError):
# Fall through to heuristic check below
pass
# Check glibc version. CentOS 5 uses glibc 2.5.
return have_compatible_glibc(2, 5)
def have_compatible_glibc(major, minimum_minor):
import ctypes
process_namespace = ctypes.CDLL(None)
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return False
# Call gnu_get_libc_version, which returns a string like "2.5".
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
# Parse string and check against requested version.
version = [int(piece) for piece in version_str.split(".")]
assert len(version) == 2
if major != version[0]:
return False
if minimum_minor > version[1]:
return False
return True
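# Illustrative example (hypothetical values): if gnu_get_libc_version() returns
# "2.17", version == [2, 17]; with (major, minimum_minor) == (2, 5) the major
# versions match and 17 >= 5, so have_compatible_glibc(2, 5) returns True.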
import sys
if is_manylinux1_compatible():
print("%s is manylinux1 compatible" % (sys.executable,))
sys.exit(0)
else:
print("%s is NOT manylinux1 compatible" % (sys.executable,))
sys.exit(1)
|
# cf. https://github.com/pypa/manylinux/issues/53
GOOD_SSL = "https://google.com"
BAD_SSL = "https://self-signed.badssl.com"
import sys
print("Testing SSL certificate checking for Python:", sys.version)
if (sys.version_info[:2] < (2, 7)
or sys.version_info[:2] < (3, 4)):
print("This version never checks SSL certs; skipping tests")
sys.exit(0)
if sys.version_info[0] >= 3:
from urllib.request import urlopen
EXC = OSError
else:
from urllib import urlopen
EXC = IOError
print("Connecting to %s should work" % (GOOD_SSL,))
urlopen(GOOD_SSL)
print("...it did, yay.")
print("Connecting to %s should fail" % (BAD_SSL,))
try:
urlopen(BAD_SSL)
# If we get here then we failed:
print("...it DIDN'T!!!!!11!!1one!")
sys.exit(1)
except EXC:
print("...it did, yay.") |
#!/usr/bin/env python
import argparse
import time
from os import path, makedirs
from datetime import datetime
from collections import defaultdict
from typing import Iterator, List, Type, Dict, Set, TypeVar, Optional
from re import sub, match, search
from packaging.version import parse
import boto3
S3 = boto3.resource('s3')
CLIENT = boto3.client('s3')
BUCKET = S3.Bucket('pytorch')
ACCEPTED_FILE_EXTENSIONS = ("whl", "zip", "tar.gz")
ACCEPTED_SUBDIR_PATTERNS = [
r"cu[0-9]+", # for cuda
r"rocm[0-9]+\.[0-9]+", # for rocm
"cpu",
]
PREFIXES_WITH_HTML = {
"whl": "torch_stable.html",
"whl/lts/1.8": "torch_lts.html",
"whl/nightly": "torch_nightly.html",
"whl/test": "torch_test.html",
}
# NOTE: This refers to the name on the wheels themselves and not the name of
# the package as specified by setuptools. For packages with "-" (hyphens) in
# their names you need to convert them to "_" (underscores) in order for them
# to be allowed here, since the wheel names are what get compared.
PACKAGE_ALLOW_LIST = {
"Pillow",
"certifi",
"charset_normalizer",
"cmake",
"colorama",
"fbgemm_gpu",
"filelock",
"fsspec",
"idna",
"Jinja2",
"lit",
"MarkupSafe",
"mpmath",
"nestedtensor",
"networkx",
"numpy",
"nvidia_cublas_cu11",
"nvidia_cuda_cupti_cu11",
"nvidia_cuda_nvrtc_cu11",
"nvidia_cuda_runtime_cu11",
"nvidia_cudnn_cu11",
"nvidia_cufft_cu11",
"nvidia_curand_cu11",
"nvidia_cusolver_cu11",
"nvidia_cusparse_cu11",
"nvidia_nccl_cu11",
"nvidia_nvtx_cu11",
"nvidia_cublas_cu12",
"nvidia_cuda_cupti_cu12",
"nvidia_cuda_nvrtc_cu12",
"nvidia_cuda_runtime_cu12",
"nvidia_cudnn_cu12",
"nvidia_cufft_cu12",
"nvidia_curand_cu12",
"nvidia_cusolver_cu12",
"nvidia_cusparse_cu12",
"nvidia_nccl_cu12",
"nvidia_nvtx_cu12",
"nvidia_nvjitlink_cu12",
"packaging",
"portalocker",
"pytorch_triton",
"pytorch_triton_rocm",
"requests",
"sympy",
"torch",
"torch_tensorrt",
"torcharrow",
"torchaudio",
"torchcsprng",
"torchdata",
"torchdistx",
"torchmetrics",
"torchrec",
"torchtext",
"torchvision",
"triton",
"tqdm",
"typing_extensions",
"urllib3",
}
# Should match torch-2.0.0.dev20221221+cu118-cp310-cp310-linux_x86_64.whl as:
# Group 1: torch-2.0.0.dev
# Group 2: 20221221
PACKAGE_DATE_REGEX = r"([a-zA-z]*-[0-9.]*.dev)([0-9]*)"
# How many packages should we keep of a specific package?
KEEP_THRESHOLD = 60
S3IndexType = TypeVar('S3IndexType', bound='S3Index')
def extract_package_build_time(full_package_name: str) -> datetime:
result = search(PACKAGE_DATE_REGEX, full_package_name)
if result is not None:
try:
return datetime.strptime(result.group(2), "%Y%m%d")
except ValueError:
# Ignore any ValueError since such packages probably shouldn't be hidden anyway
pass
return datetime.now()
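# Illustrative example, using the sample wheel name from the regex comment above:
# extract_package_build_time("torch-2.0.0.dev20221221+cu118-cp310-cp310-linux_x86_64.whl")
# returns datetime(2022, 12, 21); names without a ".dev<date>" component fall back
# to datetime.now().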
def between_bad_dates(package_build_time: datetime):
start_bad = datetime(year=2022, month=8, day=17)
end_bad = datetime(year=2022, month=12, day=30)
return start_bad <= package_build_time <= end_bad
class S3Index:
def __init__(self: S3IndexType, objects: List[str], prefix: str) -> None:
self.objects = objects
self.prefix = prefix.rstrip("/")
self.html_name = PREFIXES_WITH_HTML[self.prefix]
# should dynamically grab subdirectories like whl/test/cu101
# so we don't need to add them manually anymore
self.subdirs = {
path.dirname(obj) for obj in objects if path.dirname(obj) != prefix
}
def nightly_packages_to_show(self: S3IndexType) -> Set[str]:
"""Finding packages to show based on a threshold we specify
Basically takes our S3 packages, normalizes the version for easier
comparisons, then iterates over normalized versions until we reach a
threshold and then starts adding package to delete after that threshold
has been reached
After figuring out what versions we'd like to hide we iterate over
our original object list again and pick out the full paths to the
packages that are included in the list of versions to delete
"""
# also includes versions without GPU specifier (i.e. cu102) for easier
# sorting, sorts in reverse to put the most recent versions first
all_sorted_packages = sorted(
{self.normalize_package_version(obj) for obj in self.objects},
key=lambda name_ver: parse(name_ver.split('-', 1)[-1]),
reverse=True,
)
packages: Dict[str, int] = defaultdict(int)
to_hide: Set[str] = set()
for obj in all_sorted_packages:
full_package_name = path.basename(obj)
package_name = full_package_name.split('-')[0]
package_build_time = extract_package_build_time(full_package_name)
# Hard pass on packages that are not included in our allow list
if package_name not in PACKAGE_ALLOW_LIST:
to_hide.add(obj)
continue
if packages[package_name] >= KEEP_THRESHOLD:
to_hide.add(obj)
elif between_bad_dates(package_build_time):
to_hide.add(obj)
else:
packages[package_name] += 1
return set(self.objects).difference({
obj for obj in self.objects
if self.normalize_package_version(obj) in to_hide
})
def is_obj_at_root(self, obj:str) -> bool:
return path.dirname(obj) == self.prefix
def _resolve_subdir(self, subdir: Optional[str] = None) -> str:
if not subdir:
subdir = self.prefix
# make sure we strip any trailing slashes
return subdir.rstrip("/")
def gen_file_list(
self,
subdir: Optional[str]=None,
package_name: Optional[str] = None
) -> Iterator[str]:
objects = (
self.nightly_packages_to_show() if self.prefix == 'whl/nightly'
else self.objects
)
subdir = self._resolve_subdir(subdir) + '/'
for obj in objects:
if package_name is not None:
if self.obj_to_package_name(obj) != package_name:
continue
if self.is_obj_at_root(obj) or obj.startswith(subdir):
yield obj
def get_package_names(self, subdir: Optional[str] = None) -> List[str]:
return sorted(set(self.obj_to_package_name(obj) for obj in self.gen_file_list(subdir)))
def normalize_package_version(self: S3IndexType, obj: str) -> str:
# removes the GPU specifier from the package name as well as
# unnecessary things like the file extension, architecture name, etc.
return sub(
r"%2B.*",
"",
"-".join(path.basename(obj).split("-")[:2])
)
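# Illustrative example (hypothetical key): for
# "whl/nightly/cu118/torch-2.0.0.dev20221221%2Bcu118-cp310-cp310-linux_x86_64.whl"
# the basename's first two dash-separated fields are joined and the "%2B..." GPU
# suffix is stripped, yielding "torch-2.0.0.dev20221221".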
def obj_to_package_name(self, obj: str) -> str:
return path.basename(obj).split('-', 1)[0]
def to_legacy_html(
self,
subdir: Optional[str]=None
) -> str:
"""Generates a string that can be used as the HTML index
Takes our objects and transforms them into HTML that have historically
been used by pip for installing pytorch.
NOTE: These are not PEP 503 compliant but are here for legacy purposes
"""
out: List[str] = []
subdir = self._resolve_subdir(subdir)
is_root = subdir == self.prefix
for obj in self.gen_file_list(subdir):
# Strip our prefix
sanitized_obj = obj.replace(subdir, "", 1)
if sanitized_obj.startswith('/'):
sanitized_obj = sanitized_obj.lstrip("/")
# we include objects at our root prefix so that users can still
# install packages like torchaudio / torchtext even if they want
# to install a specific GPU arch of torch / torchvision
if not is_root and self.is_obj_at_root(obj):
# strip root prefix
sanitized_obj = obj.replace(self.prefix, "", 1).lstrip("/")
sanitized_obj = f"../{sanitized_obj}"
out.append(f'<a href="{sanitized_obj}">{sanitized_obj}</a><br/>')
return "\n".join(sorted(out))
def to_simple_package_html(
self,
subdir: Optional[str],
package_name: str
) -> str:
"""Generates a string that can be used as the package simple HTML index
"""
out: List[str] = []
# Adding html header
out.append('<!DOCTYPE html>')
out.append('<html>')
out.append(' <body>')
out.append(' <h1>Links for {}</h1>'.format(package_name.lower().replace("_","-")))
for obj in sorted(self.gen_file_list(subdir, package_name)):
out.append(f' <a href="/{obj}">{path.basename(obj).replace("%2B","+")}</a><br/>')
# Adding html footer
out.append(' </body>')
out.append('</html>')
out.append('<!--TIMESTAMP {}-->'.format(int(time.time())))
return '\n'.join(out)
def to_simple_packages_html(
self,
subdir: Optional[str],
) -> str:
"""Generates a string that can be used as the simple HTML index
"""
out: List[str] = []
# Adding html header
out.append('<!DOCTYPE html>')
out.append('<html>')
out.append(' <body>')
for pkg_name in sorted(self.get_package_names(subdir)):
out.append(f' <a href="{pkg_name.replace("_","-")}/">{pkg_name.replace("_","-")}</a><br/>')
# Adding html footer
out.append(' </body>')
out.append('</html>')
out.append('<!--TIMESTAMP {}-->'.format(int(time.time())))
return '\n'.join(out)
def upload_legacy_html(self) -> None:
for subdir in self.subdirs:
print(f"INFO Uploading {subdir}/{self.html_name}")
BUCKET.Object(
key=f"{subdir}/{self.html_name}"
).put(
ACL='public-read',
CacheControl='no-cache,no-store,must-revalidate',
ContentType='text/html',
Body=self.to_legacy_html(subdir=subdir)
)
def upload_pep503_htmls(self) -> None:
for subdir in self.subdirs:
print(f"INFO Uploading {subdir}/index.html")
BUCKET.Object(
key=f"{subdir}/index.html"
).put(
ACL='public-read',
CacheControl='no-cache,no-store,must-revalidate',
ContentType='text/html',
Body=self.to_simple_packages_html(subdir=subdir)
)
for pkg_name in self.get_package_names(subdir=subdir):
compat_pkg_name = pkg_name.lower().replace("_", "-")
print(f"INFO Uploading {subdir}/{compat_pkg_name}/index.html")
BUCKET.Object(
key=f"{subdir}/{compat_pkg_name}/index.html"
).put(
ACL='public-read',
CacheControl='no-cache,no-store,must-revalidate',
ContentType='text/html',
Body=self.to_simple_package_html(subdir=subdir, package_name=pkg_name)
)
def save_legacy_html(self) -> None:
for subdir in self.subdirs:
print(f"INFO Saving {subdir}/{self.html_name}")
makedirs(subdir, exist_ok=True)
with open(path.join(subdir, self.html_name), mode="w", encoding="utf-8") as f:
f.write(self.to_legacy_html(subdir=subdir))
def save_pep503_htmls(self) -> None:
for subdir in self.subdirs:
print(f"INFO Saving {subdir}/index.html")
makedirs(subdir, exist_ok=True)
with open(path.join(subdir, "index.html"), mode="w", encoding="utf-8") as f:
f.write(self.to_simple_packages_html(subdir=subdir))
for pkg_name in self.get_package_names(subdir=subdir):
makedirs(path.join(subdir, pkg_name), exist_ok=True)
with open(path.join(subdir, pkg_name, "index.html"), mode="w", encoding="utf-8") as f:
f.write(self.to_simple_package_html(subdir=subdir, package_name=pkg_name))
@classmethod
def from_S3(cls: Type[S3IndexType], prefix: str) -> S3IndexType:
objects = []
prefix = prefix.rstrip("/")
for obj in BUCKET.objects.filter(Prefix=prefix):
is_acceptable = any([path.dirname(obj.key) == prefix] + [
match(
f"{prefix}/{pattern}",
path.dirname(obj.key)
)
for pattern in ACCEPTED_SUBDIR_PATTERNS
]) and obj.key.endswith(ACCEPTED_FILE_EXTENSIONS)
if is_acceptable:
sanitized_key = obj.key.replace("+", "%2B")
objects.append(sanitized_key)
return cls(objects, prefix)
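# Illustrative sketch (hypothetical keys): with prefix "whl/test",
# "whl/test/cu118/torch-2.0.1+cu118-cp310-cp310-linux_x86_64.whl" matches the
# cu[0-9]+ subdir pattern and an accepted extension, so it is kept (with "+"
# sanitized to "%2B"), while "whl/test/cu118/index.html" is filtered out.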
def create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser("Manage S3 HTML indices for PyTorch")
parser.add_argument(
"prefix",
type=str,
choices=list(PREFIXES_WITH_HTML.keys()) + ["all"]
)
parser.add_argument("--do-not-upload", action="store_true")
parser.add_argument("--generate-pep503", action="store_true")
return parser
def main():
parser = create_parser()
args = parser.parse_args()
action = "Saving" if args.do_not_upload else "Uploading"
if args.prefix == 'all':
for prefix in PREFIXES_WITH_HTML.keys():
print(f"INFO: {action} indices for '{prefix}'")
idx = S3Index.from_S3(prefix=prefix)
if args.do_not_upload:
idx.save_legacy_html()
else:
idx.upload_legacy_html()
else:
print(f"INFO: {action} indices for '{args.prefix}'")
idx = S3Index.from_S3(prefix=args.prefix)
if args.do_not_upload:
idx.save_legacy_html()
if args.generate_pep503:
idx.save_pep503_htmls()
else:
idx.upload_legacy_html()
if args.generate_pep503:
idx.upload_pep503_htmls()
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Downloads pytorch and domain library packages from a conda channel
# and backs them up to S3
# Do not use unless you know what you are doing
# Usage: python backup_conda.py --version 1.6.0
import conda.api
import boto3
from typing import List, Optional
import urllib
import os
import hashlib
import argparse
S3 = boto3.resource('s3')
BUCKET = S3.Bucket('pytorch-backup')
_known_subdirs = ["linux-64", "osx-64", "osx-arm64", "win-64"]
def compute_md5(path:str) -> str:
with open(path, "rb") as f:
return hashlib.md5(f.read()).hexdigest()
def download_conda_package(package:str, version:Optional[str] = None, depends:Optional[str] = None, channel:Optional[str] = None) -> List[str]:
packages = conda.api.SubdirData.query_all(package, channels = [channel] if channel is not None else None, subdirs = _known_subdirs)
rc = []
for pkg in packages:
if version is not None and pkg.version != version:
continue
if depends is not None and depends not in pkg.depends:
continue
print(f"Downloading {pkg.url}...")
os.makedirs(pkg.subdir, exist_ok = True)
fname = f"{pkg.subdir}/{pkg.fn}"
if not os.path.exists(fname):
with open(fname, "wb") as f:
with urllib.request.urlopen(pkg.url) as url:
f.write(url.read())
if compute_md5(fname) != pkg.md5:
print(f"md5 of {fname} is {compute_md5(fname)} does not match {pkg.md5}")
continue
rc.append(fname)
return rc
def upload_to_s3(prefix: str, fnames: List[str]) -> None:
for fname in fnames:
BUCKET.upload_file(fname, f"{prefix}/{fname}")
print(fname)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--version",
help="PyTorch Version to backup",
type=str,
required = True
)
options = parser.parse_args()
rc = download_conda_package("pytorch", channel = "pytorch", version = options.version)
upload_to_s3(f"v{options.version}/conda", rc)
for libname in ["torchvision", "torchaudio", "torchtext"]:
print(f"processing {libname}")
rc = download_conda_package(libname, channel = "pytorch", depends = f"pytorch {options.version}")
upload_to_s3(f"v{options.version}/conda", rc)
|
#!/usr/bin/env python3
# This script is for building AARCH64 wheels using AWS EC2 instances.
# To generate binaries for the release follow these steps:
# 1. Update the mappings for each of the Domain Libraries by adding a new row to the table, e.g. "v1.11.0": ("0.11.0", "rc1"),
# 2. Run the script with the following arguments for each of the supported Python versions, specifying the required RC tag, for example v1.11.0-rc3:
# build_aarch64_wheel.py --key-name <YourPemKey> --use-docker --python 3.8 --branch <RCtag>
import boto3
import os
import subprocess
import sys
import time
from typing import Dict, List, Optional, Tuple, Union
# AMI images for us-east-1, change the following based on your ~/.aws/config
os_amis = {
'ubuntu18_04': "ami-078eece1d8119409f", # login_name: ubuntu
'ubuntu20_04': "ami-052eac90edaa9d08f", # login_name: ubuntu
'ubuntu22_04': "ami-0c6c29c5125214c77", # login_name: ubuntu
'redhat8': "ami-0698b90665a2ddcf1", # login_name: ec2-user
}
ubuntu18_04_ami = os_amis['ubuntu18_04']
def compute_keyfile_path(key_name: Optional[str] = None) -> Tuple[str, str]:
if key_name is None:
key_name = os.getenv("AWS_KEY_NAME")
if key_name is None:
return os.getenv("SSH_KEY_PATH", ""), ""
homedir_path = os.path.expanduser("~")
default_path = os.path.join(homedir_path, ".ssh", f"{key_name}.pem")
return os.getenv("SSH_KEY_PATH", default_path), key_name
ec2 = boto3.resource("ec2")
def ec2_get_instances(filter_name, filter_value):
return ec2.instances.filter(Filters=[{'Name': filter_name, 'Values': [filter_value]}])
def ec2_instances_of_type(instance_type='t4g.2xlarge'):
return ec2_get_instances('instance-type', instance_type)
def ec2_instances_by_id(instance_id):
rc = list(ec2_get_instances('instance-id', instance_id))
return rc[0] if len(rc) > 0 else None
def start_instance(key_name, ami=ubuntu18_04_ami, instance_type='t4g.2xlarge'):
inst = ec2.create_instances(ImageId=ami,
InstanceType=instance_type,
SecurityGroups=['ssh-allworld'],
KeyName=key_name,
MinCount=1,
MaxCount=1,
BlockDeviceMappings=[
{
'DeviceName': '/dev/sda1',
'Ebs': {
'DeleteOnTermination': True,
'VolumeSize': 50,
'VolumeType': 'standard'
}
}
])[0]
print(f'Create instance {inst.id}')
inst.wait_until_running()
running_inst = ec2_instances_by_id(inst.id)
print(f'Instance started at {running_inst.public_dns_name}')
return running_inst
class RemoteHost:
addr: str
keyfile_path: str
login_name: str
container_id: Optional[str] = None
ami: Optional[str] = None
def __init__(self, addr: str, keyfile_path: str, login_name: str = 'ubuntu'):
self.addr = addr
self.keyfile_path = keyfile_path
self.login_name = login_name
def _gen_ssh_prefix(self) -> List[str]:
return ["ssh", "-o", "StrictHostKeyChecking=no", "-i", self.keyfile_path,
f"{self.login_name}@{self.addr}", "--"]
@staticmethod
def _split_cmd(args: Union[str, List[str]]) -> List[str]:
return args.split() if isinstance(args, str) else args
def run_ssh_cmd(self, args: Union[str, List[str]]) -> None:
subprocess.check_call(self._gen_ssh_prefix() + self._split_cmd(args))
def check_ssh_output(self, args: Union[str, List[str]]) -> str:
return subprocess.check_output(self._gen_ssh_prefix() + self._split_cmd(args)).decode("utf-8")
def scp_upload_file(self, local_file: str, remote_file: str) -> None:
subprocess.check_call(["scp", "-i", self.keyfile_path, local_file,
f"{self.login_name}@{self.addr}:{remote_file}"])
def scp_download_file(self, remote_file: str, local_file: Optional[str] = None) -> None:
if local_file is None:
local_file = "."
subprocess.check_call(["scp", "-i", self.keyfile_path,
f"{self.login_name}@{self.addr}:{remote_file}", local_file])
def start_docker(self, image="quay.io/pypa/manylinux2014_aarch64:latest") -> None:
self.run_ssh_cmd("sudo apt-get install -y docker.io")
self.run_ssh_cmd(f"sudo usermod -a -G docker {self.login_name}")
self.run_ssh_cmd("sudo service docker start")
self.run_ssh_cmd(f"docker pull {image}")
self.container_id = self.check_ssh_output(f"docker run -t -d -w /root {image}").strip()
def using_docker(self) -> bool:
return self.container_id is not None
def run_cmd(self, args: Union[str, List[str]]) -> None:
if not self.using_docker():
return self.run_ssh_cmd(args)
assert self.container_id is not None
docker_cmd = self._gen_ssh_prefix() + ['docker', 'exec', '-i', self.container_id, 'bash']
p = subprocess.Popen(docker_cmd, stdin=subprocess.PIPE)
p.communicate(input=" ".join(["source .bashrc && "] + self._split_cmd(args)).encode("utf-8"))
rc = p.wait()
if rc != 0:
raise subprocess.CalledProcessError(rc, docker_cmd)
def check_output(self, args: Union[str, List[str]]) -> str:
if not self.using_docker():
return self.check_ssh_output(args)
assert self.container_id is not None
docker_cmd = self._gen_ssh_prefix() + ['docker', 'exec', '-i', self.container_id, 'bash']
p = subprocess.Popen(docker_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(out, err) = p.communicate(input=" ".join(["source .bashrc && "] + self._split_cmd(args)).encode("utf-8"))
rc = p.wait()
if rc != 0:
raise subprocess.CalledProcessError(rc, docker_cmd, output=out, stderr=err)
return out.decode("utf-8")
def upload_file(self, local_file: str, remote_file: str) -> None:
if not self.using_docker():
return self.scp_upload_file(local_file, remote_file)
tmp_file = os.path.join("/tmp", os.path.basename(local_file))
self.scp_upload_file(local_file, tmp_file)
self.run_ssh_cmd(["docker", "cp", tmp_file, f"{self.container_id}:/root/{remote_file}"])
self.run_ssh_cmd(["rm", tmp_file])
def download_file(self, remote_file: str, local_file: Optional[str] = None) -> None:
if not self.using_docker():
return self.scp_download_file(remote_file, local_file)
tmp_file = os.path.join("/tmp", os.path.basename(remote_file))
self.run_ssh_cmd(["docker", "cp", f"{self.container_id}:/root/{remote_file}", tmp_file])
self.scp_download_file(tmp_file, local_file)
self.run_ssh_cmd(["rm", tmp_file])
def download_wheel(self, remote_file: str, local_file: Optional[str] = None) -> None:
if self.using_docker() and local_file is None:
basename = os.path.basename(remote_file)
local_file = basename.replace("-linux_aarch64.whl", "-manylinux2014_aarch64.whl")
self.download_file(remote_file, local_file)
def list_dir(self, path: str) -> List[str]:
return self.check_output(["ls", "-1", path]).split("\n")
def wait_for_connection(addr, port, timeout=15, attempt_cnt=5):
import socket
for i in range(attempt_cnt):
try:
with socket.create_connection((addr, port), timeout=timeout):
return
except (ConnectionRefusedError, socket.timeout):
if i == attempt_cnt - 1:
raise
time.sleep(timeout)
def update_apt_repo(host: RemoteHost) -> None:
time.sleep(5)
host.run_cmd("sudo systemctl stop apt-daily.service || true")
host.run_cmd("sudo systemctl stop unattended-upgrades.service || true")
host.run_cmd("while systemctl is-active --quiet apt-daily.service; do sleep 1; done")
host.run_cmd("while systemctl is-active --quiet unattended-upgrades.service; do sleep 1; done")
host.run_cmd("sudo apt-get update")
time.sleep(3)
host.run_cmd("sudo apt-get update")
def install_condaforge(host: RemoteHost,
suffix: str = "latest/download/Miniforge3-Linux-aarch64.sh") -> None:
print('Install conda-forge')
host.run_cmd(f"curl -OL https://github.com/conda-forge/miniforge/releases/{suffix}")
host.run_cmd(f"sh -f {os.path.basename(suffix)} -b")
host.run_cmd(f"rm -f {os.path.basename(suffix)}")
if host.using_docker():
host.run_cmd("echo 'PATH=$HOME/miniforge3/bin:$PATH'>>.bashrc")
else:
host.run_cmd(['sed', '-i', '\'/^# If not running interactively.*/i PATH=$HOME/miniforge3/bin:$PATH\'', '.bashrc'])
def install_condaforge_python(host: RemoteHost, python_version="3.8") -> None:
if python_version == "3.6":
# Python-3.6 EOLed and not compatible with conda-4.11
install_condaforge(host, suffix="download/4.10.3-10/Miniforge3-4.10.3-10-Linux-aarch64.sh")
host.run_cmd(f"conda install -y python={python_version} numpy pyyaml")
else:
install_condaforge(host, suffix="download/4.11.0-4/Miniforge3-4.11.0-4-Linux-aarch64.sh")
# Pytorch-1.10 or older are not compatible with setuptools=59.6 or newer
host.run_cmd(f"conda install -y python={python_version} numpy pyyaml setuptools>=59.5.0")
def build_OpenBLAS(host: RemoteHost, git_clone_flags: str = "") -> None:
print('Building OpenBLAS')
host.run_cmd(f"git clone https://github.com/xianyi/OpenBLAS -b v0.3.20 {git_clone_flags}")
make_flags = "NUM_THREADS=64 USE_OPENMP=1 NO_SHARED=1 DYNAMIC_ARCH=1 TARGET=ARMV8"
host.run_cmd(f"pushd OpenBLAS && make {make_flags} -j8 && sudo make {make_flags} install && popd && rm -rf OpenBLAS")
def build_ArmComputeLibrary(host: RemoteHost, git_clone_flags: str = "") -> None:
print('Building Arm Compute Library')
acl_build_flags="debug=0 neon=1 opencl=0 os=linux openmp=1 cppthreads=0 arch=armv8a multi_isa=1 build=native"
host.run_cmd(f"git clone https://github.com/ARM-software/ComputeLibrary.git -b v23.05.1 {git_clone_flags}")
host.run_cmd(f"cd ComputeLibrary && scons Werror=1 -j8 {acl_build_flags}")
def embed_libgomp(host: RemoteHost, use_conda, wheel_name) -> None:
host.run_cmd("pip3 install auditwheel")
host.run_cmd("conda install -y patchelf" if use_conda else "sudo apt-get install -y patchelf")
from tempfile import NamedTemporaryFile
with NamedTemporaryFile() as tmp:
tmp.write(embed_library_script.encode('utf-8'))
tmp.flush()
host.upload_file(tmp.name, "embed_library.py")
print('Embedding libgomp into wheel')
if host.using_docker():
host.run_cmd(f"python3 embed_library.py {wheel_name} --update-tag")
else:
host.run_cmd(f"python3 embed_library.py {wheel_name}")
def checkout_repo(host: RemoteHost, *,
branch: str = "main",
url: str,
git_clone_flags: str,
mapping: Dict[str, Tuple[str, str]]) -> Optional[str]:
for prefix in mapping:
if not branch.startswith(prefix):
continue
tag = f"v{mapping[prefix][0]}-{mapping[prefix][1]}"
host.run_cmd(f"git clone {url} -b {tag} {git_clone_flags}")
return mapping[prefix][0]
# Map master to main
if branch == "master" and url.rsplit("/")[-1] in ['vision', 'text', 'audio', 'data']:
branch = "main"
host.run_cmd(f"git clone {url} -b {branch} {git_clone_flags}")
return None
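# Illustrative example (using the torchvision mapping defined below): calling
# checkout_repo(host, branch="v1.12.1-rc3", url=".../vision", ...) matches the
# "v1.12.1" prefix, clones tag "v0.13.1-rc6" and returns "0.13.1"; an unmapped
# branch such as "nightly" is cloned as-is and None is returned.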
def build_torchvision(host: RemoteHost, *,
branch: str = "main",
use_conda: bool = True,
git_clone_flags: str,
run_smoke_tests: bool = True) -> str:
print('Checking out TorchVision repo')
build_version = checkout_repo(host,
branch=branch,
url="https://github.com/pytorch/vision",
git_clone_flags=git_clone_flags,
mapping={
"v1.7.1": ("0.8.2", "rc2"),
"v1.8.0": ("0.9.0", "rc3"),
"v1.8.1": ("0.9.1", "rc1"),
"v1.9.0": ("0.10.0", "rc1"),
"v1.10.0": ("0.11.1", "rc1"),
"v1.10.1": ("0.11.2", "rc1"),
"v1.10.2": ("0.11.3", "rc1"),
"v1.11.0": ("0.12.0", "rc1"),
"v1.12.0": ("0.13.0", "rc4"),
"v1.12.1": ("0.13.1", "rc6"),
"v1.13.0": ("0.14.0", "rc4"),
"v1.13.1": ("0.14.1", "rc2"),
"v2.0.0": ("0.15.1", "rc2"),
"v2.0.1": ("0.15.2", "rc2"),
})
print("Building TorchVision wheel")
# Please note libpng and libjpeg are required to build the image.so extension
if use_conda:
host.run_cmd("conda install -y libpng jpeg")
# Remove .so files to force static linking
host.run_cmd("rm miniforge3/lib/libpng.so miniforge3/lib/libpng16.so miniforge3/lib/libjpeg.so")
# And patch setup.py to include libz dependency for libpng
host.run_cmd(['sed -i -e \'s/image_link_flags\.append("png")/image_link_flags += ["png", "z"]/\' vision/setup.py'])
build_vars = ""
if branch == "nightly":
version = host.check_output(["if [ -f vision/version.txt ]; then cat vision/version.txt; fi"]).strip()
if len(version) == 0:
# In older revisions, version was embedded in setup.py
version = host.check_output(["grep", "\"version = '\"", "vision/setup.py"]).strip().split("'")[1][:-2]
build_date = host.check_output("cd vision && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd vision && {build_vars} python3 setup.py bdist_wheel")
vision_wheel_name = host.list_dir("vision/dist")[0]
embed_libgomp(host, use_conda, os.path.join('vision', 'dist', vision_wheel_name))
print('Copying TorchVision wheel')
host.download_wheel(os.path.join('vision', 'dist', vision_wheel_name))
if run_smoke_tests:
host.run_cmd(f"pip3 install {os.path.join('vision', 'dist', vision_wheel_name)}")
host.run_cmd("python3 vision/test/smoke_test.py")
print("Delete vision checkout")
host.run_cmd("rm -rf vision")
return vision_wheel_name
def build_torchdata(host: RemoteHost, *,
branch: str = "master",
use_conda: bool = True,
git_clone_flags: str = "") -> str:
print('Checking out TorchData repo')
git_clone_flags += " --recurse-submodules"
build_version = checkout_repo(host,
branch=branch,
url="https://github.com/pytorch/data",
git_clone_flags=git_clone_flags,
mapping={
"v1.13.1": ("0.5.1", ""),
"v2.0.0": ("0.6.0", "rc5"),
"v2.0.1": ("0.6.1", "rc1"),
})
print('Building TorchData wheel')
build_vars = ""
if branch == 'nightly':
version = host.check_output(["if [ -f data/version.txt ]; then cat data/version.txt; fi"]).strip()
build_date = host.check_output("cd data && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd data && {build_vars} python3 setup.py bdist_wheel")
wheel_name = host.list_dir("data/dist")[0]
embed_libgomp(host, use_conda, os.path.join('data', 'dist', wheel_name))
print('Copying TorchData wheel')
host.download_wheel(os.path.join('data', 'dist', wheel_name))
return wheel_name
def build_torchtext(host: RemoteHost, *,
branch: str = "master",
use_conda: bool = True,
git_clone_flags: str = "") -> str:
print('Checking out TorchText repo')
git_clone_flags += " --recurse-submodules"
build_version = checkout_repo(host,
branch=branch,
url="https://github.com/pytorch/text",
git_clone_flags=git_clone_flags,
mapping={
"v1.9.0": ("0.10.0", "rc1"),
"v1.10.0": ("0.11.0", "rc2"),
"v1.10.1": ("0.11.1", "rc1"),
"v1.10.2": ("0.11.2", "rc1"),
"v1.11.0": ("0.12.0", "rc1"),
"v1.12.0": ("0.13.0", "rc2"),
"v1.12.1": ("0.13.1", "rc5"),
"v1.13.0": ("0.14.0", "rc3"),
"v1.13.1": ("0.14.1", "rc1"),
"v2.0.0": ("0.15.1", "rc2"),
"v2.0.1": ("0.15.2", "rc2"),
})
print('Building TorchText wheel')
build_vars = ""
if branch == 'nightly':
version = host.check_output(["if [ -f text/version.txt ]; then cat text/version.txt; fi"]).strip()
build_date = host.check_output("cd text && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd text && {build_vars} python3 setup.py bdist_wheel")
wheel_name = host.list_dir("text/dist")[0]
embed_libgomp(host, use_conda, os.path.join('text', 'dist', wheel_name))
print('Copying TorchText wheel')
host.download_wheel(os.path.join('text', 'dist', wheel_name))
return wheel_name
def build_torchaudio(host: RemoteHost, *,
branch: str = "master",
use_conda: bool = True,
git_clone_flags: str = "") -> str:
print('Checking out TorchAudio repo')
git_clone_flags += " --recurse-submodules"
build_version = checkout_repo(host,
branch=branch,
url="https://github.com/pytorch/audio",
git_clone_flags=git_clone_flags,
mapping={
"v1.9.0": ("0.9.0", "rc2"),
"v1.10.0": ("0.10.0", "rc5"),
"v1.10.1": ("0.10.1", "rc1"),
"v1.10.2": ("0.10.2", "rc1"),
"v1.11.0": ("0.11.0", "rc1"),
"v1.12.0": ("0.12.0", "rc3"),
"v1.12.1": ("0.12.1", "rc5"),
"v1.13.0": ("0.13.0", "rc4"),
"v1.13.1": ("0.13.1", "rc2"),
"v2.0.0": ("2.0.1", "rc3"),
"v2.0.1": ("2.0.2", "rc2"),
})
print('Building TorchAudio wheel')
build_vars = ""
if branch == 'nightly':
version = host.check_output(["grep", "\"version = '\"", "audio/setup.py"]).strip().split("'")[1][:-2]
build_date = host.check_output("cd audio && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
build_vars += f"BUILD_VERSION={version}.dev{build_date}"
elif build_version is not None:
build_vars += f"BUILD_VERSION={build_version} PYTORCH_VERSION={branch[1:].split('-')[0]}"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
host.run_cmd(f"cd audio && export FFMPEG_ROOT=$(pwd)/third_party/ffmpeg && export USE_FFMPEG=1 \
&& ./packaging/ffmpeg/build.sh \
&& {build_vars} python3 setup.py bdist_wheel")
wheel_name = host.list_dir("audio/dist")[0]
embed_libgomp(host, use_conda, os.path.join('audio', 'dist', wheel_name))
print('Copying TorchAudio wheel')
host.download_wheel(os.path.join('audio', 'dist', wheel_name))
return wheel_name
def configure_system(host: RemoteHost, *,
compiler: str = "gcc-8",
use_conda: bool = True,
python_version: str = "3.8") -> None:
if use_conda:
install_condaforge_python(host, python_version)
print('Configuring the system')
if not host.using_docker():
update_apt_repo(host)
host.run_cmd("sudo apt-get install -y ninja-build g++ git cmake gfortran unzip")
else:
host.run_cmd("yum install -y sudo")
host.run_cmd("conda install -y ninja scons")
if not use_conda:
host.run_cmd("sudo apt-get install -y python3-dev python3-yaml python3-setuptools python3-wheel python3-pip")
host.run_cmd("pip3 install dataclasses typing-extensions")
# Install and switch to gcc-8 on Ubuntu-18.04
if not host.using_docker() and host.ami == ubuntu18_04_ami and compiler == 'gcc-8':
host.run_cmd("sudo apt-get install -y g++-8 gfortran-8")
host.run_cmd("sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 100")
host.run_cmd("sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-8 100")
host.run_cmd("sudo update-alternatives --install /usr/bin/gfortran gfortran /usr/bin/gfortran-8 100")
if not use_conda:
print("Installing Cython + numpy from PyPy")
host.run_cmd("sudo pip3 install Cython")
host.run_cmd("sudo pip3 install numpy")
def build_domains(host: RemoteHost, *,
branch: str = "master",
use_conda: bool = True,
git_clone_flags: str = "") -> Tuple[str, str, str, str]:
vision_wheel_name = build_torchvision(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
audio_wheel_name = build_torchaudio(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
data_wheel_name = build_torchdata(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
text_wheel_name = build_torchtext(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
return (vision_wheel_name, audio_wheel_name, data_wheel_name, text_wheel_name)
def start_build(host: RemoteHost, *,
branch: str = "master",
compiler: str = "gcc-8",
use_conda: bool = True,
python_version: str = "3.8",
pytorch_only: bool = False,
pytorch_build_number: Optional[str] = None,
shallow_clone: bool = True,
enable_mkldnn: bool = False) -> Tuple[str, str, str, str, str]:
git_clone_flags = " --depth 1 --shallow-submodules" if shallow_clone else ""
if host.using_docker() and not use_conda:
print("Auto-selecting conda option for docker images")
use_conda = True
if not host.using_docker():
print("Disable mkldnn for host builds")
enable_mkldnn = False
configure_system(host,
compiler=compiler,
use_conda=use_conda,
python_version=python_version)
build_OpenBLAS(host, git_clone_flags)
if host.using_docker():
print("Move libgfortant.a into a standard location")
# HACK: pypa gforntran.a is compiled without PIC, which leads to the following error
# libgfortran.a(error.o)(.text._gfortrani_st_printf+0x34): unresolvable R_AARCH64_ADR_PREL_PG_HI21 relocation against symbol `__stack_chk_guard@@GLIBC_2.17'
# Workaround by copying gfortran library from the host
host.run_ssh_cmd("sudo apt-get install -y gfortran-8")
host.run_cmd("mkdir -p /usr/lib/gcc/aarch64-linux-gnu/8")
host.run_ssh_cmd(["docker", "cp", "/usr/lib/gcc/aarch64-linux-gnu/8/libgfortran.a",
f"{host.container_id}:/opt/rh/devtoolset-10/root/usr/lib/gcc/aarch64-redhat-linux/10/"
])
print('Checking out PyTorch repo')
host.run_cmd(f"git clone --recurse-submodules -b {branch} https://github.com/pytorch/pytorch {git_clone_flags}")
print('Building PyTorch wheel')
build_opts = ""
if pytorch_build_number is not None:
build_opts += f" --build-number {pytorch_build_number}"
# Breakpad build fails on aarch64
build_vars = "USE_BREAKPAD=0 "
if branch == 'nightly':
build_date = host.check_output("cd pytorch && git log --pretty=format:%s -1").strip().split()[0].replace("-", "")
version = host.check_output("cat pytorch/version.txt").strip()[:-2]
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date} PYTORCH_BUILD_NUMBER=1"
if branch.startswith("v1.") or branch.startswith("v2."):
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1:branch.find('-')]} PYTORCH_BUILD_NUMBER=1"
if host.using_docker():
build_vars += " CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000"
if enable_mkldnn:
build_ArmComputeLibrary(host, git_clone_flags)
print("build pytorch with mkldnn+acl backend")
build_vars += " USE_MKLDNN=ON USE_MKLDNN_ACL=ON"
host.run_cmd(f"cd pytorch && export ACL_ROOT_DIR=$HOME/ComputeLibrary && {build_vars} python3 setup.py bdist_wheel{build_opts}")
print('Repair the wheel')
pytorch_wheel_name = host.list_dir("pytorch/dist")[0]
host.run_cmd(f"export LD_LIBRARY_PATH=$HOME/acl/build:$HOME/pytorch/build/lib && auditwheel repair $HOME/pytorch/dist/{pytorch_wheel_name}")
print('replace the original wheel with the repaired one')
pytorch_repaired_wheel_name = host.list_dir("wheelhouse")[0]
host.run_cmd(f"cp $HOME/wheelhouse/{pytorch_repaired_wheel_name} $HOME/pytorch/dist/{pytorch_wheel_name}")
else:
print("build pytorch without mkldnn backend")
host.run_cmd(f"cd pytorch && {build_vars} python3 setup.py bdist_wheel{build_opts}")
print("Deleting build folder")
host.run_cmd("cd pytorch && rm -rf build")
pytorch_wheel_name = host.list_dir("pytorch/dist")[0]
embed_libgomp(host, use_conda, os.path.join('pytorch', 'dist', pytorch_wheel_name))
print('Copying the wheel')
host.download_wheel(os.path.join('pytorch', 'dist', pytorch_wheel_name))
print('Installing PyTorch wheel')
host.run_cmd(f"pip3 install pytorch/dist/{pytorch_wheel_name}")
if pytorch_only:
return (pytorch_wheel_name, None, None, None, None)
domain_wheels = build_domains(host, branch=branch, use_conda=use_conda, git_clone_flags=git_clone_flags)
return (pytorch_wheel_name, *domain_wheels)
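# The helper below is presumably written out to the remote host by embed_libgomp()
# and executed there to vendor libgomp.so.1 into the freshly built wheel, e.g.:
#   python3 embed_library.py torch-*.whl --update-tag
# (the script file name is illustrative). With --update-tag it also rewrites the
# WHEEL metadata tag from -linux_ to -manylinux2014_.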
embed_library_script = """
#!/usr/bin/env python3
from auditwheel.patcher import Patchelf
from auditwheel.wheeltools import InWheelCtx
from auditwheel.elfutils import elf_file_filter
from auditwheel.repair import copylib
from auditwheel.lddtree import lddtree
from subprocess import check_call
import os
import shutil
import sys
from tempfile import TemporaryDirectory
def replace_tag(filename):
with open(filename, 'r') as f:
lines = f.read().split("\\n")
for i,line in enumerate(lines):
if not line.startswith("Tag: "):
continue
lines[i] = line.replace("-linux_", "-manylinux2014_")
print(f'Updated tag from {line} to {lines[i]}')
with open(filename, 'w') as f:
f.write("\\n".join(lines))
class AlignedPatchelf(Patchelf):
def set_soname(self, file_name: str, new_soname: str) -> None:
check_call(['patchelf', '--page-size', '65536', '--set-soname', new_soname, file_name])
def replace_needed(self, file_name: str, soname: str, new_soname: str) -> None:
check_call(['patchelf', '--page-size', '65536', '--replace-needed', soname, new_soname, file_name])
def embed_library(whl_path, lib_soname, update_tag=False):
patcher = AlignedPatchelf()
out_dir = TemporaryDirectory()
whl_name = os.path.basename(whl_path)
tmp_whl_name = os.path.join(out_dir.name, whl_name)
with InWheelCtx(whl_path) as ctx:
torchlib_path = os.path.join(ctx._tmpdir.name, 'torch', 'lib')
ctx.out_wheel=tmp_whl_name
new_lib_path, new_lib_soname = None, None
for filename, elf in elf_file_filter(ctx.iter_files()):
if not filename.startswith('torch/lib'):
continue
libtree = lddtree(filename)
if lib_soname not in libtree['needed']:
continue
lib_path = libtree['libs'][lib_soname]['path']
if lib_path is None:
print(f"Can't embed {lib_soname} as it could not be found")
break
if lib_path.startswith(torchlib_path):
continue
if new_lib_path is None:
new_lib_soname, new_lib_path = copylib(lib_path, torchlib_path, patcher)
patcher.replace_needed(filename, lib_soname, new_lib_soname)
print(f'Replacing {lib_soname} with {new_lib_soname} for {filename}')
if update_tag:
# Add manylinux2014 tag
for filename in ctx.iter_files():
if os.path.basename(filename) != 'WHEEL':
continue
replace_tag(filename)
shutil.move(tmp_whl_name, whl_path)
if __name__ == '__main__':
embed_library(sys.argv[1], 'libgomp.so.1', len(sys.argv) > 2 and sys.argv[2] == '--update-tag')
"""
def run_tests(host: RemoteHost, whl: str, branch='master') -> None:
print('Configuring the system')
update_apt_repo(host)
host.run_cmd("sudo apt-get install -y python3-pip git")
host.run_cmd("sudo pip3 install Cython")
host.run_cmd("sudo pip3 install numpy")
host.upload_file(whl, ".")
host.run_cmd(f"sudo pip3 install {whl}")
host.run_cmd("python3 -c 'import torch;print(torch.rand((3,3))'")
host.run_cmd(f"git clone -b {branch} https://github.com/pytorch/pytorch")
host.run_cmd("cd pytorch/test; python3 test_torch.py -v")
def get_instance_name(instance) -> Optional[str]:
if instance.tags is None:
return None
for tag in instance.tags:
if tag['Key'] == 'Name':
return tag['Value']
return None
def list_instances(instance_type: str) -> None:
print(f"All instances of type {instance_type}")
for instance in ec2_instances_of_type(instance_type):
print(f"{instance.id} {get_instance_name(instance)} {instance.public_dns_name} {instance.state['Name']}")
def terminate_instances(instance_type: str) -> None:
print(f"Terminating all instances of type {instance_type}")
instances = list(ec2_instances_of_type(instance_type))
for instance in instances:
print(f"Terminating {instance.id}")
instance.terminate()
print("Waiting for termination to complete")
for instance in instances:
instance.wait_until_terminated()
def parse_arguments():
from argparse import ArgumentParser
parser = ArgumentParser("Builid and test AARCH64 wheels using EC2")
parser.add_argument("--key-name", type=str)
parser.add_argument("--debug", action="store_true")
parser.add_argument("--build-only", action="store_true")
parser.add_argument("--test-only", type=str)
parser.add_argument("--os", type=str, choices=list(os_amis.keys()), default='ubuntu20_04')
parser.add_argument("--python-version", type=str, choices=['3.6', '3.7', '3.8', '3.9', '3.10', '3.11'], default=None)
parser.add_argument("--alloc-instance", action="store_true")
parser.add_argument("--list-instances", action="store_true")
parser.add_argument("--pytorch-only", action="store_true")
parser.add_argument("--keep-running", action="store_true")
parser.add_argument("--terminate-instances", action="store_true")
parser.add_argument("--instance-type", type=str, default="t4g.2xlarge")
parser.add_argument("--branch", type=str, default="master")
parser.add_argument("--use-docker", action="store_true")
parser.add_argument("--compiler", type=str, choices=['gcc-7', 'gcc-8', 'gcc-9', 'clang'], default="gcc-8")
parser.add_argument("--use-torch-from-pypi", action="store_true")
parser.add_argument("--pytorch-build-number", type=str, default=None)
parser.add_argument("--disable-mkldnn", action="store_true")
return parser.parse_args()
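# Illustrative invocation (script name and values are examples only; requires AWS
# credentials and a registered EC2 key pair):
#   python3 build_aarch64_wheel.py --key-name my-ec2-key --os ubuntu20_04 \
#       --python-version 3.10 --branch master --use-docker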
if __name__ == '__main__':
args = parse_arguments()
ami = os_amis[args.os]
keyfile_path, key_name = compute_keyfile_path(args.key_name)
if args.list_instances:
list_instances(args.instance_type)
sys.exit(0)
if args.terminate_instances:
terminate_instances(args.instance_type)
sys.exit(0)
if len(key_name) == 0:
raise Exception("""
Cannot start build without key_name, please specify
--key-name argument or AWS_KEY_NAME environment variable.""")
if len(keyfile_path) == 0 or not os.path.exists(keyfile_path):
raise Exception(f"""
Cannot find keyfile with name: [{key_name}] in path: [{keyfile_path}], please
check `~/.ssh/` folder or manually set SSH_KEY_PATH environment variable.""")
# Starting the instance
inst = start_instance(key_name, ami=ami, instance_type=args.instance_type)
instance_name = f'{args.key_name}-{args.os}'
if args.python_version is not None:
instance_name += f'-py{args.python_version}'
inst.create_tags(DryRun=False, Tags=[{
'Key': 'Name',
'Value': instance_name,
}])
addr = inst.public_dns_name
wait_for_connection(addr, 22)
host = RemoteHost(addr, keyfile_path)
host.ami = ami
if args.use_docker:
update_apt_repo(host)
host.start_docker()
if args.test_only:
run_tests(host, args.test_only)
sys.exit(0)
if args.alloc_instance:
if args.python_version is None:
sys.exit(0)
install_condaforge_python(host, args.python_version)
sys.exit(0)
python_version = args.python_version if args.python_version is not None else '3.8'
if args.use_torch_from_pypi:
configure_system(host,
compiler=args.compiler,
python_version=python_version)
print("Installing PyTorch wheel")
host.run_cmd("pip3 install torch")
build_domains(host,
branch=args.branch,
git_clone_flags=" --depth 1 --shallow-submodules")
else:
start_build(host,
branch=args.branch,
compiler=args.compiler,
python_version=python_version,
pytorch_only=args.pytorch_only,
pytorch_build_number=args.pytorch_build_number,
enable_mkldnn=not args.disable_mkldnn)
if not args.keep_running:
print(f'Waiting for instance {inst.id} to terminate')
inst.terminate()
inst.wait_until_terminated()
|
#!/usr/bin/env python3
# encoding: UTF-8
import os
import subprocess
from pygit2 import Repository
from typing import List
def list_dir(path: str) -> List[str]:
'''
Helper that lists the contents of a directory (via ``ls -1``)
'''
return subprocess.check_output(["ls", "-1", path]).decode().split("\n")
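# For example, list_dir("/pytorch/dist") returns the directory entries one per
# element; the trailing newline of the `ls -1` output yields a final empty string,
# so callers typically take element [0].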
def build_ArmComputeLibrary(git_clone_flags: str = "") -> None:
'''
Build Arm Compute Library for use by the aarch64 PyTorch build
'''
print('Building Arm Compute Library')
os.system("cd / && mkdir /acl")
os.system(f"git clone https://github.com/ARM-software/ComputeLibrary.git -b v23.05.1 {git_clone_flags}")
os.system('sed -i -e \'s/"armv8.2-a"/"armv8-a"/g\' ComputeLibrary/SConscript; '
'sed -i -e \'s/-march=armv8.2-a+fp16/-march=armv8-a/g\' ComputeLibrary/SConstruct; '
'sed -i -e \'s/"-march=armv8.2-a"/"-march=armv8-a"/g\' ComputeLibrary/filedefs.json')
os.system("cd ComputeLibrary; export acl_install_dir=/acl; "
"scons Werror=1 -j8 debug=0 neon=1 opencl=0 os=linux openmp=1 cppthreads=0 arch=armv8.2-a multi_isa=1 build=native build_dir=$acl_install_dir/build; "
"cp -r arm_compute $acl_install_dir; "
"cp -r include $acl_install_dir; "
"cp -r utils $acl_install_dir; "
"cp -r support $acl_install_dir; "
"cp -r src $acl_install_dir; cd /")
def complete_wheel(folder: str):
'''
Complete wheel build and put in artifact location
'''
wheel_name = list_dir(f"/{folder}/dist")[0]
if "pytorch" in folder:
print("Repairing Wheel with AuditWheel")
os.system(f"cd /{folder}; auditwheel repair dist/{wheel_name}")
repaired_wheel_name = list_dir(f"/{folder}/wheelhouse")[0]
print(f"Moving {repaired_wheel_name} wheel to /{folder}/dist")
os.system(f"mv /{folder}/wheelhouse/{repaired_wheel_name} /{folder}/dist/")
else:
repaired_wheel_name = wheel_name
print(f"Copying {repaired_wheel_name} to artfacts")
os.system(f"mv /{folder}/dist/{repaired_wheel_name} /artifacts/")
return repaired_wheel_name
def parse_arguments():
'''
Parse command-line arguments
'''
from argparse import ArgumentParser
parser = ArgumentParser("AARCH64 wheels python CD")
parser.add_argument("--debug", action="store_true")
parser.add_argument("--build-only", action="store_true")
parser.add_argument("--test-only", type=str)
parser.add_argument("--enable-mkldnn", action="store_true")
return parser.parse_args()
if __name__ == '__main__':
'''
Entry Point
'''
args = parse_arguments()
enable_mkldnn = args.enable_mkldnn
repo = Repository('/pytorch')
branch = repo.head.name
if branch == 'HEAD':
branch = 'master'
git_clone_flags = " --depth 1 --shallow-submodules"
print('Building PyTorch wheel')
build_vars = "CMAKE_SHARED_LINKER_FLAGS=-Wl,-z,max-page-size=0x10000 "
os.system("python setup.py clean")
override_package_version = os.getenv("OVERRIDE_PACKAGE_VERSION")
if override_package_version is not None:
version = override_package_version
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version} PYTORCH_BUILD_NUMBER=1 "
else:
if branch == 'nightly' or branch == 'master':
build_date = subprocess.check_output(['git', 'log', '--pretty=format:%cs', '-1'], cwd='/pytorch').decode().replace('-', '')
version = subprocess.check_output(['cat', 'version.txt'], cwd='/pytorch').decode().strip()[:-2]
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={version}.dev{build_date} PYTORCH_BUILD_NUMBER=1 "
if branch.startswith("v1.") or branch.startswith("v2."):
build_vars += f"BUILD_TEST=0 PYTORCH_BUILD_VERSION={branch[1:branch.find('-')]} PYTORCH_BUILD_NUMBER=1 "
if enable_mkldnn:
build_ArmComputeLibrary(git_clone_flags)
print("build pytorch with mkldnn+acl backend")
build_vars += "USE_MKLDNN=ON USE_MKLDNN_ACL=ON " \
"ACL_ROOT_DIR=/acl " \
"LD_LIBRARY_PATH=/pytorch/build/lib:/acl/build:$LD_LIBRARY_PATH " \
"ACL_INCLUDE_DIR=/acl/build " \
"ACL_LIBRARY=/acl/build "
else:
print("build pytorch without mkldnn backend")
os.system(f"cd /pytorch; {build_vars} python3 setup.py bdist_wheel")
pytorch_wheel_name = complete_wheel("pytorch")
print(f"Build Compelete. Created {pytorch_wheel_name}..")
|
#!/usr/bin/env python3
from auditwheel.patcher import Patchelf
from auditwheel.wheeltools import InWheelCtx
from auditwheel.elfutils import elf_file_filter
from auditwheel.repair import copylib
from auditwheel.lddtree import lddtree
from subprocess import check_call
import os
import shutil
import sys
from tempfile import TemporaryDirectory
def replace_tag(filename):
with open(filename, 'r') as f:
lines = f.read().split("\\n")
for i,line in enumerate(lines):
if not line.startswith("Tag: "):
continue
lines[i] = line.replace("-linux_", "-manylinux2014_")
print(f'Updated tag from {line} to {lines[i]}')
with open(filename, 'w') as f:
f.write("\\n".join(lines))
class AlignedPatchelf(Patchelf):
def set_soname(self, file_name: str, new_soname: str) -> None:
check_call(['patchelf', '--page-size', '65536', '--set-soname', new_soname, file_name])
def replace_needed(self, file_name: str, soname: str, new_soname: str) -> None:
check_call(['patchelf', '--page-size', '65536', '--replace-needed', soname, new_soname, file_name])
def embed_library(whl_path, lib_soname, update_tag=False):
patcher = AlignedPatchelf()
out_dir = TemporaryDirectory()
whl_name = os.path.basename(whl_path)
tmp_whl_name = os.path.join(out_dir.name, whl_name)
with InWheelCtx(whl_path) as ctx:
torchlib_path = os.path.join(ctx._tmpdir.name, 'torch', 'lib')
ctx.out_wheel=tmp_whl_name
new_lib_path, new_lib_soname = None, None
for filename, elf in elf_file_filter(ctx.iter_files()):
if not filename.startswith('torch/lib'):
continue
libtree = lddtree(filename)
if lib_soname not in libtree['needed']:
continue
lib_path = libtree['libs'][lib_soname]['path']
if lib_path is None:
print(f"Can't embed {lib_soname} as it could not be found")
break
if lib_path.startswith(torchlib_path):
continue
if new_lib_path is None:
new_lib_soname, new_lib_path = copylib(lib_path, torchlib_path, patcher)
patcher.replace_needed(filename, lib_soname, new_lib_soname)
print(f'Replacing {lib_soname} with {new_lib_soname} for {filename}')
if update_tag:
# Add manylinux2014 tag
for filename in ctx.iter_files():
if os.path.basename(filename) != 'WHEEL':
continue
replace_tag(filename)
shutil.move(tmp_whl_name, whl_path)
if __name__ == '__main__':
embed_library(sys.argv[1], 'libgomp.so.1', len(sys.argv) > 2 and sys.argv[2] == '--update-tag')
|
from conda.cli.python_api import Commands, run_command
from tabulate import tabulate
from datetime import datetime
import json
PLATFORMS = ["osx-64", "linux-64", "win-64"]
PYTHON_VERSIONS = ["3.10", "3.9", "3.8", "3.7"]
CUDA_CUDNN_VERSION = [
("11.7", "8.5.0"), ("cpu", None)
]
CHANNEL = "pytorch-test"
VERSION = "1.13.*"
def generate_expected_builds(platform: str) -> set:
builds = set()
for py_version in PYTHON_VERSIONS:
if platform == "osx-64":
# macos builds support cpu only.
builds.add(f"py{py_version}_0")
continue
for cuda_version, cudnn_version in CUDA_CUDNN_VERSION:
if platform == "win-64":
cudnn_version = "8"
if cuda_version == "cpu":
builds.add(f"py{py_version}_{cuda_version}_0")
else:
builds.add(f"py{py_version}_cuda{cuda_version}_cudnn{cudnn_version}_0")
return builds
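# For example, with "3.8" in PYTHON_VERSIONS the expected build strings include:
#   osx-64:   "py3.8_0"                                        (CPU only)
#   linux-64: "py3.8_cuda11.7_cudnn8.5.0_0" and "py3.8_cpu_0"
#   win-64:   "py3.8_cuda11.7_cudnn8_0"     and "py3.8_cpu_0"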
def size_format(size_num) -> str:
for unit in ["", "K", "M", "G"]:
if abs(size_num) < 1024.0:
return f"{size_num:3.1f}{unit}B"
size_num /= 1024.0
return f"{size_num:3.1f}TB"
def main() -> None:
# Iterate over platform to gather build information of available conda version.
for platform in PLATFORMS:
expected_builds = generate_expected_builds(platform)
# Actual builds available in Conda
stdout, stderr, return_code = run_command(
Commands.SEARCH, f"{CHANNEL}::*[name=pytorch version={VERSION} subdir={platform}]", "--json")
if return_code != 0:
raise Exception(stderr)
available_versions = json.loads(stdout)
output_data = []
headers = ["File Name", "Date", "Size"]
actual_builds = set()
for version in available_versions["pytorch"]:
actual_builds.add(version["build"])
output_data.append((
version["fn"],
datetime.fromtimestamp(version["timestamp"] / 1000),
size_format(version["size"])
))
assert len(expected_builds) > 0, "expected builds set should not be empty."
assert expected_builds == actual_builds, (
f"Missing following builds in conda: {expected_builds.difference(actual_builds)} for platform {platform}"
)
print(f"\nSuccessfully verified following binaries are available in Conda for {platform}...")
print(tabulate(output_data, headers=headers, tablefmt="grid"))
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3.7
from datetime import datetime, time
import json
import requests
import itertools
import sqlite3
import os
import sys
from typing import Callable, Dict, Generator, List, MutableSet, Optional
def get_executor_price_rate(executor):
(etype, eclass) = executor['type'], executor['resource_class']
assert etype in ['machine', 'external', 'docker', 'macos', 'runner'], f'Unexpected type {etype}:{eclass}'
if etype == 'machine':
return {
'medium': 10,
'large': 20,
'xlarge': 100,
'2xlarge': 200,
'gpu.medium': 160,
'gpu.large': 320,
'gpu.small': 80,
'windows.medium': 40,
'windows.large': 120,
'windows.xlarge': 210,
'windows.2xlarge': 500,
'windows.gpu.nvidia.medium': 500,
'gpu.nvidia.small': 160,
'gpu.nvidia.medium': 240,
'gpu.nvidia.large': 1000,
}[eclass]
if etype == 'macos':
return {
'medium': 50,
'large': 100,
}[eclass]
if etype == 'docker':
return {
'small': 5,
'medium': 10,
'medium+': 15,
'large': 20,
'xlarge': 40,
'2xlarge': 80,
'2xlarge+': 100,
}[eclass]
if etype == 'runner' or etype == 'external':
return {
'pytorch/amd-gpu': 0,
}[eclass]
raise RuntimeError(f'Undefined executor {etype}:{eclass}')
price_per_credit = 6e-4
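# The tables above appear to express CircleCI credits per minute of runtime; at
# 6e-4 dollars per credit, a 'large' docker executor (20 credits/min) works out
# to roughly $0.72 per hour.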
def get_circleci_token() -> str:
token_file_path = os.path.join(os.getenv('HOME'), '.circleci_token')
token = os.getenv('CIRCLECI_TOKEN')
if token is not None:
return token
if not os.path.exists(token_file_path):
raise RuntimeError('Cannot get CircleCI token'
' neither from CIRCLECI_TOKEN environment variable,'
' nor via ~/.circleci_token file')
with open(token_file_path) as f:
return f.read().strip()
def is_workflow_in_progress(workflow: Dict) -> bool:
return workflow['status'] in ['running', 'not_run', 'failing', 'on_hold']
def str2date(val: str) -> datetime:
assert val is not None
return datetime.fromisoformat(val[:-1] if val.endswith('Z') else val)
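# For example: str2date("2021-09-01T12:30:00Z") -> datetime(2021, 9, 1, 12, 30)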
class CircleCICache:
def __init__(self, token: Optional[str], db_name: str = 'circleci-cache.db') -> None:
file_folder = os.path.dirname(__file__)
self.url_prefix = 'https://circleci.com/api/v2'
self.session = requests.session()
self.headers = {
'Accept': 'application/json',
'Circle-Token': token,
} if token is not None else None
self.db = sqlite3.connect(os.path.join(file_folder, db_name))
self.db.execute('CREATE TABLE IF NOT EXISTS jobs(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS artifacts(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')
self.db.execute('CREATE UNIQUE INDEX IF NOT EXISTS jobs_key on jobs(slug, job_id);')
self.db.execute('CREATE TABLE IF NOT EXISTS workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS pipeline_workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS pipelines(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL, branch TEXT, revision TEXT);')
self.db.commit()
def is_offline(self) -> bool:
return self.headers is None
def _get_paged_items_list(self, url: str, params: Optional[Dict] = None, item_count: Optional[int] = -1) -> List:
rc, token, run_once = [], None, False
def _should_quit():
nonlocal run_once, rc, token
if not run_once:
run_once = True
return False
if token is None:
return True
if item_count is None:
return True
return item_count >= 0 and len(rc) >= item_count
if params is None:
params = {}
while not _should_quit():
if token is not None:
params['page-token'] = token
r = self.session.get(url, params=params, headers=self.headers)
try:
j = r.json()
except json.JSONDecodeError:
print(f"Failed to decode {rc}", file=sys.stderr)
raise
if 'message' in j:
raise RuntimeError(f'Failed to get list from {url}: {j["message"]}')
token = j['next_page_token']
rc.extend(j['items'])
return rc
def get_pipelines(self, project: str = 'github/pytorch/pytorch', branch: Optional[str] = None, item_count: Optional[int] = None) -> List:
if self.is_offline():
c = self.db.cursor()
cmd = "SELECT json from pipelines"
if branch is not None:
cmd += f" WHERE branch='{branch}'"
if item_count is not None and item_count > 0:
cmd += f" LIMIT {item_count}"
c.execute(cmd)
return [json.loads(val[0]) for val in c.fetchall()]
rc = self._get_paged_items_list(f'{self.url_prefix}/project/{project}/pipeline', {'branch': branch} if branch is not None else {}, item_count)
for pipeline in rc:
vcs = pipeline['vcs']
pid, branch, revision, pser = pipeline['id'], vcs['branch'], vcs['revision'], json.dumps(pipeline)
self.db.execute("INSERT OR REPLACE INTO pipelines(id, branch, revision, json) VALUES (?, ?, ?, ?)", (pid, branch, revision, pser))
self.db.commit()
return rc
def get_pipeline_workflows(self, pipeline) -> List:
c = self.db.cursor()
c.execute("SELECT json FROM pipeline_workflows WHERE id=?", (pipeline,))
rc = c.fetchone()
if rc is not None:
rc = json.loads(rc[0])
if not any(is_workflow_in_progress(w) for w in rc) or self.is_offline():
return rc
if self.is_offline():
return []
rc = self._get_paged_items_list(f'{self.url_prefix}/pipeline/{pipeline}/workflow')
self.db.execute("INSERT OR REPLACE INTO pipeline_workflows(id, json) VALUES (?, ?)", (pipeline, json.dumps(rc)))
self.db.commit()
return rc
def get_workflow_jobs(self, workflow, should_cache=True) -> List:
c = self.db.cursor()
c.execute("select json from workflows where id=?", (workflow,))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return []
rc = self._get_paged_items_list(f'{self.url_prefix}/workflow/{workflow}/job')
if should_cache:
self.db.execute("INSERT INTO workflows(id, json) VALUES (?, ?)", (workflow, json.dumps(rc)))
self.db.commit()
return rc
def get_job(self, project_slug, job_number) -> Dict:
c = self.db.cursor()
c.execute("select json from jobs where slug=? and job_id = ?", (project_slug, job_number))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return {}
r = self.session.get(f'{self.url_prefix}/project/{project_slug}/job/{job_number}', headers=self.headers)
try:
rc = r.json()
except json.JSONDecodeError:
print(f"Failed to decode {rc}", file=sys.stderr)
raise
self.db.execute("INSERT INTO jobs(slug,job_id, json) VALUES (?, ?, ?)", (project_slug, job_number, json.dumps(rc)))
self.db.commit()
return rc
def get_job_artifacts(self, project_slug, job_number) -> List[Dict]:
c = self.db.cursor()
c.execute("select json from artifacts where slug=? and job_id = ?", (project_slug, job_number))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return [{}]
rc = self._get_paged_items_list(f"{self.url_prefix}/project/{project_slug}/{job_number}/artifacts")
self.db.execute("INSERT INTO artifacts(slug,job_id, json) VALUES (?, ?, ?)", (project_slug, job_number, json.dumps(rc)))
self.db.commit()
return rc
def get_pipeline_jobs(self, project: str = 'github/pytorch/pytorch', branch: Optional[str] = None, item_count: Optional[int] = None) -> Generator:
for pipeline in self.get_pipelines(project, branch, item_count):
for workflow in self.get_pipeline_workflows(pipeline['id']):
in_progress = is_workflow_in_progress(workflow)
for job in self.get_workflow_jobs(workflow['id'], should_cache=not in_progress):
yield (pipeline, workflow, job)
def get_jobs_summary(self, slug='gh/pytorch/pytorch', workflow='build') -> Dict:
items = self._get_paged_items_list(f'{self.url_prefix}/insights/{slug}/workflows/{workflow}/jobs')
return {item['name']: item for item in items}
def get_job_timeseries(self, job_name: str,
slug: str = 'gh/pytorch/pytorch',
workflow: str = 'build',
branch: Optional[str] = None) -> List:
params = {'branch': branch} if branch is not None else {}
items = self._get_paged_items_list(f'{self.url_prefix}/insights/{slug}/workflows/build/jobs/{job_name}', params)
return [(str2date(x['started_at']), x['duration']) for x in items if x['status'] == 'success']
def aggregate_by_day(series):
rc = {}
for (ts, val) in series:
date = datetime.combine(ts.date(), time())
valcount = [val, 1.0]
if date not in rc:
rc[date] = valcount
else:
rc[date] = [sum(x) for x in zip(rc[date], valcount)]
return [(x, rc[x][0] / rc[x][1]) for x in sorted(rc.keys())]
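# For example, two samples taken on the same calendar day are averaged:
#   aggregate_by_day([(datetime(2021, 1, 1, 9), 10), (datetime(2021, 1, 1, 18), 20)])
#   -> [(datetime(2021, 1, 1, 0, 0), 15.0)]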
def filter_names(names: List[str], name_filter: Optional[str] = None) -> List[str]:
import re
if name_filter is None:
return names
filters = name_filter.split(",")
return [name for name in names if any(re.match(filter, name) for filter in filters)]
def common_prefix(names: List[str]) -> str:
if len(names) == 0 or len(names[0]) == 0:
return ''
if len(names) == 1:
return names[0]
rc = names[0][0]
while rc != names[0] and all(name.startswith(rc) for name in names[1:]):
rc = names[0][:len(rc) + 1]
return rc[:-1]
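# For example: common_prefix(["pytorch_linux_test", "pytorch_linux_build"]) -> "pytorch_linux_"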
def plot_graph(name_filter: Optional[str] = None,
output_file: Optional[str] = None,
branch: Optional[str] = None) -> None:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
ci_cache = CircleCICache(token=get_circleci_token())
summary = ci_cache.get_jobs_summary()
test_jobs = [name for name in summary.keys() if name.startswith('pytorch') and 'test' in name]
filtered_jobs = filter_names(test_jobs, name_filter)
prefix = common_prefix(filtered_jobs)
if len(filtered_jobs) == 0:
print(f'Filter "{name_filter}" does not match to any of {test_jobs}')
return
series = []
labels = []
styles = [f'{color}{style}' for (style, color) in itertools.product(['-', '--', '-.', ':'], ['b', 'g', 'r', 'c', 'm', 'y', 'k'])]
fig, ax = plt.subplots()
for name in test_jobs:
label = f"{name}(p95 = {int(summary[name]['metrics']['duration_metrics']['p95']/60)} min)"
if name not in filtered_jobs:
print(label)
continue
ts = ci_cache.get_job_timeseries(name, branch=branch)
if len(ts) == 0:
print(f'{label} time series is empty!')
continue
print(f'{label} time series has {len(ts)} elements')
labels.append(label[len(prefix):])
series.append(ts)
x, y = zip(*aggregate_by_day(ts))
plt.plot(x, [i / 60.0 for i in y], styles[len(labels) % len(styles)])
plt.legend(labels, loc='upper left')
plt.title(f'{prefix} timeseries')
ax.set_ylabel("Duration (m)")
# Format date
ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %d'))
# Rotate tick labels
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
if output_file is not None:
plt.savefig(output_file)
else:
plt.show()
def print_line(line: str, padding: Optional[int] = None, newline: bool = True) -> None:
if padding is not None and len(line) < padding:
line += ' ' * (padding - len(line))
print(line, end='\n' if newline else '\r', flush=True)
def fetch_status(branch=None, item_count=50):
isatty = sys.stdout.isatty()
padding = os.get_terminal_size().columns - 1 if isatty else None
ci_cache = CircleCICache(token=get_circleci_token())
print(f"About to fetch {item_count} latest pipelines against {branch if branch is not None else 'all branches'}")
pipelines = ci_cache.get_pipelines(branch=branch, item_count=item_count)
total_price, total_master_price = 0, 0
for pipeline_idx, pipeline in enumerate(pipelines):
revision = pipeline['vcs']['revision']
branch = pipeline['vcs']['branch']
workflows = ci_cache.get_pipeline_workflows(pipeline['id'])
known_job_ids = []
for workflow in workflows:
url = f'https://app.circleci.com/pipelines/github/pytorch/pytorch/{workflow["pipeline_number"]}/workflows/{workflow["id"]}'
if is_workflow_in_progress(workflow):
print_line(f'Skipping {url} name:{workflow["name"]} status:{workflow["status"]}',
newline=not sys.stdout.isatty())
continue
rerun = False
total_credits, test_credits, gpu_credits, wincpu_credits, wingpu_credits = 0, 0, 0, 0, 0
jobs = ci_cache.get_workflow_jobs(workflow['id'])
for job in jobs:
job_name, job_status, job_number = job['name'], job['status'], job.get('job_number', None)
if job_status in ['blocked', 'canceled', 'unauthorized', 'running', 'not_run', 'failing']:
continue
if job_number is None:
print(job)
continue
if job_number in known_job_ids:
rerun = True
continue
job_info = ci_cache.get_job(job['project_slug'], job_number)
if 'executor' not in job_info:
print(f'executor not found in {job_info}')
continue
job_executor = job_info['executor']
resource_class = job_executor['resource_class']
if resource_class is None:
print(f'resource_class is none for {job_info}')
continue
job_on_gpu = 'gpu' in resource_class
job_on_win = 'windows' in resource_class
if job_status != 'infrastructure_fail':
duration = str2date(job_info['stopped_at']) - str2date(job_info['started_at'])
job_credits = get_executor_price_rate(job_executor) * int(job_info['duration']) * 1e-3 / 60
else:
job_credits, duration = 0, 0
job_cost = job_credits * price_per_credit
total_credits += job_credits
if 'test' in job_name or job_name.startswith('smoke_'):
test_credits += job_credits
elif job_on_gpu:
print(f'Running build job {job_name} on GPU!!!')
if job_on_gpu:
gpu_credits += job_credits
if job_on_win:
wingpu_credits += job_credits
if job_on_win and not job_on_gpu:
wincpu_credits += job_credits
known_job_ids.append(job_number)
print_line(f' {job_name} {job_status} {duration} ${job_cost:.2f}',
padding=padding, newline=not isatty)
# Increment totals
total_price += total_credits * price_per_credit
if branch in ['master', 'nightly', 'postnightly', 'release/1.6']:
total_master_price += total_credits * price_per_credit
# skip small jobs
if total_credits * price_per_credit < .1:
continue
workflow_status = f'[{pipeline_idx}/{len(pipelines)}]'
workflow_status += f' {url} {workflow["name"]} status:{workflow["status"]}'
workflow_status += f' price: ${total_credits * price_per_credit:.2f}'
workflow_status += ' (Rerun?)' if rerun else ''
workflow_status += f'\n\t\tdate: {workflow["created_at"]} branch:{branch} revision:{revision}'
workflow_status += f'\n\t\ttotal credits: {int(total_credits)}'
if test_credits != 0:
workflow_status += f' testing: {100 * test_credits / total_credits:.1f}%'
if gpu_credits != 0:
workflow_status += f' GPU testing: {100 * gpu_credits / total_credits:.1f}%'
if wingpu_credits != 0:
workflow_status += f' WINGPU/GPU: {100 * wingpu_credits / gpu_credits:.1f}%'
if wincpu_credits != 0:
workflow_status += f' Win CPU: {100 * wincpu_credits / total_credits:.1f}%'
workflow_status += f' Total: ${total_price:.2f} master fraction: {100 * total_master_price/ total_price:.1f}%'
print_line(workflow_status, padding=padding)
def plot_heatmap(cov_matrix, names):
import numpy as np
import matplotlib.pyplot as plt
assert cov_matrix.shape == (len(names), len(names))
fig, ax = plt.subplots()
ax.imshow(cov_matrix)
ax.set_xticks(np.arange(len(names)))
ax.set_yticks(np.arange(len(names)))
ax.set_xticklabels(names)
ax.set_yticklabels(names)
# Rotate tick labels
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
# Annotate values
for i in range(len(names)):
for j in range(len(names)):
ax.text(j, i, f'{cov_matrix[i, j]:.2f}', ha='center', va='center', color='w')
plt.show()
def filter_service_jobs(name):
if name.startswith('docker'):
return True
if name.startswith('binary'):
return True
return False
def filter_cuda_test(name):
if filter_service_jobs(name):
return False
if 'libtorch' in name:
return False
if 'test' not in name:
return False
# Skip jit-profiling tests
if 'jit-profiling' in name:
return False
if 'cuda11' in name:
return False
# Skip VS2017 tests
if 'vs2017' in name:
return False
return 'cuda' in name and 'nogpu' not in name
def filter_cuda_build(name):
if filter_service_jobs(name):
return False
if 'libtorch' in name:
return False
return 'cuda' in name and name.endswith('build')
def filter_windows_test(name):
if filter_service_jobs(name):
return False
# Skip jit-profiling tests
if 'jit-profiling' in name:
return False
return 'test' in name and 'windows' in name
def compute_covariance(branch='master', name_filter: Optional[Callable[[str], bool]] = None):
import numpy as np
revisions: MutableSet[str] = set()
job_summary: Dict[str, Dict[str, float]] = {}
# Extract data
print(f"Computing covariance for {branch if branch is not None else 'all branches'}")
ci_cache = CircleCICache(None)
pipelines = ci_cache.get_pipelines(branch=branch)
for pipeline in pipelines:
if pipeline['trigger']['type'] == 'schedule':
continue
revision = pipeline['vcs']['revision']
pipeline_jobs: Dict[str, float] = {}
blocked_jobs: MutableSet[str] = set()
workflows = ci_cache.get_pipeline_workflows(pipeline['id'])
for workflow in workflows:
if is_workflow_in_progress(workflow):
continue
jobs = ci_cache.get_workflow_jobs(workflow['id'])
for job in jobs:
job_name = job['name']
job_status = job['status']
# Handle renames
if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX2_test':
job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX2_test'
if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX_NO_AVX2_test':
job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX_test'
if job_status in ['infrastructure_fail', 'canceled']:
continue
if callable(name_filter) and not name_filter(job_name):
continue
if job_status == 'blocked':
blocked_jobs.add(job_name)
continue
if job_name in blocked_jobs:
blocked_jobs.remove(job_name)
result = 1.0 if job_status == 'success' else -1.0
pipeline_jobs[job_name] = result
# Skip build with blocked job [which usually means build failed due to the test failure]
if len(blocked_jobs) != 0:
continue
# Skip all success workflows
if all(result == 1.0 for result in pipeline_jobs.values()):
continue
revisions.add(revision)
for job_name in pipeline_jobs:
if job_name not in job_summary:
job_summary[job_name] = {}
job_summary[job_name][revision] = pipeline_jobs[job_name]
# Analyze results
job_names = sorted(job_summary.keys())
# revisions = sorted(revisions)
job_data = np.zeros((len(job_names), len(revisions)), dtype=float)
print(f"Number of observations: {len(revisions)}")
for job_idx, job_name in enumerate(job_names):
job_row = job_summary[job_name]
for rev_idx, revision in enumerate(revisions):
if revision in job_row:
job_data[job_idx, rev_idx] = job_row[revision]
success_rate = job_data[job_idx, ].sum(where=job_data[job_idx, ] > 0.0) / len(job_row)
present_rate = 1.0 * len(job_row) / len(revisions)
print(f"{job_name}: missing {100.0 * (1.0 - present_rate):.2f}% success rate: {100 * success_rate:.2f}%")
cov_matrix = np.corrcoef(job_data)
plot_heatmap(cov_matrix, job_names)
def print_artifacts(branch, item_count, name_filter: Callable[[str], bool]) -> None:
ci_cache = CircleCICache(token=get_circleci_token())
for pipeline, _, job in ci_cache.get_pipeline_jobs(branch=branch, item_count=item_count):
revision = pipeline['vcs']['revision']
if not name_filter(job["name"]):
continue
job_number = job.get("job_number")
if job_number is None:
continue
artifacts = ci_cache.get_job_artifacts('gh/pytorch/pytorch', job_number)
for artifact in artifacts:
name = os.path.basename(artifact['path'])
url = artifact["url"]
print(f"{revision} {name} {url}")
def print_duration(branch, item_count, name_filter: Callable[[str], bool]) -> None:
ci_cache = CircleCICache(token=get_circleci_token())
for pipeline, workflow, job in ci_cache.get_pipeline_jobs(branch=branch, item_count=item_count):
job_name, job_status, job_number = job['name'], job['status'], job.get("job_number")
revision = pipeline['vcs']['revision']
if not name_filter(job_name) or job_number is None:
continue
if job_status in ['blocked', 'canceled', 'unauthorized', 'running', 'not_run', 'failing']:
continue
started_at = str2date(job['started_at'])
stopped_at = str2date(job['stopped_at'])
duration = stopped_at - started_at
print(f"{job_name} {revision} {duration} {started_at}")
def parse_arguments():
from argparse import ArgumentParser
parser = ArgumentParser(description="Download and analyze circle logs")
parser.add_argument('--plot-graph', type=str, nargs='?', help="Plot job time trends", const='')
parser.add_argument('--output', type=str, help="Output file name for the graphs")
parser.add_argument('--get_artifacts', type=str)
parser.add_argument('--print-duration', type=str)
parser.add_argument('--branch', type=str)
parser.add_argument('--item_count', type=int, default=100)
parser.add_argument('--compute_covariance', choices=['cuda_test', 'cuda_build', 'windows_test'])
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
if args.get_artifacts is not None:
print_artifacts(branch=args.branch,
item_count=args.item_count,
name_filter=lambda x: args.get_artifacts in x)
sys.exit(0)
if args.print_duration is not None:
print_duration(branch=args.branch,
item_count=args.item_count,
name_filter=lambda x: args.print_duration in x)
sys.exit(0)
if args.compute_covariance is not None:
name_filter = {
'cuda_test': filter_cuda_test,
'cuda_build': filter_cuda_build,
'windows_test': filter_windows_test,
}[args.compute_covariance]
compute_covariance(branch=args.branch, name_filter=name_filter)
sys.exit(0)
if args.plot_graph is not None:
plot_graph(args.plot_graph, args.output, args.branch)
sys.exit(0)
fetch_status(branch=args.branch, item_count=args.item_count)
|
#!/usr/bin/env python3
# Tool for analyzing sizes of CUDA kernels for various GPU architectures
import os
import struct
import subprocess
import sys
from tempfile import TemporaryDirectory
from typing import Dict
# Try to auto-import elftools
try:
from elftools.elf.elffile import ELFFile
except ModuleNotFoundError:
print('elftools module not found, trying to install it from pip')
from pip._internal import main as pip_main
try:
pip_main(["install", "pyelftools", "--user"])
except SystemExit:
print(f'PIP installation failed, please install it manually by invoking "{sys.executable} -mpip install pyelftools --user"')
sys.exit(-1)
from elftools.elf.elffile import ELFFile
# From https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def sizeof_fmt(num, suffix='B'):
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def compute_cubin_sizes(file_name, section_name='.nv_fatbin', debug=False):
with open(file_name, 'rb') as f:
elf_file = ELFFile(f)
nv_fatbin = elf_file.get_section_by_name(section_name)
if nv_fatbin is None:
return {}
data = nv_fatbin.data()
idx, offs = 0, 0
elf_sizes = {}
while offs < len(data):
(magic, version, header_size, fatbin_size) = struct.unpack('IHHL', data[offs: offs + 16])
if magic != 0xba55ed50 or version != 1:
raise RuntimeError(f"Unexpected fatbin magic {hex(magic)} or version {version}")
if debug:
print(f"Found fatbin at {offs} header_size={header_size} fatbin_size={fatbin_size}")
offs += header_size
fatbin_end = offs + fatbin_size
while offs < fatbin_end:
(kind, version, hdr_size, elf_size, empty, code_ver, sm_ver) = struct.unpack('HHILLIH', data[offs: offs + 30])
if version != 0x0101 or kind not in [1, 2]:
raise RuntimeError(f"Unexpected cubin version {hex(version)} or kind {kind}")
sm_ver = f'{"ptx" if kind == 1 else "sm"}_{sm_ver}'
if debug:
print(f" {idx}: elf_size={elf_size} code_ver={hex(code_ver)} sm={sm_ver}")
if sm_ver not in elf_sizes:
elf_sizes[sm_ver] = 0
elf_sizes[sm_ver] += elf_size
idx, offs = idx + 1, offs + hdr_size + elf_size
offs = fatbin_end
return elf_sizes
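# The parser above assumes the fatbin layout observed in libtorch binaries: each
# blob starts with a 16-byte header (magic 0xba55ed50, version 1) followed by
# per-cubin records whose 30-byte headers carry the kind (1 = PTX, 2 = SASS cubin),
# the SM version and the size of the embedded ELF image.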
class ArFileCtx:
def __init__(self, ar_name: str) -> None:
self.ar_name = os.path.abspath(ar_name)
self._tmpdir = TemporaryDirectory()
def __enter__(self) -> str:
self._pwd = os.getcwd()
rc = self._tmpdir.__enter__()
# Extract the archive inside the temporary directory so that the caller's
# os.listdir(".") only sees the extracted object files
os.chdir(rc)
subprocess.check_call(['ar', 'x', self.ar_name])
return rc
def __exit__(self, ex, value, tb) -> None:
os.chdir(self._pwd)
return self._tmpdir.__exit__(ex, value, tb)
def dict_add(rc: Dict[str, int], b: Dict[str, int]) -> Dict[str, int]:
for key, val in b.items():
rc[key] = (rc[key] if key in rc else 0) + val
return rc
def main():
if sys.platform != 'linux':
print('This script only works with Linux ELF files')
return
if len(sys.argv) < 2:
print(f"{sys.argv[0]} invoked without any arguments trying to infer location of libtorch_cuda")
import torch
fname = os.path.join(os.path.dirname(torch.__file__), 'lib', 'libtorch_cuda.so')
else:
fname = sys.argv[1]
if not os.path.exists(fname):
print(f"Can't find {fname}")
sys.exit(-1)
section_names = ['.nv_fatbin', '__nv_relfatbin']
results = {name: {} for name in section_names}
print(f"Analyzing {fname}")
if os.path.splitext(fname)[1] == '.a':
with ArFileCtx(fname):
for fname in os.listdir("."):
if not fname.endswith(".o"): continue
for section_name in section_names:
elf_sizes = compute_cubin_sizes(fname, section_name)
dict_add(results[section_name], elf_sizes)
else:
for section_name in section_names:
dict_add(results[section_name], compute_cubin_sizes(fname, section_name))
for section_name in section_names:
elf_sizes = results[section_name]
print(f"{section_name} size {sizeof_fmt(sum(elf_sizes.values()))}")
for (sm_ver, total_size) in elf_sizes.items():
print(f" {sm_ver}: {sizeof_fmt(total_size)}")
if __name__ == '__main__':
main()
|
from collections import defaultdict
from datetime import datetime, timedelta, timezone
import gzip
import multiprocessing
import os
import re
import urllib
from tqdm import tqdm
import botocore
import boto3
S3 = boto3.resource('s3')
CLIENT = boto3.client('s3')
BUCKET = S3.Bucket('pytorch')
class CacheEntry:
_size = None
def __init__(self, download_uri: str):
self.download_uri = download_uri
self.bytes_sent = 0
@property
def os_type(self) -> str:
os_type = "linux"
if "win" in self.download_uri:
os_type = "windows"
elif "macosx" in self.download_uri:
os_type = "macos"
return os_type
@property
def target_arch(self) -> str:
target_arch = "cpu"
result = re.search(r"cu[0-9]+", self.download_uri)
if result:
target_arch = result[0]
return target_arch
@property
def package_name(self) -> str:
filename_contents = os.path.basename(self.download_uri).split('-')
return filename_contents[0]
@property
def package_version(self) -> str:
if "dev" in self.download_uri:
results = re.search(
r"[0-9]+\.[0-9]+\.[0-9]+\.dev[0-9]+",
self.download_uri
)
else:
results = re.search(
r"[0-9]+\.[0-9]+\.[0-9]+", self.download_uri
)
if not results:
raise Exception("Wtf there's no version o.O")
return results[0]
@property
def size(self) -> int:
if self._size is None:
for key in BUCKET.objects.filter(
Prefix=self.download_uri.lstrip("/")
):
self._size = key.size
if self._size is None:
raise Exception(
f"No object found for prefix {self.download_uri}"
)
return self._size
@property
def downloads(self):
return self.bytes_sent // self.size
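# Note: the download count is approximated as total bytes served divided by the
# object size; partial or resumed transfers are therefore under-counted.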
def parse_logs(log_directory: str) -> dict:
bytes_cache = dict()
for (dirpath, _, filenames) in os.walk(log_directory):
for filename in tqdm(filenames):
with gzip.open(os.path.join(dirpath, filename), 'r') as gf:
string = gf.read().decode("utf-8")
entries = []
entries += string.splitlines()[2:]
for entry in entries:
columns = entry.split('\t')
bytes_sent = int(columns[3])
download_uri = urllib.parse.unquote(
urllib.parse.unquote(columns[7])
)
status = columns[8]
if not all([
status.startswith("2"),
download_uri.endswith((".whl", ".zip"))
]):
continue
if not bytes_cache.get(download_uri):
bytes_cache[download_uri] = CacheEntry(download_uri)
bytes_cache[download_uri].bytes_sent += bytes_sent
return bytes_cache
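# The tab-separated fields indexed above (columns[3], columns[7], columns[8]) are
# assumed to be the bytes-sent, request-URI and HTTP-status columns of the
# CloudFront access logs stored under the 'cflogs' prefix.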
def output_results(bytes_cache: dict) -> None:
os_results = defaultdict(int)
arch_results = defaultdict(int)
package_results = defaultdict(lambda: defaultdict(int))
for _, val in tqdm(bytes_cache.items()):
try:
os_results[val.os_type] += val.downloads
arch_results[val.target_arch] += val.downloads
package_results[val.package_name][val.package_version] += (
val.downloads
)
except Exception:
pass
print("=-=-= Results =-=-=")
print("=-=-= OS =-=-=")
total_os_num = sum(os_results.values())
for os_type, num in os_results.items():
print(
f"\t* {os_type}: {num} ({(num/total_os_num)*100:.2f}%)"
)
print("=-=-= ARCH =-=-=")
total_arch_num = sum(arch_results.values())
for arch_type, num in arch_results.items():
print(
f"\t* {arch_type}: {num} ({(num/total_arch_num) * 100:.2f}%)"
)
print("=-=-= By Package =-=-=")
for package_name, upper_val in package_results.items():
print(f"=-=-= {package_name} =-=-=")
total_package_num = sum(upper_val.values())
for package_version, num in upper_val.items():
print(
f"\t* {package_version}: {num} ({(num/total_package_num) * 100:.2f}%)"
)
def download_logs(log_directory: str, since: float):
dt_now = datetime.now(timezone.utc)
dt_end = datetime(dt_now.year, dt_now.month, dt_now.day, tzinfo=timezone.utc)
dt_start = dt_end - timedelta(days=1, hours=1) # Add 1 hour padding to account for potentially missed logs due to timing
for key in tqdm(BUCKET.objects.filter(Prefix='cflogs')):
remote_fname = key.key
local_fname = os.path.join(log_directory, remote_fname)
# Only download things from yesterday
dt_modified = key.last_modified.replace(tzinfo=timezone.utc)
if dt_start >= dt_modified or dt_end < dt_modified:
continue
# TODO: Do this in parallel
if not os.path.exists(local_fname):
dirname = os.path.dirname(local_fname)
if not os.path.exists(dirname):
os.makedirs(dirname)
CLIENT.download_file("pytorch", remote_fname, local_fname)
if __name__ == "__main__":
print("Downloading logs")
download_logs('cache', 1)
print("Parsing logs")
cache = parse_logs('cache/cflogs/')
print("Calculating results")
output_results(cache)
|
import argparse
import boto3
import bz2
import json
import os
import re
import requests
import pandas as pd
from datetime import datetime, timedelta
from tqdm import tqdm
from typing import Any, Dict, Optional, List
S3 = boto3.resource('s3')
CLIENT = boto3.client('s3')
BUCKET = S3.Bucket('ossci-metrics')
GITHUB_API_BASE = "https://api.github.com/"
GITHUB_COMMITS_API = "repos/pytorch/pytorch/commits"
STRF_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
CACHE_PICKLE = "cache/test_time/dataframe.pickle"
def _get_latest_git_commit_sha_list(lookback: int):
sha_since = (datetime.utcnow() - timedelta(hours = lookback)).strftime(STRF_FORMAT)
resp = requests.get(GITHUB_API_BASE + GITHUB_COMMITS_API + f"?since={sha_since}")
if resp.status_code == 200:
return [e.get('sha') for e in resp.json()]
else:
return []
def _json_to_df(data: Dict[str, Any], granularity: str) -> pd.DataFrame:
reformed_data = list()
for fname, fdata in data['files'].items():
if granularity == 'file':
reformed_data.append({
"job": data['job'],
"sha": data['sha'],
'file': fname,
'file_total_sec': fdata['total_seconds'],
})
else:
for sname, sdata in fdata['suites'].items():
if granularity == 'suite':
reformed_data.append({
"job": data['job'],
"sha": data['sha'],
'suite': sname,
'suite_total_sec': sdata['total_seconds'],
})
else:
for cname, cdata in sdata['cases'].items():
reformed_data.append({
"job": data['job'],
"sha": data['sha'],
'case': cname,
'case_status': cdata['status'],
'case_sec': cdata['seconds'],
})
df = pd.json_normalize(reformed_data)
return df
def download_stats(folder: str, lookback: int):
commit_sha_list = _get_latest_git_commit_sha_list(lookback)
for commit_sha in commit_sha_list:
for key in tqdm(BUCKET.objects.filter(Prefix=f'test_time/{commit_sha}')):
remote_fname = key.key
local_fname = os.path.join(folder, remote_fname)
# TODO: Do this in parallel
if not os.path.exists(local_fname):
dirname = os.path.dirname(local_fname)
if not os.path.exists(dirname):
os.makedirs(dirname)
# only download when there's a cache miss
if not os.path.exists(local_fname) or not os.path.isfile(local_fname):
print(f"\nDownloading {remote_fname}...")
CLIENT.download_file("ossci-metrics", remote_fname, local_fname)
def parse_and_export_stats(folder: str, granularity: str, commit_sha_lists: Optional[List[str]] = None):
dataframe = None
for (dirpath, _, filenames) in os.walk(folder):
for filename in tqdm(filenames):
splits = dirpath.split("/")
job_name = splits[-1]
sha = splits[-2]
if not commit_sha_lists or sha in commit_sha_lists:
with bz2.open(os.path.join(dirpath, filename), 'r') as zf:
string = zf.read().decode("utf-8")
data = json.loads(string)
# create a deep json with sha and job info
data['sha'] = sha
data['job'] = job_name
df = _json_to_df(data, granularity)
dataframe = df if dataframe is None else pd.concat([dataframe, df])
return dataframe
def main():
parser = argparse.ArgumentParser(
__file__,
description="download and cache test stats locally, both raw and pandas format",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
'--lookback',
type=int,
help='lookback in # of hours',
default=24,
)
parser.add_argument(
'--output',
help='output filename',
default='cache/df.pickle',
)
parser.add_argument(
'--cache_folder',
help='cache folder',
default='cache',
)
parser.add_argument(
'--granularity',
choices=['file', 'suite', 'case'],
help='granularity of stats summary',
default='file',
)
args = parser.parse_args()
lookback = args.lookback
cache_folder = args.cache_folder
output = args.output
granularity = args.granularity
print("Downloading test stats")
download_stats(cache_folder, lookback)
print("Parsing test stats and write to pd dataframe")
if not os.path.exists(output):
dataframe = parse_and_export_stats(f'{cache_folder}/test_time/', granularity)
dataframe.to_pickle(output)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
from typing import Dict, List
from subprocess import check_output
import os
import sys
def get_defined_symbols(fname: str, verbose: bool = False) -> Dict[str, int]:
if verbose:
print(f"Processing {fname}...", end='', flush=True)
if sys.platform == 'darwin':
lines = check_output(['nm', '--defined-only', '-n', fname]).decode('ascii').split("\n")[:-1]
rc = {}
for idx, line in enumerate(lines):
addr, stype, name = line.split(' ')
size = 4 if idx + 1 == len(lines) else (int(lines[idx + 1].split(' ')[0], 16) - int(addr, 16))
rc[name] = size
else:
lines = check_output(['nm', '--print-size', '--defined-only', fname]).decode('ascii').split('\n')
rc = {e[3]: int(e[1], 16) for e in [line.split() for line in lines] if len(e) == 4}
if verbose:
print("done")
return rc
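# On Linux the function parses `nm --print-size --defined-only` output, whose rows
# look like "<addr> <size> <type> <name>"; on macOS the symbol sizes are inferred
# from the distance between consecutive symbol addresses reported by `nm -n`.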
def get_deps(fname: str) -> List[str]:
if sys.platform == 'darwin':
rc = []
lines = check_output(['otool', '-l', fname]).decode('ascii').split("\n")[1:-1]
for idx, line in enumerate(lines):
if line.strip() != 'cmd LC_LOAD_DYLIB':
continue
path = lines[idx + 2].strip()
assert path.startswith('name')
rc.append(os.path.basename(path.split(' ')[1]))
return rc
lines = check_output(['readelf', '--dynamic', fname]).decode('ascii').split('\n')
return [line.split('[')[1][:-1] for line in lines if '(NEEDED)' in line]
def humansize(size):
if size < 1024:
return f"{size} bytes"
if size < 1024**2:
return f"{int(size/1024)} Kb"
if size < 1024**3:
return f"{size/(1024.0**2):.2f} Mb"
return f"{size/(1024.0**3):.2f} Gb"
def print_sizes(libname, depth: int = 2) -> None:
libs = [libname]
symbols = {os.path.basename(libname): get_defined_symbols(libname, verbose=True)}
for _ in range(depth):
for lib in libs:
dirname = os.path.dirname(lib)
for dep in get_deps(lib):
path = os.path.join(dirname, dep)
if not os.path.exists(path):
continue
if path not in libs:
libs.append(path)
symbols[dep] = get_defined_symbols(path, verbose=True)
for lib in libs:
lib_symbols = symbols[os.path.basename(lib)]
lib_keys = set(lib_symbols.keys())
rc = f"{lib} symbols size {humansize(sum(lib_symbols.values()))}"
for dep in get_deps(lib):
if dep not in symbols:
continue
dep_overlap = lib_keys.intersection(set(symbols[dep].keys()))
overlap_size = sum(lib_symbols[k] for k in dep_overlap)
if overlap_size > 0:
rc += f" {dep} overlap is {humansize(overlap_size)}"
print(rc)
def print_symbols_overlap(libname1: str, libname2: str) -> None:
sym1 = get_defined_symbols(libname1, verbose=True)
sym2 = get_defined_symbols(libname2, verbose=True)
sym1_size = sum(sym1.values())
sym2_size = sum(sym2.values())
sym_overlap = set(sym1.keys()).intersection(set(sym2.keys()))
overlap_size = sum(sym1[s] for s in sym_overlap)
if overlap_size == 0:
print(f"{libname1} symbols size {humansize(sym1_size)} does not overlap with {libname2}")
return
print(f"{libname1} symbols size {humansize(sym1_size)} overlap {humansize(overlap_size)} ({100.0 * overlap_size/sym1_size :.2f}%)")
for sym in sym_overlap:
print(sym)
if __name__ == '__main__':
if len(sys.argv) == 3:
print_symbols_overlap(sys.argv[1], sys.argv[2])
else:
print_sizes(sys.argv[1] if len(sys.argv) > 1 else "lib/libtorch_cuda.so")
|
#!/usr/bin/env python3
from datetime import datetime, timedelta
from typing import Any, Dict, List, Iterable, Optional, Union
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import enum
import os
class IssueState(enum.Enum):
OPEN = "open"
CLOSED = "closed"
ALL = "all"
def __str__(self):
return self.value
class GitCommit:
commit_hash: str
title: str
body: str
author: str
author_date: datetime
commit_date: Optional[datetime]
def __init__(self,
commit_hash: str,
author: str,
author_date: datetime,
title: str,
body: str,
commit_date: Optional[datetime] = None) -> None:
self.commit_hash = commit_hash
self.author = author
self.author_date = author_date
self.commit_date = commit_date
self.title = title
self.body = body
def __contains__(self, item: Any) -> bool:
return item in self.body or item in self.title
def get_revert_revision(commit: GitCommit) -> Optional[str]:
import re
body_rc = re.search("Original Phabricator Diff: (D\\d+)", commit.body)
if commit.title.startswith("Back out \"") and body_rc is not None:
return body_rc.group(1)
rc = re.match("Revert (D\\d+):", commit.title)
if rc is None:
return None
return rc.group(1)
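# For example, a commit titled 'Back out "..."' whose body contains
# "Original Phabricator Diff: D12345" yields "D12345"; a commit titled
# "Revert D12345: ..." yields the same identifier via the fallback match.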
def get_diff_revision(commit: GitCommit) -> Optional[str]:
import re
rc = re.search("\\s*Differential Revision: (D\\d+)", commit.body)
if rc is None:
return None
return rc.group(1)
def get_ghf_revert_revision(commit: GitCommit) -> Optional[str]:
import re
rc = re.search("\\s*This reverts commit ([0-9a-f]+).", commit.body)
if all([
commit.title.startswith("Revert"),
commit.author == "PyTorch MergeBot <[email protected]>",
rc is not None
]):
return rc.group(1)
return None
def is_revert(commit: GitCommit) -> bool:
return get_revert_revision(commit) is not None or get_ghf_revert_revision(commit) is not None
def parse_medium_format(lines: Union[str, List[str]]) -> GitCommit:
"""
Expect commit message generated using `--format=medium --date=unix` format, i.e.:
commit <sha1>
Author: <author>
Date: <author date>
<title line>
<full commit message>
"""
if isinstance(lines, str):
lines = lines.split("\n")
# TODO: Handle merge commits correctly
if len(lines) > 1 and lines[1].startswith("Merge:"):
del lines[1]
assert len(lines) > 5
assert lines[0].startswith("commit")
assert lines[1].startswith("Author: ")
assert lines[2].startswith("Date: ")
assert len(lines[3]) == 0
return GitCommit(commit_hash=lines[0].split()[1].strip(),
author=lines[1].split(":", 1)[1].strip(),
author_date=datetime.fromtimestamp(int(lines[2].split(":", 1)[1].strip())),
title=lines[4].strip(),
body="\n".join(lines[5:]),
)
def parse_fuller_format(lines: Union[str, List[str]]) -> GitCommit:
"""
Expect commit message generated using `--format=fuller --date=unix` format, i.e.:
commit <sha1>
Author: <author>
AuthorDate: <author date>
Commit: <committer>
CommitDate: <committer date>
<title line>
<full commit message>
"""
if isinstance(lines, str):
lines = lines.split("\n")
# TODO: Handle merge commits correctly
if len(lines) > 1 and lines[1].startswith("Merge:"):
del lines[1]
assert len(lines) > 7
assert lines[0].startswith("commit")
assert lines[1].startswith("Author: ")
assert lines[2].startswith("AuthorDate: ")
assert lines[3].startswith("Commit: ")
assert lines[4].startswith("CommitDate: ")
assert len(lines[5]) == 0
return GitCommit(commit_hash=lines[0].split()[1].strip(),
author=lines[1].split(":", 1)[1].strip(),
author_date=datetime.fromtimestamp(int(lines[2].split(":", 1)[1].strip())),
commit_date=datetime.fromtimestamp(int(lines[4].split(":", 1)[1].strip())),
title=lines[6].strip(),
body="\n".join(lines[7:]),
)
def _check_output(items: List[str], encoding='utf-8') -> str:
from subprocess import check_output
return check_output(items).decode(encoding)
def get_git_remotes(path: str) -> Dict[str, str]:
keys = _check_output(["git", "-C", path, "remote"]).strip().split("\n")
return {key: _check_output(["git", "-C", path, "remote", "get-url", key]).strip() for key in keys}
class GitRepo:
def __init__(self, path, remote='upstream'):
self.repo_dir = path
self.remote = remote
def _run_git_cmd(self, *args) -> str:
return _check_output(['git', '-C', self.repo_dir] + list(args))
def _run_git_log(self, revision_range) -> List[GitCommit]:
log = self._run_git_cmd('log', '--format=fuller',
'--date=unix', revision_range, '--', '.').split("\n")
rc: List[GitCommit] = []
cur_msg: List[str] = []
for line in log:
if line.startswith("commit"):
if len(cur_msg) > 0:
rc.append(parse_fuller_format(cur_msg))
cur_msg = []
cur_msg.append(line)
if len(cur_msg) > 0:
rc.append(parse_fuller_format(cur_msg))
return rc
def get_commit_list(self, from_ref, to_ref) -> List[GitCommit]:
return self._run_git_log(f"{self.remote}/{from_ref}..{self.remote}/{to_ref}")
def get_ghstack_orig_branches(self) -> List[str]:
return [x.strip() for x in self._run_git_cmd("branch", "--remotes", "--list", self.remote + "/gh/*/orig").strip().split("\n")]
def show_ref(self, ref) -> str:
return self._run_git_cmd("show-ref", ref).split(" ")[0]
def merge_base(self, ref1, ref2) -> str:
return self._run_git_cmd("merge-base", ref1, ref2).strip()
def rev_list(self, ref):
return self._run_git_cmd("rev-list", f"{self.remote}/main..{ref}").strip().split()
def build_commit_dict(commits: List[GitCommit]) -> Dict[str, GitCommit]:
rc = {}
for commit in commits:
assert commit.commit_hash not in rc
rc[commit.commit_hash] = commit
return rc
def fetch_json(url: str, params: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
headers = {'Accept': 'application/vnd.github.v3+json'}
token = os.environ.get("GITHUB_TOKEN")
if token is not None and url.startswith('https://api.github.com/'):
headers['Authorization'] = f'token {token}'
if params is not None and len(params) > 0:
url += '?' + '&'.join(f"{name}={val}" for name, val in params.items())
try:
with urlopen(Request(url, headers=headers)) as data:
return json.load(data)
except HTTPError as err:
if err.code == 403 and all(key in err.headers for key in ['X-RateLimit-Limit', 'X-RateLimit-Used']):
print(f"Rate limit exceeded: {err.headers['X-RateLimit-Used']}/{err.headers['X-RateLimit-Limit']}")
raise
def fetch_multipage_json(url: str, params: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
if params is None:
params = {}
assert "page" not in params
page_idx, rc, prev_len, params = 1, [], -1, params.copy()
while len(rc) > prev_len:
prev_len = len(rc)
params["page"] = page_idx
page_idx += 1
rc += fetch_json(url, params)
return rc
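# Note: pagination stops as soon as a requested page contributes no new items, i.e. when
# the API returns an empty list for the next "page" value.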
def gh_get_milestones(org='pytorch', project='pytorch', state: IssueState = IssueState.OPEN) -> List[Dict[str, Any]]:
url = f'https://api.github.com/repos/{org}/{project}/milestones'
return fetch_multipage_json(url, {"state": state})
def gh_get_milestone_issues(org: str, project: str, milestone_idx: int, state: IssueState = IssueState.OPEN):
url = f'https://api.github.com/repos/{org}/{project}/issues'
return fetch_multipage_json(url, {"milestone": milestone_idx, "state": state})
def gh_get_ref_statuses(org: str, project: str, ref: str) -> Dict[str, Any]:
url = f'https://api.github.com/repos/{org}/{project}/commits/{ref}/status'
params = {"page": 1, "per_page": 100}
nrc = rc = fetch_json(url, params)
while "statuses" in nrc and len(nrc["statuses"]) == 100:
params["page"] += 1
nrc = fetch_json(url, params)
if "statuses" in nrc:
rc["statuses"] += nrc["statuses"]
return rc
def extract_statuses_map(json: Dict[str, Any]):
return {s["context"]: s["state"] for s in json["statuses"]}
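# Illustrative shape of the result (context names are hypothetical):
# {"ci/circleci: pytorch_linux_build": "success", "codecov/project": "failure"},
# i.e. one entry per status context mapped to its state.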
class PeriodStats:
commits: int
reverts: int
authors: int
date: datetime
def __init__(self, date: datetime, commits: int, reverts: int, authors: int) -> None:
self.date = date
self.commits = commits
self.reverts = reverts
self.authors = authors
def get_monthly_stats(commits: List[GitCommit]) -> Iterable[PeriodStats]:
y, m, total, reverts, authors = None, None, 0, 0, set()
for commit in commits:
commit_date = commit.commit_date if commit.commit_date is not None else commit.author_date
if y != commit_date.year or m != commit_date.month:
if y is not None:
yield PeriodStats(datetime(y, m, 1), total, reverts, len(authors))
y, m, total, reverts, authors = commit_date.year, commit_date.month, 0, 0, set()
if is_revert(commit):
reverts += 1
total += 1
authors.add(commit.author)
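# Note: commits are expected in `git log` order (newest first), so periods are yielded
# newest month first and stats[idx + 1] in print_monthly_stats below refers to the
# preceding (older) month when computing the growth percentage.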
def print_monthly_stats(commits: List[GitCommit]) -> None:
stats = list(get_monthly_stats(commits))
for idx, stat in enumerate(stats):
y = stat.date.year
m = stat.date.month
total, reverts, authors = stat.commits, stat.reverts, stat.authors
reverts_ratio = 100.0 * reverts / total
if idx + 1 < len(stats):
commits_growth = 100.0 * (stat.commits / stats[idx + 1].commits - 1)
else:
commits_growth = float('nan')
print(f"{y}-{m:02d}: commits {total} ({commits_growth:+.1f}%) reverts {reverts} ({reverts_ratio:.1f}%) authors {authors}")
def print_reverts(commits: List[GitCommit]) -> None:
for commit in commits:
if not is_revert(commit):
continue
print(f"{commit.commit_date} {commit.title} {commit.commit_hash} {commit.body}")
def analyze_reverts(commits: List[GitCommit]):
for idx, commit in enumerate(commits):
revert_id = get_revert_revision(commit)
if revert_id is None:
continue
        orig_commit = None
        # Scan subsequent (older) commits for the change carrying the same Differential Revision
        for i in range(1, 100):
            if idx + i >= len(commits):
                break
            candidate = commits[idx + i]
            if get_diff_revision(candidate) == revert_id:
                orig_commit = candidate
                break
if orig_commit is None:
print(f"Failed to find original commit for {commit.title}")
continue
print(f"{commit.commit_hash} is a revert of {orig_commit.commit_hash}: {orig_commit.title}")
revert_statuses = gh_get_ref_statuses("pytorch", "pytorch", commit.commit_hash)
orig_statuses = gh_get_ref_statuses("pytorch", "pytorch", orig_commit.commit_hash)
orig_sm = extract_statuses_map(orig_statuses)
revert_sm = extract_statuses_map(revert_statuses)
for k in revert_sm.keys():
if k not in orig_sm:
continue
if orig_sm[k] != revert_sm[k]:
print(f"{k} {orig_sm[k]}->{revert_sm[k]}")
def print_contributor_stats(commits, delta: Optional[timedelta] = None) -> None:
authors: Dict[str, int] = {}
now = datetime.now()
# Default delta is one non-leap year
if delta is None:
delta = timedelta(days=365)
for commit in commits:
date, author = commit.commit_date, commit.author
if now - date > delta:
break
if author not in authors:
authors[author] = 0
authors[author] += 1
print(f"{len(authors)} contributors made {sum(authors.values())} commits in last {delta.days} days")
    for count, author in sorted(((count, author) for author, count in authors.items()), reverse=True):
print(f"{author}: {count}")
def commits_missing_in_branch(repo: GitRepo, branch: str, orig_branch: str, milestone_idx: int) -> None:
def get_commits_dict(x, y):
return build_commit_dict(repo.get_commit_list(x, y))
main_commits = get_commits_dict(orig_branch, 'main')
release_commits = get_commits_dict(orig_branch, branch)
print(f"len(main_commits)={len(main_commits)}")
print(f"len(release_commits)={len(release_commits)}")
print("URL;Title;Status")
for issue in gh_get_milestone_issues('pytorch', 'pytorch', milestone_idx, IssueState.ALL):
html_url, state = issue["html_url"], issue["state"]
        # Skip closed issues that already landed before the branch cut
if state == "closed":
mentioned_after_cut = any(html_url in commit_message for commit_message in main_commits.values())
            # If the issue is not mentioned after the cut, then it must already be in the release branch
if not mentioned_after_cut:
continue
mentioned_in_release = any(html_url in commit_message for commit_message in release_commits.values())
        # If the issue is mentioned in the release branch, then it was already cherry-picked
if mentioned_in_release:
continue
print(f'{html_url};{issue["title"]};{state}')
def analyze_stacks(repo: GitRepo) -> None:
from tqdm.contrib.concurrent import thread_map
branches = repo.get_ghstack_orig_branches()
stacks_by_author: Dict[str, List[int]] = {}
    for branch, rv_commits in thread_map(lambda x: (x, repo.rev_list(x)), branches, max_workers=10):
        author = branch.split("/")[2]
        if author not in stacks_by_author:
            stacks_by_author[author] = []
        stacks_by_author[author].append(len(rv_commits))
    for author, slen in sorted(stacks_by_author.items(), key=lambda x: len(x[1]), reverse=True):
        if len(slen) == 1:
            print(f"{author} has 1 stack of depth {slen[0]}")
            continue
        print(f"{author} has {len(slen)} stacks, max depth is {max(slen)}, avg depth is {sum(slen)/len(slen):.2f}, median is {sorted(slen)[len(slen)//2]}")
def parse_arguments():
from argparse import ArgumentParser
parser = ArgumentParser(description="Print GitHub repo stats")
parser.add_argument("--repo-path",
type=str,
help="Path to PyTorch git checkout",
default=os.path.expanduser("~/git/pytorch/pytorch"))
parser.add_argument("--milestone-id", type=str)
parser.add_argument("--branch", type=str)
parser.add_argument("--remote",
type=str,
help="Remote to base off of",
default="")
parser.add_argument("--analyze-reverts", action="store_true")
parser.add_argument("--print-reverts", action="store_true")
parser.add_argument("--contributor-stats", action="store_true")
parser.add_argument("--missing-in-branch", action="store_true")
parser.add_argument("--analyze-stacks", action="store_true")
return parser.parse_args()
def main():
import time
args = parse_arguments()
remote = args.remote
if not remote:
remotes = get_git_remotes(args.repo_path)
# Pick best remote
remote = next(iter(remotes.keys()))
for key in remotes:
if remotes[key].endswith('github.com/pytorch/pytorch'):
remote = key
repo = GitRepo(args.repo_path, remote)
if args.analyze_stacks:
analyze_stacks(repo)
return
if args.missing_in_branch:
        # Use the milestone index directly, or look it up by milestone title
try:
milestone_idx = int(args.milestone_id)
except ValueError:
milestone_idx = -1
milestones = gh_get_milestones()
for milestone in milestones:
if milestone.get('title', '') == args.milestone_id:
milestone_idx = int(milestone.get('number', '-2'))
if milestone_idx < 0:
print(f'Could not find milestone {args.milestone_id}')
return
commits_missing_in_branch(repo,
args.branch,
f'orig/{args.branch}',
milestone_idx)
return
print(f"Parsing git history with remote {remote}...", end='', flush=True)
start_time = time.time()
x = repo._run_git_log(f"{remote}/main")
print(f"done in {time.time()-start_time:.1f} sec")
if args.analyze_reverts:
analyze_reverts(x)
elif args.contributor_stats:
print_contributor_stats(x)
elif args.print_reverts:
print_reverts(x[:2**9])
else:
print_monthly_stats(x)
if __name__ == "__main__":
main()
|
import distutils.command.clean
import glob
import os
import shutil
import subprocess
import sys
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import (
BuildExtension,
CppExtension,
CUDA_HOME,
CUDAExtension,
)
with open("version.txt", "r") as f:
    version = f.read().strip()
sha = "Unknown"
package_name = "torchcsprng"
cwd = os.path.dirname(os.path.abspath(__file__))
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd)
.decode("ascii")
.strip()
)
except Exception:
pass
if os.getenv("BUILD_VERSION"):
version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
version += "+" + sha[:7]
print("Building wheel {}-{}".format(package_name, version))
def write_version_file():
version_path = os.path.join(cwd, "torchcsprng", "version.py")
with open(version_path, "w") as f:
f.write("__version__ = '{}'\n".format(version))
f.write("git_version = {}\n".format(repr(sha)))
# f.write("from torchcsprng.extension import _check_cuda_version\n")
# f.write("if _check_cuda_version() > 0:\n")
# f.write(" cuda = _check_cuda_version()\n")
write_version_file()
with open("README.md", "r") as fh:
long_description = fh.read()
requirements = [
"torch",
]
def append_flags(flags, flags_to_append):
for flag in flags_to_append:
        if flag not in flags:
flags.append(flag)
return flags
def get_extensions():
build_cuda = torch.cuda.is_available() or os.getenv("FORCE_CUDA", "0") == "1"
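    # Note: FORCE_CUDA=1 forces the CUDA extension to be built even when
    # torch.cuda.is_available() is False (e.g. building on a machine without a visible GPU).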
module_name = "torchcsprng"
extensions_dir = os.path.join(cwd, module_name, "csrc")
openmp = "ATen parallel backend: OpenMP" in torch.__config__.parallel_info()
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
sources = main_file + source_cpu
extension = CppExtension
define_macros = []
cxx_flags = os.getenv("CXX_FLAGS", "")
if cxx_flags == "":
cxx_flags = []
else:
cxx_flags = cxx_flags.split(" ")
if openmp:
if sys.platform == "linux":
cxx_flags = append_flags(cxx_flags, ["-fopenmp"])
elif sys.platform == "win32":
cxx_flags = append_flags(cxx_flags, ["/openmp"])
# elif sys.platform == 'darwin':
# cxx_flags = append_flags(cxx_flags, ['-Xpreprocessor', '-fopenmp'])
if build_cuda:
extension = CUDAExtension
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
nvcc_flags = os.getenv("NVCC_FLAGS", "")
if nvcc_flags == "":
nvcc_flags = []
else:
nvcc_flags = nvcc_flags.split(" ")
nvcc_flags = append_flags(nvcc_flags, ["--expt-extended-lambda", "-Xcompiler"])
extra_compile_args = {
"cxx": cxx_flags,
"nvcc": nvcc_flags,
}
else:
extra_compile_args = {
"cxx": cxx_flags,
}
ext_modules = [
extension(
module_name + "._C",
sources,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
class clean(distutils.command.clean.clean):
def run(self):
with open(".gitignore", "r") as f:
ignores = f.read()
start_deleting = False
for wildcard in filter(None, ignores.split("\n")):
if (
wildcard
== "# do not change or delete this comment - `python setup.py clean` deletes everything after this line"
):
start_deleting = True
if not start_deleting:
continue
for filename in glob.glob(wildcard):
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
# It's an old-style class in Python 2.7...
distutils.command.clean.clean.run(self)
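# Note: the custom clean command above only deletes paths matching the .gitignore
# patterns listed below the "do not change or delete this comment" marker line, so
# ordinary ignore entries above the marker are left untouched.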
setup(
# Metadata
name=package_name,
version=version,
author="Pavel Belevich",
author_email="[email protected]",
url="https://github.com/pytorch/csprng",
description="Cryptographically secure pseudorandom number generators for PyTorch",
long_description=long_description,
long_description_content_type="text/markdown",
license="BSD-3",
# Package info
packages=find_packages(exclude=("test",)),
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: C++",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
python_requires=">=3.6",
install_requires=requirements,
ext_modules=get_extensions(),
test_suite="test",
cmdclass={
"build_ext": BuildExtension,
"clean": clean,
},
)
|
# -*- coding: utf-8 -*-
"""Helper script to package wheels and relocate binaries."""
import glob
import hashlib
import io
# Standard library imports
import os
import os.path as osp
import platform
import shutil
import subprocess
import sys
import zipfile
from base64 import urlsafe_b64encode
# Third party imports
if sys.platform == "linux":
from auditwheel.lddtree import lddtree
from wheel.bdist_wheel import get_abi_tag
ALLOWLIST = {
"libgcc_s.so.1",
"libstdc++.so.6",
"libm.so.6",
"libdl.so.2",
"librt.so.1",
"libc.so.6",
"libnsl.so.1",
"libutil.so.1",
"libpthread.so.0",
"libresolv.so.2",
"libX11.so.6",
"libXext.so.6",
"libXrender.so.1",
"libICE.so.6",
"libSM.so.6",
"libGL.so.1",
"libgobject-2.0.so.0",
"libgthread-2.0.so.0",
"libglib-2.0.so.0",
"ld-linux-x86-64.so.2",
"ld-2.17.so",
}
WINDOWS_ALLOWLIST = {
"MSVCP140.dll",
"KERNEL32.dll",
"VCRUNTIME140_1.dll",
"VCRUNTIME140.dll",
"api-ms-win-crt-heap-l1-1-0.dll",
"api-ms-win-crt-runtime-l1-1-0.dll",
"api-ms-win-crt-stdio-l1-1-0.dll",
"api-ms-win-crt-filesystem-l1-1-0.dll",
"api-ms-win-crt-string-l1-1-0.dll",
"api-ms-win-crt-environment-l1-1-0.dll",
"api-ms-win-crt-math-l1-1-0.dll",
"api-ms-win-crt-convert-l1-1-0.dll",
}
HERE = osp.dirname(osp.abspath(__file__))
PACKAGE_ROOT = osp.dirname(osp.dirname(HERE))
PLATFORM_ARCH = platform.machine()
PYTHON_VERSION = sys.version_info
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def rehash(path, blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.sha256()"""
h = hashlib.sha256()
length = 0
with open(path, "rb") as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=")
# unicode/str python2 issues
return (digest, str(length)) # type: ignore
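# Illustrative result: rehash(path) returns a pair like
# ("sha256=<43-char urlsafe base64 digest>", "<file size in bytes as a string>"),
# matching the "<path>,<digest>,<size>" rows written into the wheel RECORD file below.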
def unzip_file(file, dest):
"""Decompress zip `file` into directory `dest`."""
with zipfile.ZipFile(file, "r") as zip_ref:
zip_ref.extractall(dest)
def is_program_installed(basename):
"""
    Return the program's absolute path if it is installed in PATH;
    otherwise, return None.
    On macOS systems, a .app bundle is considered installed if it exists.
"""
if sys.platform == "darwin" and basename.endswith(".app") and osp.exists(basename):
return basename
for path in os.environ["PATH"].split(os.pathsep):
abspath = osp.join(path, basename)
if osp.isfile(abspath):
return abspath
def find_program(basename):
"""
    Find a program in PATH and return its absolute path.
    On Windows platforms, also try appending .exe, .bat, .cmd or .dll to the basename.
    Return None if the program is not found.
"""
names = [basename]
if os.name == "nt":
# Windows platforms
extensions = (".exe", ".bat", ".cmd", ".dll")
if not basename.endswith(extensions):
names = [basename + ext for ext in extensions] + [basename]
for name in names:
path = is_program_installed(name)
if path:
return path
def patch_new_path(library_path, new_dir):
library = osp.basename(library_path)
name, *rest = library.split(".")
rest = ".".join(rest)
hash_id = hashlib.sha256(library_path.encode("utf-8")).hexdigest()[:8]
new_name = ".".join([name, hash_id, rest])
return osp.join(new_dir, new_name)
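# Illustrative example (the 8-char hash is hypothetical):
# patch_new_path("/usr/lib/libpng16.so.16", "torchcsprng.libs")
#   -> "torchcsprng.libs/libpng16.1a2b3c4d.so.16"
# i.e. a short sha256-derived id of the source path is inserted after the library's base name.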
def find_dll_dependencies(dumpbin, binary):
out = subprocess.run([dumpbin, "/dependents", binary], stdout=subprocess.PIPE)
out = out.stdout.strip().decode("utf-8")
start_index = out.find("dependencies:") + len("dependencies:")
end_index = out.find("Summary")
dlls = out[start_index:end_index].strip()
dlls = dlls.split(os.linesep)
dlls = [dll.strip() for dll in dlls]
return dlls
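# Note: the parsing above assumes `dumpbin /dependents` output of the usual shape, with an
# "Image has the following dependencies:" block followed by one DLL name per line and a
# closing "Summary" section; everything between those two markers is returned as a list.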
def relocate_elf_library(patchelf, output_dir, output_library, binary):
"""
    Relocate an ELF shared library to be packaged in a wheel.
Given a shared library, find the transitive closure of its dependencies,
rename and copy them into the wheel while updating their respective rpaths.
"""
print("Relocating {0}".format(binary))
binary_path = osp.join(output_library, binary)
ld_tree = lddtree(binary_path)
tree_libs = ld_tree["libs"]
binary_queue = [(n, binary) for n in ld_tree["needed"]]
binary_paths = {binary: binary_path}
binary_dependencies = {}
    while binary_queue:
library, parent = binary_queue.pop(0)
library_info = tree_libs[library]
print(library)
if library_info["path"] is None:
print("Omitting {0}".format(library))
continue
if library in ALLOWLIST:
# Omit glibc/gcc/system libraries
print("Omitting {0}".format(library))
continue
parent_dependencies = binary_dependencies.get(parent, [])
parent_dependencies.append(library)
binary_dependencies[parent] = parent_dependencies
if library in binary_paths:
continue
binary_paths[library] = library_info["path"]
binary_queue += [(n, library) for n in library_info["needed"]]
print("Copying dependencies to wheel directory")
new_libraries_path = osp.join(output_dir, "torchcsprng.libs")
os.makedirs(new_libraries_path)
new_names = {binary: binary_path}
for library in binary_paths:
if library != binary:
library_path = binary_paths[library]
new_library_path = patch_new_path(library_path, new_libraries_path)
print("{0} -> {1}".format(library, new_library_path))
shutil.copyfile(library_path, new_library_path)
new_names[library] = new_library_path
print("Updating dependency names by new files")
for library in binary_paths:
if library != binary:
if library not in binary_dependencies:
continue
library_dependencies = binary_dependencies[library]
new_library_name = new_names[library]
for dep in library_dependencies:
new_dep = osp.basename(new_names[dep])
print("{0}: {1} -> {2}".format(library, dep, new_dep))
subprocess.check_output(
[patchelf, "--replace-needed", dep, new_dep, new_library_name],
cwd=new_libraries_path,
)
print("Updating library rpath")
subprocess.check_output(
[patchelf, "--set-rpath", "$ORIGIN", new_library_name],
cwd=new_libraries_path,
)
subprocess.check_output(
[patchelf, "--print-rpath", new_library_name], cwd=new_libraries_path
)
print("Update library dependencies")
library_dependencies = binary_dependencies[binary]
for dep in library_dependencies:
new_dep = osp.basename(new_names[dep])
print("{0}: {1} -> {2}".format(binary, dep, new_dep))
subprocess.check_output(
[patchelf, "--replace-needed", dep, new_dep, binary], cwd=output_library
)
print("Update library rpath")
subprocess.check_output(
[patchelf, "--set-rpath", "$ORIGIN:$ORIGIN/../torchcsprng.libs", binary_path],
cwd=output_library,
)
def relocate_dll_library(dumpbin, output_dir, output_library, binary):
"""
    Relocate a DLL/PE shared library to be packaged in a wheel.
Given a shared library, find the transitive closure of its dependencies,
rename and copy them into the wheel.
"""
print("Relocating {0}".format(binary))
binary_path = osp.join(output_library, binary)
library_dlls = find_dll_dependencies(dumpbin, binary_path)
binary_queue = [(dll, binary) for dll in library_dlls]
binary_paths = {binary: binary_path}
binary_dependencies = {}
    while binary_queue:
library, parent = binary_queue.pop(0)
if library in WINDOWS_ALLOWLIST or library.startswith("api-ms-win"):
print("Omitting {0}".format(library))
continue
library_path = find_program(library)
if library_path is None:
print("{0} not found".format(library))
continue
if osp.basename(osp.dirname(library_path)) == "system32":
continue
print("{0}: {1}".format(library, library_path))
parent_dependencies = binary_dependencies.get(parent, [])
parent_dependencies.append(library)
binary_dependencies[parent] = parent_dependencies
if library in binary_paths:
continue
binary_paths[library] = library_path
downstream_dlls = find_dll_dependencies(dumpbin, library_path)
binary_queue += [(n, library) for n in downstream_dlls]
print("Copying dependencies to wheel directory")
package_dir = osp.join(output_dir, "torchcsprng")
for library in binary_paths:
if library != binary:
library_path = binary_paths[library]
new_library_path = osp.join(package_dir, library)
print("{0} -> {1}".format(library, new_library_path))
shutil.copyfile(library_path, new_library_path)
def compress_wheel(output_dir, wheel, wheel_dir, wheel_name):
"""Create RECORD file and compress wheel distribution."""
print("Update RECORD file in wheel")
dist_info = glob.glob(osp.join(output_dir, "*.dist-info"))[0]
record_file = osp.join(dist_info, "RECORD")
with open(record_file, "w") as f:
for root, _, files in os.walk(output_dir):
for this_file in files:
full_file = osp.join(root, this_file)
rel_file = osp.relpath(full_file, output_dir)
if full_file == record_file:
f.write("{0},,\n".format(rel_file))
else:
digest, size = rehash(full_file)
f.write("{0},{1},{2}\n".format(rel_file, digest, size))
print("Compressing wheel")
base_wheel_name = osp.join(wheel_dir, wheel_name)
shutil.make_archive(base_wheel_name, "zip", output_dir)
os.remove(wheel)
shutil.move("{0}.zip".format(base_wheel_name), wheel)
shutil.rmtree(output_dir)
def patch_linux():
# Get patchelf location
patchelf = find_program("patchelf")
if patchelf is None:
raise FileNotFoundError(
"Patchelf was not found in the system, please"
" make sure that is available on the PATH."
)
# Find wheel
print("Finding wheels...")
wheels = glob.glob(osp.join(PACKAGE_ROOT, "dist", "*.whl"))
output_dir = osp.join(PACKAGE_ROOT, "dist", ".wheel-process")
image_binary = "image.so"
video_binary = "video_reader.so"
torchcsprng_binaries = [image_binary, video_binary]
for wheel in wheels:
if osp.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("Unzipping wheel...")
wheel_file = osp.basename(wheel)
wheel_dir = osp.dirname(wheel)
print("{0}".format(wheel_file))
wheel_name, _ = osp.splitext(wheel_file)
unzip_file(wheel, output_dir)
print("Finding ELF dependencies...")
output_library = osp.join(output_dir, "torchcsprng")
for binary in torchcsprng_binaries:
if osp.exists(osp.join(output_library, binary)):
relocate_elf_library(patchelf, output_dir, output_library, binary)
compress_wheel(output_dir, wheel, wheel_dir, wheel_name)
def patch_win():
# Get dumpbin location
dumpbin = find_program("dumpbin")
if dumpbin is None:
raise FileNotFoundError(
"Dumpbin was not found in the system, please"
" make sure that is available on the PATH."
)
# Find wheel
print("Finding wheels...")
wheels = glob.glob(osp.join(PACKAGE_ROOT, "dist", "*.whl"))
output_dir = osp.join(PACKAGE_ROOT, "dist", ".wheel-process")
image_binary = "image.pyd"
video_binary = "video_reader.pyd"
torchcsprng_binaries = [image_binary, video_binary]
for wheel in wheels:
if osp.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("Unzipping wheel...")
wheel_file = osp.basename(wheel)
wheel_dir = osp.dirname(wheel)
print("{0}".format(wheel_file))
wheel_name, _ = osp.splitext(wheel_file)
unzip_file(wheel, output_dir)
print("Finding DLL/PE dependencies...")
output_library = osp.join(output_dir, "torchcsprng")
for binary in torchcsprng_binaries:
if osp.exists(osp.join(output_library, binary)):
relocate_dll_library(dumpbin, output_dir, output_library, binary)
compress_wheel(output_dir, wheel, wheel_dir, wheel_name)
if __name__ == "__main__":
if sys.platform == "linux":
patch_linux()
elif sys.platform == "win32":
patch_win()
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import random
import time
import unittest
import numpy as np
import torch
from Crypto.Cipher import AES
from Crypto.Util import Counter
from scipy import stats
try:
import torchcsprng as csprng
except ImportError:
raise RuntimeError("CSPRNG not available")
IS_SANDCASTLE = (
os.getenv("SANDCASTLE") == "1" or os.getenv("TW_JOB_USER") == "sandcastle"
)
IS_FBCODE = os.getenv("PYTORCH_TEST_FBCODE") == "1"
def to_numpy(t, dtype=torch.float):
if t.dtype == torch.bfloat16:
t = t.to(dtype)
return t.numpy()
def to_bytes(t):
if t.dtype == torch.bfloat16:
t = t.view(torch.int16)
return t.cpu().numpy().view(np.int8)
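# Note: numpy has no bfloat16 dtype, so bfloat16 tensors are first reinterpreted as int16
# and then, like every other dtype, viewed as raw int8 bytes.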
class TestCSPRNG(unittest.TestCase):
all_generators = [
csprng.create_random_device_generator(),
csprng.create_random_device_generator("/dev/urandom"),
csprng.create_mt19937_generator(),
csprng.create_mt19937_generator(42),
]
int_dtypes = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
standard_fp_dtypes = [torch.float, torch.double]
non_standard_fp_dtypes = [torch.half, torch.bfloat16]
fp_dtypes = standard_fp_dtypes + non_standard_fp_dtypes
num_dtypes = int_dtypes + fp_dtypes
all_dtypes = num_dtypes + [torch.bool]
size = 1000
all_devices = (
["cpu", "cuda"]
if (torch.cuda.is_available() and csprng.supports_cuda())
else ["cpu"]
)
def test_random_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.num_dtypes:
if dtype == torch.float:
to_inc = 2**24
elif dtype == torch.double:
to_inc = 2**53
elif dtype == torch.half:
to_inc = 2**11
elif dtype == torch.bfloat16:
to_inc = 2**8
else:
to_inc = torch.iinfo(dtype).max
t = torch.empty(self.size, dtype=dtype, device=device).random_(
generator=gen
)
res = stats.kstest(
to_numpy(t.cpu()), stats.randint.cdf, args=(0, to_inc)
)
self.assertTrue(res.statistic < 0.1)
no_cuda = not torch.cuda.is_available() or not csprng.supports_cuda()
no_cuda_message = (
"CUDA is not available or csprng was not compiled with CUDA support"
)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_cpu_vs_cuda(self):
for dtype in self.num_dtypes:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").random_(
generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").random_(
generator=gen
)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_random_to_kstest(self):
to_ = 42
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.num_dtypes:
t = torch.zeros(self.size, dtype=dtype, device=device).random_(
to_, generator=gen
)
res = stats.kstest(
to_numpy(t.cpu()), stats.randint.cdf, args=(0, to_)
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_to_cpu_vs_cuda(self):
to_ = 42
for dtype in self.num_dtypes:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.zeros(self.size, dtype=dtype, device="cpu").random_(
to_, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.zeros(self.size, dtype=dtype, device="cuda").random_(
to_, generator=gen
)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_random_from_to_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.num_dtypes:
for from_ in [0, 24, 42]:
for to_ in [42, 99, 123]:
if from_ < to_:
t = torch.zeros(
self.size, dtype=dtype, device=device
).random_(from_, to_, generator=gen)
res = stats.kstest(
to_numpy(t.cpu()),
stats.randint.cdf,
args=(from_, to_),
)
self.assertTrue(res.statistic < 0.2)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_from_to_cpu_vs_cuda(self):
for dtype in self.num_dtypes:
for from_ in [0, 24, 42]:
for to_ in [42, 99, 123]:
if from_ < to_:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.zeros(
self.size, dtype=dtype, device="cpu"
).random_(from_, to_, generator=gen)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.zeros(
self.size, dtype=dtype, device="cuda"
).random_(from_, to_, generator=gen)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_random_bool(self):
for device in self.all_devices:
for gen in self.all_generators:
t = torch.empty(self.size, dtype=torch.bool, device=device)
t.fill_(False)
t.random_(generator=gen)
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(
0.4 < (t.eq(True)).to(torch.int).sum().item() / self.size < 0.6
)
t.fill_(True)
t.random_(generator=gen)
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(
0.4 < (t.eq(True)).to(torch.int).sum().item() / self.size < 0.6
)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_bool_cpu_vs_cuda(self):
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=torch.bool, device="cpu").random_(
generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=torch.bool, device="cuda").random_(
generator=gen
)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_uniform_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for from_ in [-42, 0, 4.2]:
for to_ in [-4.2, 0, 42]:
if to_ > from_:
t = torch.empty(
self.size, dtype=dtype, device=device
).uniform_(from_, to_, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"uniform",
args=(from_, (to_ - from_)),
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_uniform_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for from_ in [-42, 0, 4.2]:
for to_ in [-4.2, 0, 42]:
if to_ > from_:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(
self.size, dtype=dtype, device="cpu"
).uniform_(from_, to_, generator=gen)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(
self.size, dtype=dtype, device="cuda"
).uniform_(from_, to_, generator=gen)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_normal_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
t = torch.empty(
self.size, dtype=dtype, device=device
).normal_(mean=mean, std=std, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"norm",
args=(mean, std),
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_normal_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").normal_(
mean=mean, std=std, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").normal_(
mean=mean, std=std, generator=gen
)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_log_normal_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
t = torch.empty(
self.size, dtype=dtype, device=device
).log_normal_(mean=mean, std=std, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"lognorm",
args=(std, 0, math.exp(mean)),
)
if dtype in [torch.half, torch.bfloat16]:
self.assertTrue(res.statistic < 0.4)
else:
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_log_normal_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(
self.size, dtype=dtype, device="cpu"
).log_normal_(mean=mean, std=std, generator=gen)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(
self.size, dtype=dtype, device="cuda"
).log_normal_(mean=mean, std=std, generator=gen)
self.assertTrue(
torch.allclose(cpu_t, cuda_t.cpu(), 1e-4, equal_nan=True)
)
def test_exponential_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for lambd in [0.5, 1.0, 5.0]:
t = torch.empty(
self.size, dtype=dtype, device=device
).exponential_(lambd=lambd, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"expon",
args=(
0,
1 / lambd,
),
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
@unittest.skip("https://github.com/pytorch/pytorch/issues/38662")
def test_exponential_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for lambd in [0.5, 1.0, 5.0]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").exponential_(
lambd=lambd, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(
self.size, dtype=dtype, device="cuda"
).exponential_(lambd=lambd, generator=gen)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_cauchy_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for median in [-10, 0, 50]:
for sigma in [0.5, 1.0, 10.0]:
t = torch.empty(
self.size, dtype=dtype, device=device
).cauchy_(median=median, sigma=sigma, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"cauchy",
args=(median, sigma),
)
if dtype in [torch.half, torch.bfloat16]:
self.assertTrue(res.statistic < 0.4)
else:
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_cauchy_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for median in [-10, 0, 50]:
for sigma in [0.5, 1.0, 10.0]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").cauchy_(
median=median, sigma=sigma, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").cauchy_(
median=median, sigma=sigma, generator=gen
)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_geometric(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for p in [0.2, 0.5, 0.8]:
t = torch.empty(
self.size, dtype=dtype, device=device
).geometric_(p=p, generator=gen)
# actual = np.histogram(t.cpu().to(torch.double), np.arange(1, 100))[0]
# expected = stats.geom(p).pmf(np.arange(1, 99)) * self.size
# res = stats.chisquare(actual, expected)
# self.assertAlmostEqual(res.pvalue, 1.0, delta=0.5) TODO https://github.com/pytorch/csprng/issues/7
@unittest.skipIf(no_cuda, no_cuda_message)
def test_geometric_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for p in [0.2, 0.5, 0.8]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").geometric_(
p=p, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").geometric_(
p=p, generator=gen
)
self.assertTrue(
torch.allclose(cpu_t, cuda_t.cpu(), 1e-9, equal_nan=True)
)
def test_non_contiguous_vs_contiguous(self):
size = 10
for device in self.all_devices:
for dtype in self.all_dtypes:
for i in range(10):
t = torch.zeros([size, size, size], dtype=dtype, device=device)
x1 = random.randrange(0, size)
y1 = random.randrange(0, size)
z1 = random.randrange(0, size)
x2 = random.randrange(x1 + 1, max(x1 + 2, size))
y2 = random.randrange(y1 + 1, max(y1 + 2, size))
z2 = random.randrange(z1 + 1, max(z1 + 2, size))
maybe_non_contiguous = t[x1:x2, y1:y2, z1:z2]
assert maybe_non_contiguous.numel() > 0
if not maybe_non_contiguous.is_contiguous():
seed = random.randrange(1000)
non_contiguous = maybe_non_contiguous
gen = csprng.create_mt19937_generator(seed)
non_contiguous.random_(generator=gen)
contiguous = torch.zeros_like(non_contiguous)
gen = csprng.create_mt19937_generator(seed)
contiguous.random_(generator=gen)
assert contiguous.is_contiguous()
self.assertTrue((non_contiguous == contiguous).all())
for x in range(0, size):
for y in range(0, size):
for z in range(0, size):
if (
not x1 <= x < x2
and not y1 <= y < y2
and not z1 <= z < z2
):
self.assertTrue(t[x, y, z] == 0)
@unittest.skipIf(IS_SANDCASTLE or IS_FBCODE, "Does not work on Sandcastle")
@unittest.skipIf(torch.get_num_threads() < 2, "requires multithreading CPU")
def test_cpu_parallel(self):
urandom_gen = csprng.create_random_device_generator("/dev/urandom")
def measure(size):
t = torch.empty(size, dtype=torch.float32, device="cpu")
start = time.time()
for i in range(20):
t.normal_(generator=urandom_gen)
finish = time.time()
return finish - start
time_for_1K = measure(1000)
time_for_1M = measure(1000000)
# Pessimistic check that parallel execution gives >= 1.5 performance boost
self.assertTrue(time_for_1M / time_for_1K < 1000 / 1.5)
@unittest.skipIf(IS_SANDCASTLE or IS_FBCODE, "Does not work on Sandcastle")
def test_version(self):
self.assertTrue(csprng.__version__)
self.assertTrue(csprng.git_version)
def test_randperm(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.int_dtypes:
for size in range(0, 20):
expected = torch.arange(size, dtype=dtype, device=device)
actual = torch.randperm(
size, dtype=dtype, device=device, generator=gen
)
actual_out = torch.empty(1, dtype=dtype, device=device)
torch.randperm(size, out=actual_out, generator=gen)
if size >= 10:
self.assertTrue(not torch.allclose(expected, actual))
self.assertTrue(not torch.allclose(expected, actual_out))
actual = actual.sort()[0]
                        actual_out = actual_out.sort()[0]
self.assertTrue(torch.allclose(expected, actual))
self.assertTrue(torch.allclose(expected, actual_out))
def test_encrypt_decrypt(self):
key_size_bytes = 16
block_size_bytes = 16
def sizeof(dtype):
if dtype == torch.bool:
return 1
elif dtype.is_floating_point:
return torch.finfo(dtype).bits // 8
else:
return torch.iinfo(dtype).bits // 8
def pad(data, pad_size):
if len(data) % pad_size == 0:
return data
length = pad_size - (len(data) % pad_size)
return data + bytes([0]) * length
def create_aes(m, k):
if m == "ecb":
return AES.new(k.tobytes(), AES.MODE_ECB)
elif m == "ctr":
ctr = Counter.new(
AES.block_size * 8, initial_value=0, little_endian=True
)
return AES.new(k.tobytes(), AES.MODE_CTR, counter=ctr)
else:
return None
for key_dtype in self.all_dtypes:
key_size = key_size_bytes // sizeof(key_dtype)
key = torch.empty(key_size, dtype=key_dtype).random_()
key_np = to_bytes(key)
for initial_dtype in self.all_dtypes:
for initial_size in [0, 4, 8, 15, 16, 23, 42]:
initial = torch.empty(initial_size, dtype=initial_dtype).random_()
initial_np = to_bytes(initial)
initial_size_bytes = initial_size * sizeof(initial_dtype)
for encrypted_dtype in self.all_dtypes:
encrypted_size = (
(initial_size_bytes + block_size_bytes - 1)
// block_size_bytes
* block_size_bytes
// sizeof(encrypted_dtype)
)
encrypted = torch.zeros(encrypted_size, dtype=encrypted_dtype)
for decrypted_dtype in self.all_dtypes:
decrypted_size = (
initial_size_bytes + sizeof(decrypted_dtype) - 1
) // sizeof(decrypted_dtype)
decrypted = torch.zeros(
decrypted_size, dtype=decrypted_dtype
)
for mode in ["ecb", "ctr"]:
for device in self.all_devices:
key = key.to(device)
initial = initial.to(device)
encrypted = encrypted.to(device)
decrypted = decrypted.to(device)
csprng.encrypt(
initial, encrypted, key, "aes128", mode
)
encrypted_np = to_bytes(encrypted)
aes = create_aes(mode, key_np)
encrypted_expected = np.frombuffer(
aes.encrypt(
pad(initial_np.tobytes(), block_size_bytes)
),
dtype=np.int8,
)
self.assertTrue(
np.array_equal(encrypted_np, encrypted_expected)
)
csprng.decrypt(
encrypted, decrypted, key, "aes128", mode
)
decrypted_np = to_bytes(decrypted)[
:initial_size_bytes
]
aes = create_aes(mode, key_np)
decrypted_expected = np.frombuffer(
aes.decrypt(
pad(
encrypted_np.tobytes(), block_size_bytes
)
),
dtype=np.int8,
)[:initial_size_bytes]
self.assertTrue(
np.array_equal(decrypted_np, decrypted_expected)
)
self.assertTrue(
np.array_equal(initial_np, decrypted_np)
)
def test_encrypt_decrypt_inplace(self):
key_size_bytes = 16
def sizeof(dtype):
if dtype == torch.bool:
return 1
elif dtype.is_floating_point:
return torch.finfo(dtype).bits // 8
else:
return torch.iinfo(dtype).bits // 8
def create_aes(m, k):
if m == "ecb":
return AES.new(k.tobytes(), AES.MODE_ECB)
elif m == "ctr":
ctr = Counter.new(
AES.block_size * 8, initial_value=0, little_endian=True
)
return AES.new(k.tobytes(), AES.MODE_CTR, counter=ctr)
else:
return None
for key_dtype in self.all_dtypes:
key_size = key_size_bytes // sizeof(key_dtype)
key = torch.empty(key_size, dtype=key_dtype).random_()
key_np = to_bytes(key)
for initial_dtype in self.all_dtypes:
for initial_size_bytes in [0, 16, 256]:
initial_size = initial_size_bytes // sizeof(initial_dtype)
initial = torch.empty(initial_size, dtype=initial_dtype).random_()
initial_np = to_bytes(initial)
initial_np_copy = np.copy(initial_np)
for mode in ["ecb", "ctr"]:
for device in self.all_devices:
key = key.to(device)
initial = initial.to(device)
csprng.encrypt(initial, initial, key, "aes128", mode)
encrypted_np = to_bytes(initial)
aes = create_aes(mode, key_np)
encrypted_expected = np.frombuffer(
aes.encrypt(initial_np_copy.tobytes()), dtype=np.int8
)
self.assertTrue(
np.array_equal(encrypted_np, encrypted_expected)
)
encrypted_np_copy = np.copy(encrypted_np)
csprng.decrypt(initial, initial, key, "aes128", mode)
decrypted_np = to_bytes(initial)
aes = create_aes(mode, key_np)
decrypted_expected = np.frombuffer(
aes.decrypt(encrypted_np_copy.tobytes()), dtype=np.int8
)
self.assertTrue(
np.array_equal(decrypted_np, decrypted_expected)
)
self.assertTrue(
np.array_equal(initial_np_copy, decrypted_np)
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torchcsprng._C import *
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
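# Note: version.py is generated by write_version_file() in setup.py, so the import above is
# wrapped in try/except to keep a plain source checkout importable before a build.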
|
import accimage
import numpy as np
import imageio
import os
ACCIMAGE_SAVE = os.environ.get('ACCIMAGE_SAVE', '')
if len(ACCIMAGE_SAVE) and ACCIMAGE_SAVE.lower() not in {'0', 'false', 'no'}:
SAVE_IMAGES = True
else:
SAVE_IMAGES = False
def image_to_np(image):
"""
Returns:
        np.ndarray: Image converted to array with shape (height, width, channels)
"""
image_np = np.empty([image.channels, image.height, image.width], dtype=np.uint8)
image.copyto(image_np)
image_np = np.transpose(image_np, (1, 2, 0))
return image_np
def save_image(path, image):
imageio.imwrite(path, image_to_np(image))
def test_reading_image():
image = accimage.Image("chicago.jpg")
if SAVE_IMAGES:
save_image('test_reading_image.jpg', image)
assert image.width == 1920
assert image.height == 931
def test_reading_image_from_memory():
from_file = accimage.Image("chicago.jpg")
bytes = open("chicago.jpg", "rb").read()
from_bytes = accimage.Image(bytes)
if SAVE_IMAGES:
save_image('test_reading_image_from_memory.jpg', from_bytes)
assert from_bytes.width == 1920
assert from_bytes.height == 931
np.testing.assert_array_equal(image_to_np(from_file), image_to_np(from_bytes))
def test_resizing():
image = accimage.Image("chicago.jpg")
image.resize(size=(200, 200))
if SAVE_IMAGES:
save_image('test_resizing.jpg', image)
assert image.width == 200
assert image.height == 200
def test_cropping():
image = accimage.Image("chicago.jpg")
image.crop(box=(50, 50, 150, 150))
if SAVE_IMAGES:
save_image('test_cropping.jpg', image)
assert image.width == 100
assert image.height == 100
def test_flipping():
image = accimage.Image("chicago.jpg")
original_image_np = image_to_np(image)
FLIP_LEFT_RIGHT = 0
image.transpose(FLIP_LEFT_RIGHT)
if SAVE_IMAGES:
save_image('test_flipping.jpg', image)
new_image_np = image_to_np(image)
assert image.width == 1920
assert image.height == 931
np.testing.assert_array_equal(new_image_np[:, ::-1, :], original_image_np)
|
from distutils.core import setup, Extension
accimage = Extension(
"accimage",
include_dirs=["/usr/local/opt/jpeg-turbo/include", "/opt/intel/ipp/include"],
libraries=["jpeg", "ippi", "ipps"],
library_dirs=["/usr/local/opt/jpeg-turbo/lib", "/opt/intel/ipp/lib"],
sources=["accimagemodule.c", "jpegloader.c", "imageops.c"],
)
setup(
name="accimage",
version="0.2.0",
description="Accelerated image loader and preprocessor for Torch",
author="Marat Dukhan",
author_email="[email protected]",
ext_modules=[accimage],
)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Obtains credentials and passes them as CLI args to stack invocation
"""
import os
import argparse
import json
import subprocess
import sys
import tools.deployment.args_assembly as args_assembly
THIS_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
DEFAULT_CREDENTIALS_DIRECTORY = os.path.join(THIS_DIRECTORY, "../circleci-failure-tracker-credentials")
def parse_args():
parser = argparse.ArgumentParser(description='Run webapp locally')
parser.add_argument('--github-app-pem-filepath', dest='github_app_pem_filepath',
default=os.path.join(DEFAULT_CREDENTIALS_DIRECTORY, "circleci-failure-attribution.private-key.pem"),
                        help='File containing the GitHub App private key (PEM)')
parser.add_argument('--circleci-api-token-file', dest='circleci_api_token_file',
default=os.path.join(DEFAULT_CREDENTIALS_DIRECTORY, "circleci-api-token.txt"),
                        help='File containing the CircleCI API token')
parser.add_argument('--aws-sqs-queue-url-file', dest='aws_sqs_queue_url_file',
default=os.path.join(DEFAULT_CREDENTIALS_DIRECTORY, "aws-sqs-queue-url.txt"),
help='File containing AWS SQS queue URL')
# Note: the "local" credentials use "github-client-id" and "github-client-secret" for
# the GitHub app named "circleci-failure-attribution-dev", while
# the "remote" credentials use a client id and secret for the GitHub app named "circleci-failure-attribution".
# The local credentials should be used along with ngrok
# (or something similar, like localtunnel: https://localtunnel.github.io/www/) for exposing the app
# on a local port.
parser.add_argument('--prod-app', dest='prod_app', action="store_true", help='For production deployment (default is local). Implies --remote-db')
parser.add_argument('--prod-db', dest='prod_db', action="store_true", help='Use production (remote) database (default is local)')
parser.add_argument('--credentials-json-basedir', dest='credentials_json_basedir',
default=DEFAULT_CREDENTIALS_DIRECTORY,
help='Path to JSON file containing various webapp credentials')
parser.add_argument('--dockerrun-json-output-path', dest='dockerrun_json',
default="Dockerrun.aws.json",
help='Path to write Dockerrun.aws.json file')
parser.add_argument('--no-force-ssl', dest='no_force_ssl', action="store_true", help='Do not force SSL redirection in args placed into Dockerrun.aws.json')
parser.add_argument('--port-override', dest='port_override', type=int, help='Override of local port')
parser.add_argument('--entrypoint', dest='entrypoint_override', help='Entrypoint binary name (excluding leading path) for Dockerrun.aws.json')
parser.add_argument('--notification-ingester', dest='notification_ingester', action="store_true", help='Build for the notification ingester application')
parser.add_argument('--gitdir', dest='repo_gitdir', help='PyTorch git directory')
parser.add_argument('--oneoff', dest='run_oneoff', action='store_true', help='Run oneoff test suite')
return parser.parse_args()
def gen_credentials_filename(is_db, is_remote, suffix=None):
credential_type = "database" if is_db else "app"
locality_suffix = "remote" if is_remote else "local"
arglist = [credential_type, "credentials", locality_suffix]
if suffix:
arglist.append(suffix)
return "-".join(arglist) + ".json"
if __name__ == "__main__":
options = parse_args()
if options.run_oneoff and options.repo_gitdir is None:
print("--gitdir must be defined to run oneoff unittests")
sys.exit(-1)
using_prod_db = options.prod_app or options.prod_db
app_credentials_json_path = os.path.join(options.credentials_json_basedir, gen_credentials_filename(False, options.prod_app))
db_credentials_json_path = os.path.join(options.credentials_json_basedir, gen_credentials_filename(True, using_prod_db))
db_mview_credentials_json_path = os.path.join(options.credentials_json_basedir, gen_credentials_filename(True, using_prod_db, "mview-refresher"))
with open(app_credentials_json_path) as fh_app, open(db_credentials_json_path) as fh_db, open(db_mview_credentials_json_path) as fh_mview_db:
github_app_pem_content = open(options.github_app_pem_filepath).read().strip()
circleci_api_token = open(options.circleci_api_token_file).read().strip()
aws_sqs_queue_url = open(options.aws_sqs_queue_url_file).read().strip()
nondefault_cli_arglist = args_assembly.generate_app_nondefault_cli_arglist(
json.load(fh_app),
json.load(fh_db),
json.load(fh_mview_db),
github_app_pem_content,
circleci_api_token,
aws_sqs_queue_url,
options.notification_ingester,
options.no_force_ssl,
options.port_override,
run_oneoff=options.run_oneoff)
if options.prod_app:
args_assembly.generate_dockerrun_aws_json(options.dockerrun_json, nondefault_cli_arglist, options.entrypoint_override)
else:
os.system('find -name "*.tix" -delete')
default_binary_name = args_assembly.ONEOFF_BINARY_NAME if options.run_oneoff else args_assembly.WEBAPP_BINARY_NAME
binary_name = options.entrypoint_override if options.entrypoint_override else default_binary_name
cli_args = [
"stack",
"run",
binary_name,
"--",
] + ([
"--local",
"--data-path",
"static",
] if binary_name != args_assembly.ONEOFF_BINARY_NAME else [
"--repo-git-dir",
options.repo_gitdir,
]) + nondefault_cli_arglist
command_string = " ".join(cli_args)
print("Executing command:", command_string)
subprocess.check_call(cli_args, cwd="app")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import subprocess
import argparse
import requests
def get_linear_commits(repo_path):
"""
Returns the most recent sequence of commits
that have a linear ancestry, ordered from oldest to newest
"""
command_args = [
"git",
"rev-list",
"--parents",
"origin/master",
]
command_string = " ".join(command_args)
print("Command:", command_string)
output = subprocess.check_output(command_args, cwd=repo_path)
linear_commits = []
for line in output.decode('utf-8').splitlines():
stripped = line.strip()
splitted = stripped.split()
if len(splitted) > 2:
print("First merge commit: " + str(splitted))
break
else:
linear_commits.append(splitted[0])
return list(reversed(linear_commits))
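# Note: `git rev-list --parents` prints "<sha1> <parent1> [<parent2> ...]" per line, newest
# first, so a line with more than two fields marks the first merge commit; the non-merge
# commits seen before it form the linear suffix of history, reversed to oldest-to-newest.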
def upload_commits(hostname, auth_token, commits):
url = hostname + '/api/populate-master-commits'
headers_dict = {
'content-type': 'application/json',
'token': auth_token,
}
r = requests.post(url, verify=False, json=commits, headers=headers_dict)
print(r.json())
print(r.status_code)
def parse_args():
parser = argparse.ArgumentParser(description='Fetch master commits')
parser.add_argument('--repo-path', dest='repo_path', required=True, help='PyTorch repo path')
parser.add_argument('--token', dest='token', required=True, help='GitHub auth token')
parser.add_argument('--hostname', dest='hostname', required=True, help='Server hostname')
return parser.parse_args()
if __name__ == "__main__":
options = parse_args()
linear_commits = get_linear_commits(options.repo_path)
print("Count:", len(linear_commits))
upload_commits(options.hostname, options.token, linear_commits)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import subprocess
import argparse
import requests
import json
PARENT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
def get_first_merge_commit(repo_path):
"""
    Returns the SHA-1 of the most recent merge commit on origin/master,
    i.e. the first commit listed by `git rev-list --parents` with more than one parent
"""
command_args = [
"git",
"rev-list",
"--parents",
"origin/master",
]
command_string = " ".join(command_args)
# print("Command:", command_string)
output = subprocess.check_output(command_args, cwd=repo_path)
for line in output.decode('utf-8').splitlines():
stripped = line.strip()
splitted = stripped.split()
if len(splitted) > 2:
return splitted[0]
def get_metadata_aspect(repo_path, commit_sha1, format_specifier):
my_command = "git log --format=" + format_specifier + " -n 1 " + commit_sha1
commit_message = subprocess.check_output(my_command, cwd=repo_path, shell=True)
    return commit_message.decode('utf-8').strip()
KEYS_AND_FORMAT_SPECIFIERS = {
"message": "%B",
"sha1": "%H",
"subject": "%f",
"tree_sha1": "%T",
"author_name": "%an",
"author_email": "%aE",
"author_date": "%ai",
"committer_name": "%cN",
"committer_email": "%cE",
"committer_date": "%ci",
}
def get_all_metadata_aspects(repo_path, commit_sha1):
return {k: get_metadata_aspect(repo_path, commit_sha1, v) for k, v in KEYS_AND_FORMAT_SPECIFIERS.items()}
def get_log_json_list(repo_path, merge_commit, maybe_single_commit):
"""
    Returns JSON metadata for the commits on origin/master after merge_commit
    (or for just maybe_single_commit when it is given), with sanitized commit messages
"""
command_args = [
os.path.join(PARENT_DIRECTORY, "git-log2json.sh"),
]
if maybe_single_commit:
command_args.extend(["-n1", maybe_single_commit])
else:
command_args.append(merge_commit + ".." + "origin/master")
# print("command: " + " ".join(command_args))
output = subprocess.check_output(command_args, cwd=repo_path)
old_json = json.loads(output)
# Get sanitized commit messages
new_json = []
for i, item in enumerate(old_json):
print("progress: %d/%d" % (i + 1, len(old_json)))
commit_sha1 = item["sha1"]
item["message"] = get_metadata_aspect(repo_path, commit_sha1, "%B")
new_json.append(item)
return new_json
def upload_commits(hostname, auth_token, commits):
url = hostname + '/api/populate-master-commit-metadata'
headers_dict = {
'content-type': 'application/json',
'token': auth_token,
}
r = requests.post(url, verify=False, json=commits, headers=headers_dict)
print(r.json())
print(r.status_code)
def get_last_excluded_commit(options):
if options.from_scratch:
return get_first_merge_commit(options.repo_path)
else:
print("Determining latest commit that has metadata...")
url = options.hostname + '/api/latest-master-commit-with-metadata'
r = requests.get(url, verify=False)
parsed_json = r.json()
print(parsed_json)
if parsed_json["success"]:
return parsed_json["payload"]
else:
return get_first_merge_commit(options.repo_path)
def get_commit_infos_from_list(options):
# First, update the repo
# 'git fetch --force origin "refs/pull/*:refs/remotes/origin/pr/*"'
url = options.hostname + '/api/broken-commits-without-metadata'
r = requests.get(url, verify=False)
parsed_json = r.json()
print("Getting metdata for %d commits..." % len(parsed_json))
metadata_list = []
failed_sha1s = []
for i, commit_sha1 in enumerate(parsed_json):
print("Progress: %d/%d" % (i + 1, len(parsed_json)))
try:
metadata_list.append(get_all_metadata_aspects(options.repo_path, commit_sha1))
        except Exception:
print("Skipping", commit_sha1)
failed_sha1s.append(commit_sha1)
return metadata_list
def parse_args():
parser = argparse.ArgumentParser(description='Fetch master commits')
parser.add_argument('--repo-path', dest='repo_path', required=True, help='PyTorch repo path')
parser.add_argument('--token', dest='token', required=True, help='GitHub auth token')
parser.add_argument('--hostname', dest='hostname', required=True, help='Server hostname')
parser.add_argument('--from-scratch', dest='from_scratch', action="store_true", help='Populate the database from scratch')
parser.add_argument('--single-commit', dest='single_commit', help='Single commit to retrieve')
parser.add_argument('--commit-list-from-api', dest='commit_list_from_api', action="store_true", help='Get list of commits from API')
return parser.parse_args()
if __name__ == "__main__":
options = parse_args()
if options.commit_list_from_api:
commit_list_json = get_commit_infos_from_list(options)
else:
merge_commit = get_last_excluded_commit(options)
print("Starting (excluded) commit:", merge_commit)
commit_list_json = get_log_json_list(options.repo_path, merge_commit, options.single_commit)
print("Populating metadata for", len(commit_list_json), "commits...")
upload_commits(options.hostname, options.token, commit_list_json)
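# Illustrative invocation (script name, hostname, and token are placeholders):
#   python this_script.py --repo-path ~/pytorch --token <github-token> \
#       --hostname https://dr-ci.example.com --from-scratch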
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from optparse import OptionParser, OptionGroup
import pygraphviz as pgv
import psycopg2
import sys
# Query found here:
# https://stackoverflow.com/a/46594226/105137
def writedeps(conn):
sql = """WITH RECURSIVE view_deps AS (
SELECT DISTINCT dependent_ns.nspname as dependent_schema
, dependent_view.relname as dependent_view
, source_ns.nspname as source_schema
, source_table.relname as source_table
FROM pg_depend
JOIN pg_rewrite ON pg_depend.objid = pg_rewrite.oid
JOIN pg_class as dependent_view ON pg_rewrite.ev_class = dependent_view.oid
JOIN pg_class as source_table ON pg_depend.refobjid = source_table.oid
JOIN pg_namespace dependent_ns ON dependent_ns.oid = dependent_view.relnamespace
JOIN pg_namespace source_ns ON source_ns.oid = source_table.relnamespace
WHERE NOT (dependent_ns.nspname = source_ns.nspname AND dependent_view.relname = source_table.relname)
UNION
SELECT DISTINCT dependent_ns.nspname as dependent_schema
, dependent_view.relname as dependent_view
, source_ns.nspname as source_schema
, source_table.relname as source_table
FROM pg_depend
JOIN pg_rewrite ON pg_depend.objid = pg_rewrite.oid
JOIN pg_class as dependent_view ON pg_rewrite.ev_class = dependent_view.oid
JOIN pg_class as source_table ON pg_depend.refobjid = source_table.oid
JOIN pg_namespace dependent_ns ON dependent_ns.oid = dependent_view.relnamespace
JOIN pg_namespace source_ns ON source_ns.oid = source_table.relnamespace
INNER JOIN view_deps vd
ON vd.dependent_schema = source_ns.nspname
AND vd.dependent_view = source_table.relname
AND NOT (dependent_ns.nspname = vd.dependent_schema AND dependent_view.relname = vd.dependent_view)
)
SELECT *
FROM view_deps
WHERE dependent_schema = 'public'
ORDER BY source_schema, source_table;"""
    G = pgv.AGraph(directed=True)
with conn.cursor() as cursor:
cursor.execute(sql)
for row in cursor.fetchall():
dependent_schema, dependent_view, source_schema, source_table = row
print('"%s" -> "%s";' % (dependent_view, source_table))
G.add_edge(dependent_view, source_table, color='blue')
G.layout(prog='dot')
G.draw('view-dependencies.png')
def main():
parser = OptionParser()
group = OptionGroup(parser, "Database Options")
group.add_option("--dbname", action="store", dest="dbname",
help="The database name.")
group.add_option("--dbhost", action="store", dest="dbhost",
default="localhost", help="The database host.")
group.add_option("--dbuser", action="store", dest="dbuser",
help="The database username.")
group.add_option("--dbpass", action="store", dest="dbpass",
help="The database password.")
parser.add_option_group(group)
(options, args) = parser.parse_args()
if not options.dbname:
print("Please supply a database name, see --help for more info.")
sys.exit(1)
try:
conn = psycopg2.connect("dbname='%s' user='%s' host='%s' password='%s'"
% (options.dbname, options.dbuser, options.dbhost, options.dbpass))
writedeps(conn)
except psycopg2.OperationalError as e:
print("Failed to connect to database,",)
print("perhaps you need to supply auth details:\n %s" % str(e))
print("Use --help for more info.")
sys.exit(1)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from timeit import default_timer as timer
import logging
import boto3
from botocore.exceptions import ClientError
import time
import urllib.request
import zipfile
import io
MAX_LOG_URLS_RETRIEVAL_RETRIES = 5
LOG_DIR_PREFIX = "var/log/eb-docker/containers/eb-current-app"
def process_zip_file(file_obj, worker_instance_id):
relevant_lines_for_log = []
with zipfile.ZipFile(file_obj) as zip_ref:
log_files = filter(lambda info: info.filename.startswith(LOG_DIR_PREFIX), zip_ref.infolist())
sorted_log_files_list = sorted(log_files, key=lambda x: x.date_time, reverse=True)
# Only examine one log per zip file
for info in sorted_log_files_list[:1]:
with zip_ref.open(info) as log_fh:
log_lines = log_fh.readlines()
with open(worker_instance_id + ".log", "wb") as output_fh:
for line in log_lines:
output_fh.write(line)
for line in log_lines:
line_string = line.decode('UTF-8').strip()
if line_string.startswith("Posted to: /worker/scan-sha1"):
relevant_lines_for_log.append(line_string)
return relevant_lines_for_log
def get_eb_worker_logs(eb_environment_id):
eb_client = boto3.client('elasticbeanstalk', region_name='us-east-2')
try:
msg = eb_client.request_environment_info(
EnvironmentId=eb_environment_id,
InfoType='bundle',
)
print("First message:", msg)
for i in range(MAX_LOG_URLS_RETRIEVAL_RETRIES):
msg2 = eb_client.retrieve_environment_info(
EnvironmentId=eb_environment_id,
InfoType='bundle',
)
environment_info_list = msg2.get("EnvironmentInfo", [])
if environment_info_list:
log_timestamp_url_tuples_by_instance_id = {}
for log_item in environment_info_list:
s3_url = log_item['Message']
log_timestamp = log_item['SampleTimestamp']
ec2_instance_id = log_item['Ec2InstanceId']
log_timestamp_url_tuples_by_instance_id.setdefault(ec2_instance_id, []).append((log_timestamp, s3_url, ec2_instance_id))
log_timestamp_url_tuples = list(map(lambda x: x[0], sorted(log_timestamp_url_tuples_by_instance_id.values(), key=lambda x: x[0], reverse=True)))
print("Log URL count:", len(log_timestamp_url_tuples))
return log_timestamp_url_tuples
else:
print("Environment info was empty on iteration %d. Sleeping..." % i)
time.sleep(5)
except ClientError as e:
logging.error(e)
return None
def run():
start = timer()
    log_list = get_eb_worker_logs('e-ev8fq2dhbv')
    if log_list is None:
        print("Could not retrieve any log URLs.")
        return
    for timestamp, url, instance_id in log_list:
print("timestamp:", timestamp)
print("url:", url)
with urllib.request.urlopen(url) as download_file_obj:
in_memory_file = io.BytesIO(download_file_obj.read())
relevant_lines = process_zip_file(in_memory_file, instance_id)
for i, line in enumerate(relevant_lines):
print("\t", i, ":", line)
end = timer()
execution_seconds = end - start
print("Completed in", execution_seconds, "seconds")
if __name__ == "__main__":
run()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import json
import base64
WEBAPP_BINARY_NAME = "my-webapp"
ONEOFF_BINARY_NAME = "scan-oneoff"
WEBAPP_INTERNAL_PORT = 3001
def generate_dockerrun_aws_json(output_path, nondefault_cli_arglist, entrypoint_override=None):
json_object = {
"AWSEBDockerrunVersion": "1",
"Image": {
"Name": "kostmo/circleci-failure-tracker-img-small-my-webapp",
},
"Ports": [
{
"ContainerPort": WEBAPP_INTERNAL_PORT,
}
],
"Entrypoint": os.path.join("/opt/app", entrypoint_override if entrypoint_override else WEBAPP_BINARY_NAME),
"Command": " ".join(nondefault_cli_arglist),
}
with open(output_path, "w") as fh:
json.dump(json_object, fh, indent=4, sort_keys=True)
def generate_app_nondefault_cli_arglist(
app_credentials_json,
db_credentials_json,
db_mview_credentials_json,
github_app_pem_content,
circleci_api_token,
aws_sqs_queue_url,
is_notification_ingester,
no_force_ssl,
port_override,
        run_oneoff=False):
arg_list = [
"--db-hostname",
db_credentials_json["db-hostname"],
"--db-username",
db_credentials_json["db-user"],
"--db-password",
db_credentials_json["db-password"],
"--github-app-rsa-pem",
base64.b64encode(github_app_pem_content.encode('ascii')).decode(),
"--aws-sqs-queue-url",
aws_sqs_queue_url,
"--circleci-api-token",
circleci_api_token,
]
if run_oneoff:
return arg_list
arg_list += [
"--github-client-id",
app_credentials_json["github-client-id"],
"--github-client-secret",
app_credentials_json["github-client-secret"],
"--github-webhook-secret",
app_credentials_json["github-webhook-secret"],
"--db-mview-username",
db_mview_credentials_json["db-user"],
"--db-mview-password",
db_mview_credentials_json["db-password"],
"--admin-password",
app_credentials_json["admin-password"],
]
if no_force_ssl:
arg_list.append("--no-force-ssl")
if port_override:
arg_list.extend(["--port", str(port_override)])
return arg_list
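# Illustrative composition (the credential dicts, PEM content, and token values are hypothetical):
#   args = generate_app_nondefault_cli_arglist(
#       app_creds, db_creds, mview_creds, pem_content, circleci_token,
#       queue_url, False, False, None)
#   generate_dockerrun_aws_json("Dockerrun.aws.json", args)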
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import re
import sys
import argparse
import json
CURRENT_DIR = os.path.dirname(__file__)
REPO_ROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, "../.."))
def parse_args():
parser = argparse.ArgumentParser(description='Manipulate VIEWs in the database')
parser.add_argument('--backup', dest='backup', action="store_true", help='Backs up the views. When false, creates the views in the database.')
return parser.parse_args()
def get_view_names():
view_names = []
schema_filepath = os.path.join(REPO_ROOT_DIR, "configuration/schema.sql")
for line in open(schema_filepath):
        matches = re.search(r"CREATE VIEW public\.(\S+) ", line)
if matches:
view_names.append(matches.group(1))
return view_names
def get_db_hostname():
with open(os.path.join(REPO_ROOT_DIR, "../circleci-failure-tracker-credentials/database-credentials-remote.json")) as fh:
data = json.load(fh)
return data["db-hostname"]
SCRIPT_PATH = "view-creation.sql"
def dump_view_creation_script():
view_names = get_view_names()
print("There are", len(view_names), "views.")
db_hostname = get_db_hostname()
cli_args = [
"pg_dump",
"-h",
db_hostname,
"-s",
"-U",
"postgres",
"-d",
"loganci",
]
for v in view_names:
cli_args.extend(["-t", v])
cli_args.extend([">", SCRIPT_PATH])
cli_string = " ".join(cli_args)
print("CLI string:", cli_string)
os.system(cli_string)
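# The assembled command looks roughly like this (hostname and view names are illustrative):
#   pg_dump -h <db-hostname> -s -U postgres -d loganci -t view_a -t view_b > view-creation.sql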
def run_view_creation_script():
# psql --no-password -U postgres -h $DB_HOSTNAME < ../configuration/schema.sql
db_hostname = get_db_hostname()
cli_args = [
"psql",
"--no-password",
"-U",
"postgres",
"-h",
db_hostname,
"-d",
"loganci",
"<",
SCRIPT_PATH,
]
cli_string = " ".join(cli_args)
print("CLI string:", cli_string)
os.system(cli_string)
if __name__ == "__main__":
options = parse_args()
if options.backup:
dump_view_creation_script()
else:
run_view_creation_script()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os, sys
import json
THIS_SCRIPT_DIR = os.path.dirname(__file__)
CREDENTIALS_DIR = os.path.join(THIS_SCRIPT_DIR, "../../../circleci-failure-tracker-credentials")
def write_creds_module(source_filename, destination_filename):
json_source_path = os.path.join(CREDENTIALS_DIR, source_filename)
github_token_path = os.path.join(CREDENTIALS_DIR, "github-personal-access-token-repo-read-permissions.txt")
sqs_queue_url_path = os.path.join(CREDENTIALS_DIR, "aws-sqs-queue-url.txt")
dest_path = os.path.join("dr_ci_view_refresh", destination_filename)
with open(json_source_path) as json_fh, open(dest_path, "w") as output_module_fh:
creds_dict = json.load(json_fh)
output_module_fh.write('# This file is autogenerated!\n')
output_module_fh.write('db_hostname = "%s"\n' % creds_dict["db-hostname"])
output_module_fh.write('db_username = "%s"\n' % creds_dict["db-user"])
output_module_fh.write('db_password = "%s"\n' % creds_dict["db-password"])
output_module_fh.write('db_name = "%s"\n' % "loganci")
with open(github_token_path) as token_fh:
access_token = token_fh.read().strip()
output_module_fh.write('repo_read_auth_token = "%s"\n' % access_token)
with open(sqs_queue_url_path) as fh:
queue_url = fh.read().strip()
output_module_fh.write('sqs_queue_url = "%s"\n' % queue_url)
if __name__ == "__main__":
write_creds_module("database-credentials-remote-mview-refresher.json", "db_config.py")
write_creds_module("database-credentials-remote.json", "logan_db_config.py")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import psycopg2
import json
from timeit import default_timer as timer
from multiprocessing.pool import ThreadPool
import db_config
def view_refresh_lambda_handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
view_names = event["view-names"]
payload = update_multiple_views(view_names, "lambda")
return {
"statusCode": 200,
"body": json.dumps({
"message": "hello world",
"payload": payload,
}),
}
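# Illustrative invocation payload (view names must appear in WHITELISTED_VIEW_NAMES below):
#   {"view-names": ["master_failures_raw_causes_mview"]}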
def update_multiple_views(view_names, event_source):
def long_running_func(view_name):
return {
"view": view_name,
"result": run(view_name, event_source),
}
p = ThreadPool(2)
return p.map(long_running_func, view_names)
WHITELISTED_VIEW_NAMES = {
"master_failures_raw_causes_mview",
"upstream_breakages_weekly_aggregation_mview",
"job_schedule_statistics_mview",
"master_failures_weekly_aggregation_mview",
"job_schedule_discriminated_mview",
"master_ordered_commits_with_metadata_mview",
"master_commit_job_success_completeness_mview",
"master_job_failure_spans_mview",
"master_job_failure_spans_conservative_mview",
"master_commit_reversion_spans_mview",
"master_required_unbuilt_jobs_mview",
"pattern_frequency_summary_mview",
"pr_merge_time_build_stats_by_master_commit_mview",
}
def run(view_name, trigger_source):
print("Now refreshing materialized view:", view_name)
    # A whitelist is required here because the view name is interpolated into the
    # SQL string below, which would otherwise permit SQL injection. In practice the
    # Lambda endpoint should not be externally accessible, so the inputs are trusted.
if view_name not in WHITELISTED_VIEW_NAMES:
return {"error": "Unsupported view name " + view_name}
conn = psycopg2.connect(host=db_config.db_hostname, database=db_config.db_name, user=db_config.db_username, password=db_config.db_password)
with conn.cursor() as cur:
cur.execute('SET SESSION lock_timeout = 3000;') # 3 seconds
cur.execute('SET SESSION statement_timeout = %d;' % (1000*60*10)) # 10 minutes
print("Refresh begins now...")
start = timer()
# CONCURRENTLY is very important; it allows queries to be performed at the same time
# as the view is being refreshed (which happens very often).
# However, it does require a unique index to exist on the view.
cur.execute('REFRESH MATERIALIZED VIEW CONCURRENTLY %s;' % view_name)
# cur.execute('REFRESH MATERIALIZED VIEW %s;' % view_name)
end = timer()
execution_seconds = end - start
print("Refresh completed in ", execution_seconds, "seconds")
cur.execute('INSERT INTO lambda_logging.materialized_view_refresh_events (view_name, execution_duration_seconds, event_source) VALUES (%s, %s, %s);', (view_name, execution_seconds, trigger_source))
conn.commit()
print("Inserted operation record for", view_name, "refresh into database.")
return {
"elapsed_time_seconds": execution_seconds,
}
if __name__ == "__main__":
view_names = [
# "job_schedule_discriminated_mview",
# "master_ordered_commits_with_metadata_mview",
# "master_commit_reversion_spans_mview",
"master_ordered_commits_with_metadata_mview",
]
payload = update_multiple_views(view_names, "test")
print(payload)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import psycopg2
import json
from timeit import default_timer as timer
import logging
import boto3
from botocore.exceptions import ClientError
import logan_db_config
def record_queue_depth_lambda_handler(event, context):
my_payload = run()
return {
"statusCode": 200,
"body": json.dumps({
"message": "hello world",
"payload": my_payload,
}),
}
def get_queue_depth(sqs_queue_url):
sqs_client = boto3.client('sqs', region_name='us-east-2')
try:
msg = sqs_client.get_queue_attributes(
QueueUrl=sqs_queue_url,
AttributeNames=['ApproximateNumberOfMessages'],
)
except ClientError as e:
logging.error(e)
return None
return msg['Attributes']['ApproximateNumberOfMessages']
def run():
print("Now connecting to database...")
conn = psycopg2.connect(
host=logan_db_config.db_hostname,
database=logan_db_config.db_name,
user=logan_db_config.db_username,
password=logan_db_config.db_password)
with conn.cursor() as cur:
cur.execute('SET SESSION lock_timeout = 3000;')
cur.execute('SET SESSION statement_timeout = %d;' % (1000*60*3)) # 3 minutes
print("Work begins now...")
start = timer()
queue_depth = get_queue_depth(logan_db_config.sqs_queue_url)
cur.execute("INSERT INTO lambda_logging.sqs_queue_depth_history (queue_depth) VALUES (%s) RETURNING inserted_at;", (queue_depth,))
insertion_timestamp = cur.fetchone()[0]
conn.commit()
print("Inserted queue depth at %s..." % insertion_timestamp)
end = timer()
execution_seconds = end - start
print("Completed in", execution_seconds, "seconds")
return queue_depth
if __name__ == "__main__":
run()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import psycopg2
import json
from timeit import default_timer as timer
import logging
import logan_db_config
# Set up logging
logging.basicConfig(level=logging.DEBUG,
format='%(levelname)s: %(asctime)s: %(message)s')
def record_master_viability_lambda_handler(event, context):
my_payload = run()
return {
"statusCode": 200,
"body": json.dumps({
"message": "hello world",
"payload": my_payload,
}),
}
def run():
print("Now connecting to database...")
conn = psycopg2.connect(
host=logan_db_config.db_hostname,
database=logan_db_config.db_name,
user=logan_db_config.db_username,
password=logan_db_config.db_password)
with conn.cursor() as cur:
cur.execute('SET SESSION lock_timeout = 3000;')
cur.execute('SET SESSION statement_timeout = %d;' % (1000*60)) # 1 minute
print("Work begins now...")
start = timer()
cur.execute("SELECT snapshot_master_viable_commit_age();")
conn.commit()
end = timer()
execution_seconds = end - start
print("Completed in", execution_seconds, "seconds")
return {
"elapsed_time_seconds": execution_seconds,
}
if __name__ == "__main__":
payload = run()
print(payload)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Reads the "expanded" version of .circleci/config.yml from an arbitrary
build of each commit from the "master" branch of the pytorch repo and
inserts workflow jobs and properties into the Dr. CI database.
"""
import psycopg2
import psycopg2.extras
import yaml
import json
import os
import requests
import subprocess
import hashlib
import argparse
import logan_db_config
CONFIG_YAML_SHA1_API_URL_TEMPLATE = "https://api.github.com/repos/pytorch/pytorch/contents/.circleci/config.yml?ref=%s"
def populate_config_yaml_lambda_handler(event, context):
payload = run(25)
return {
"statusCode": 200,
"body": json.dumps({
"message": "hello world",
"payload": payload,
}),
}
def get_config_yaml_content_sha1(local_repo_path, commit_sha1):
"""
Uses auth token to avoid GitHub API rate limiting
"""
if local_repo_path:
try:
file_hash = subprocess.check_output([
"git",
"--git-dir",
local_repo_path,
"rev-parse",
"%s:.circleci/config.yml" % commit_sha1,
]).decode('utf-8').strip()
return file_hash
except Exception as e:
print("Couldn't obtain file SHA1 from local repo:", str(e))
print("\tFetching content SHA1 from GitHub...")
repo_sha1_retrieval_url = CONFIG_YAML_SHA1_API_URL_TEMPLATE % commit_sha1
headers = {
'Authorization': 'token %s' % logan_db_config.repo_read_auth_token,
}
repo_request = requests.get(repo_sha1_retrieval_url, headers=headers)
github_api_response_json = repo_request.json()
sha1 = github_api_response_json.get("sha")
if not sha1:
print("PROBLEM:", github_api_response_json)
return sha1
def populate_db_yaml_records(cur, build_number, repo_yaml_content_sha1):
url = "https://circleci.com/api/v1.1/project/github/pytorch/pytorch/%d" % build_number
r = requests.get(url)
api_json_obj = r.json()
yaml_text = api_json_obj.get("circle_yml").get("string")
expanded_yaml_md5 = hashlib.md5(yaml_text.encode('utf-8')).hexdigest()
yaml_obj = yaml.safe_load(yaml_text)
workflows_dict = yaml_obj.get("workflows")
cur.execute(
'INSERT INTO circleci_config_yaml_hashes (expanded_yaml_content, expanded_yaml_md5, repo_yaml_sha1) VALUES (%s, %s, %s);',
(yaml_text, expanded_yaml_md5, repo_yaml_content_sha1)
)
branch_filters_by_job_by_workflow = {}
jobs_insertion_values = []
# (workflow, dependent_job, required_job)
job_dependency_tuples = []
schedule_insertion_values = []
for workflow_name, workflow_obj in filter(lambda x: x[0] != "version", workflows_dict.items()):
if type(workflow_obj) is dict:
cur.execute(
'INSERT INTO circleci_workflows_by_yaml_file (yaml_content_sha1, name) VALUES (%s, %s) RETURNING id;',
(repo_yaml_content_sha1, workflow_name)
)
workflow_id = cur.fetchone()[0]
cron_values = []
for trigger in workflow_obj.get("triggers", []):
schedule_obj = trigger.get("schedule", {})
for k, v in schedule_obj.items():
if k == "cron":
cron_values.append(v)
for v in cron_values:
schedule_insertion_values.append((workflow_id, v))
branch_filters_by_job = branch_filters_by_job_by_workflow.setdefault(workflow_id, {})
for job_obj in workflow_obj.get("jobs", []):
if type(job_obj) is dict:
job_name = list(job_obj.keys())[0]
for key_job_name, job_value_obj in job_obj.items():
branch_filter_only_obj = job_value_obj.get("filters", {}).get("branches", {}).get("only")
if type(branch_filter_only_obj) is list:
branch_filters_by_job.setdefault(job_name, []).extend(branch_filter_only_obj)
elif type(branch_filter_only_obj) is str:
branch_filters_by_job.setdefault(job_name, []).append(branch_filter_only_obj)
for required_job in job_value_obj.get("requires", []):
job_dependency_tuples.append((workflow_id, key_job_name, required_job))
else:
job_name = job_obj
jobs_insertion_values.append((workflow_id, job_name))
jobs_insert_query = 'INSERT INTO circleci_workflow_jobs (workflow, job_name) VALUES %s'
psycopg2.extras.execute_values(
cur, jobs_insert_query, jobs_insertion_values, template=None, page_size=100
)
dependencies_insert_query = 'INSERT INTO circleci_config_job_dependencies (workflow, dependent_job, required_job) VALUES %s'
psycopg2.extras.execute_values(
cur, dependencies_insert_query, job_dependency_tuples, template=None, page_size=100
)
filter_insertion_values = []
for workflow_id, branch_filters_by_job in branch_filters_by_job_by_workflow.items():
for job_name, filters_list in branch_filters_by_job.items():
filter_insertion_values.extend([(workflow_id, job_name, branch, True) for branch in filters_list])
insert_query2 = 'INSERT INTO circleci_job_branch_filters (workflow, job_name, branch, filter_include) VALUES %s'
psycopg2.extras.execute_values(
cur, insert_query2, filter_insertion_values, template=None, page_size=100
)
schedule_insert_query = 'INSERT INTO circleci_workflow_schedules (workflow, cron_schedule) VALUES %s'
psycopg2.extras.execute_values(
cur, schedule_insert_query, schedule_insertion_values, template=None, page_size=100
)
def populate_config_info(local_repo_path, cur, commit_sha1, build_number):
repo_yaml_content_sha1 = get_config_yaml_content_sha1(local_repo_path, commit_sha1)
if repo_yaml_content_sha1:
cur.execute(
"SELECT repo_yaml_sha1 FROM circleci_config_yaml_hashes WHERE repo_yaml_sha1=%s LIMIT 1;",
(repo_yaml_content_sha1,)
)
row = cur.fetchone()
if not row:
print("\tInserting workflow into database...")
populate_db_yaml_records(cur, build_number, repo_yaml_content_sha1)
else:
print("\tWorkflow is already in database.")
print("\tInserting git-commit/config.yaml association into database...")
cur.execute(
'INSERT INTO circleci_expanded_config_yaml_hashes_by_commit (commit_sha1, repo_yaml_sha1) VALUES (%s, %s);',
(commit_sha1, repo_yaml_content_sha1)
)
print("\tInserted git-commit/config.yaml association into database.")
else:
print("Couldn't retrieve file content sha1 for commit %s!" % commit_sha1)
def run(commit_count, local_repo_path=None):
if local_repo_path:
return_code = subprocess.call([
"git",
"--git-dir",
local_repo_path,
"fetch",
"origin",
"master",
])
print("Fetched local git repo with return code: %d" % return_code)
conn = psycopg2.connect(
host=logan_db_config.db_hostname,
database=logan_db_config.db_name,
user=logan_db_config.db_username,
password=logan_db_config.db_password)
with conn.cursor() as cur:
cur.execute('SET SESSION lock_timeout = 3000;') # 3 seconds
cur.execute('SET SESSION statement_timeout = %d;' % (1000*60*3)) # 3 minutes
cur.execute("SELECT sha1, build_num FROM master_commits_unpopulated_circleci_configs LIMIT %s;", (commit_count,))
rows = cur.fetchall()
enumerated_rows = list(enumerate(rows))
def single_commit_populator(args_tuple):
(i, (commit_sha1, build_number)) = args_tuple
print("%d/%d: Populating CircleCI config for commit %s using build #%d..." % (i + 1, len(enumerated_rows), commit_sha1, build_number))
populate_config_info(local_repo_path, cur, commit_sha1, build_number)
# We don't allow concurrent actions here, since we don't want two Git commits
# with the same config.yml hash to race in database insertion.
#
# We commit the transaction after every row, so that we can make incremental progress
# even if the overall task fails.
for x in enumerated_rows:
single_commit_populator(x)
conn.commit()
return {
"foo": "bar",
}
def parse_args():
parser = argparse.ArgumentParser(description='Parse config.yml files for revisions of the pytorch repo')
parser.add_argument('--repo-path', dest='local_repo_path',
default=os.path.expanduser("~/github/pytorch-repos/pytorch/.git"),
help='Local filesystem path to pytorch repo .git directory')
parser.add_argument('--commit-count', dest='commit_count',
type=int,
default=2,
help='How many commits to retrieve')
return parser.parse_args()
if __name__ == "__main__":
options = parse_args()
    payload = run(options.commit_count, options.local_repo_path)
print(payload)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import pytest
from hello_world import app
@pytest.fixture()
def apigw_event():
""" Generates API GW Event"""
return {
"body": '{ "test": "body"}',
"resource": "/{proxy+}",
"requestContext": {
"resourceId": "123456",
"apiId": "1234567890",
"resourcePath": "/{proxy+}",
"httpMethod": "POST",
"requestId": "c6af9ac6-7b61-11e6-9a41-93e8deadbeef",
"accountId": "123456789012",
"identity": {
"apiKey": "",
"userArn": "",
"cognitoAuthenticationType": "",
"caller": "",
"userAgent": "Custom User Agent String",
"user": "",
"cognitoIdentityPoolId": "",
"cognitoIdentityId": "",
"cognitoAuthenticationProvider": "",
"sourceIp": "127.0.0.1",
"accountId": "",
},
"stage": "prod",
},
"queryStringParameters": {"foo": "bar"},
"headers": {
"Via": "1.1 08f323deadbeefa7af34d5feb414ce27.cloudfront.net (CloudFront)",
"Accept-Language": "en-US,en;q=0.8",
"CloudFront-Is-Desktop-Viewer": "true",
"CloudFront-Is-SmartTV-Viewer": "false",
"CloudFront-Is-Mobile-Viewer": "false",
"X-Forwarded-For": "127.0.0.1, 127.0.0.2",
"CloudFront-Viewer-Country": "US",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Upgrade-Insecure-Requests": "1",
"X-Forwarded-Port": "443",
"Host": "1234567890.execute-api.us-east-1.amazonaws.com",
"X-Forwarded-Proto": "https",
"X-Amz-Cf-Id": "aaaaaaaaaae3VYQb9jd-nvCd-de396Uhbp027Y2JvkCPNLmGJHqlaA==",
"CloudFront-Is-Tablet-Viewer": "false",
"Cache-Control": "max-age=0",
"User-Agent": "Custom User Agent String",
"CloudFront-Forwarded-Proto": "https",
"Accept-Encoding": "gzip, deflate, sdch",
},
"pathParameters": {"proxy": "/examplepath"},
"httpMethod": "POST",
"stageVariables": {"baz": "qux"},
"path": "/examplepath",
}
def test_lambda_handler(apigw_event, mocker):
ret = app.lambda_handler(apigw_event, "")
data = json.loads(ret["body"])
assert ret["statusCode"] == 200
assert "message" in ret["body"]
assert data["message"] == "hello world"
# assert "location" in data.dict_keys()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import setup
cwd = os.path.dirname(os.path.abspath(__file__))
version_txt = os.path.join(cwd, 'version.txt')
with open(version_txt, 'r') as f:
version = f.readline().strip()
try:
sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
except Exception:
sha = 'Unknown'
package_name = 'functorch'
if os.getenv('BUILD_VERSION'):
version = os.getenv('BUILD_VERSION')
elif sha != 'Unknown':
version += '+' + sha[:7]
requirements = [
# This represents a nightly version of PyTorch.
# It can be installed as a binary or from source.
"torch>=1.14.0.dev",
]
extras = {}
extras["aot"] = ["networkx", ]
if __name__ == '__main__':
try:
setup(
# Metadata
name=package_name,
version=version,
author='PyTorch Core Team',
url="https://github.com/pytorch/functorch",
description='JAX-like composable function transforms for PyTorch',
license='BSD',
# Package info
packages=[],
install_requires=requirements,
extras_require=extras,
)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
|
import io
import os
import re
from setuptools import find_packages, setup
def read(*names, **kwargs):
with io.open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
readme = read("README.md").replace(
'src="assets/', 'src="https://raw.githubusercontent.com/pytorch/ignite/master/assets/'
)
VERSION = find_version("ignite", "__init__.py")
requirements = ["torch>=1.3,<3", "packaging"]
setup(
# Metadata
name="pytorch-ignite",
version=VERSION,
author="PyTorch-Ignite Team",
author_email="[email protected]",
url="https://github.com/pytorch/ignite",
description="A lightweight library to help with training neural networks in PyTorch.",
long_description_content_type="text/markdown",
long_description=readme,
license="BSD",
# Package info
packages=find_packages(exclude=("tests", "tests.*")),
package_data={"ignite": ["py.typed"]},
zip_safe=False,
install_requires=requirements,
)
|
#
# Tests :
# For all images
# can import torch and its version == required one
# can import ignite and its version == required one
# for all -vision images
# can import opencv without driver issue
# for all horovod images
# can import horovod and its version == required one
# for all msdp images
# can import deepspeed and its version == required one
#
# Requirements:
# pip install docker
#
import argparse
import json
import os
import docker
def run_python_cmd(cmd):
try_except_cmd = f"""
import warnings
warnings.filterwarnings("ignore")
def main():
{cmd}
try:
main()
except Exception as e:
import traceback
print(traceback.format_exc())
"""
try:
out = client.containers.run(args.image, f"python -c '{try_except_cmd}'", auto_remove=True, stderr=True)
assert isinstance(out, bytes), type(out)
out = out.decode("utf-8").strip()
out_lower = out.lower()
        if any(k in out_lower for k in ["error", "exception"]):
raise RuntimeError(out)
except docker.errors.ContainerError as e:
raise RuntimeError(e)
return out
base_cmd = """
import torch
import ignite
result = dict()
result["torch"] = torch.__version__
result["ignite"] = ignite.__version__
{hvd}
{msdp}
print(result)
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser("Check docker image script")
parser.add_argument("image", type=str, help="Docker image to check")
args = parser.parse_args()
client = docker.from_env()
docker_image_name = args.image
name, version = docker_image_name.split(":")
assert version != "latest", version
torch_version, ignite_version = version.split("-")
_, image_type = name.split("/")
expected_out = {
"torch": torch_version,
"ignite": ignite_version,
}
hvd_cmd = ""
if "hvd" in image_type:
hvd_cmd = 'import horovod; result["hvd"] = horovod.__version__'
assert "HVD_VERSION" in os.environ
val = os.environ["HVD_VERSION"]
expected_out["hvd"] = val if val[0] != "v" else val[1:]
msdp_cmd = ""
if "msdp" in image_type:
msdp_cmd = 'import deepspeed; result["msdp"] = deepspeed.__version__'
assert "MSDP_VERSION" in os.environ
val = os.environ["MSDP_VERSION"]
expected_out["msdp"] = val if val[0] != "v" else val[1:]
cmd = base_cmd.format(hvd=hvd_cmd, msdp=msdp_cmd)
out = run_python_cmd(cmd)
try:
out = out.replace("'", '"')
out = json.loads(out)
except json.decoder.JSONDecodeError:
raise RuntimeError(out)
for k, v in expected_out.items():
assert k in out, f"{k} not in {out.keys()}"
assert v in out[k], f"{v} not in {out[k]}"
if "vision" in image_type:
run_python_cmd("import cv2")
if "nlp" in image_type:
run_python_cmd("import torchtext, transformers")
if "apex" in image_type:
run_python_cmd("import apex")
|
import ignite.contrib
import ignite.distributed
import ignite.engine
import ignite.exceptions
import ignite.handlers
import ignite.metrics
import ignite.utils
__version__ = "0.5.0"
|
import collections.abc as collections
import functools
import hashlib
import logging
import random
import shutil
import warnings
from pathlib import Path
from typing import Any, Callable, cast, Dict, Optional, TextIO, Tuple, Type, TypeVar, Union
import torch
__all__ = [
"convert_tensor",
"apply_to_tensor",
"apply_to_type",
"to_onehot",
"setup_logger",
"manual_seed",
"hash_checkpoint",
]
def convert_tensor(
x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
"""Move tensors to relevant device.
Args:
x: input tensor or mapping, or sequence of tensors.
device: device type to move ``x``.
non_blocking: convert a CPU Tensor with pinned memory to a CUDA Tensor
asynchronously with respect to the host if possible
"""
def _func(tensor: torch.Tensor) -> torch.Tensor:
return tensor.to(device=device, non_blocking=non_blocking) if device is not None else tensor
return apply_to_tensor(x, _func)
def apply_to_tensor(
x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes], func: Callable
) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
"""Apply a function on a tensor or mapping, or sequence of tensors.
Args:
x: input tensor or mapping, or sequence of tensors.
func: the function to apply on ``x``.
"""
return apply_to_type(x, torch.Tensor, func)
def apply_to_type(
x: Union[Any, collections.Sequence, collections.Mapping, str, bytes],
input_type: Union[Type, Tuple[Type[Any], Any]],
func: Callable,
) -> Union[Any, collections.Sequence, collections.Mapping, str, bytes]:
"""Apply a function on an object of `input_type` or mapping, or sequence of objects of `input_type`.
Args:
x: object or mapping or sequence.
input_type: data type of ``x``.
func: the function to apply on ``x``.
"""
if isinstance(x, input_type):
return func(x)
if isinstance(x, (str, bytes)):
return x
if isinstance(x, collections.Mapping):
return cast(Callable, type(x))({k: apply_to_type(sample, input_type, func) for k, sample in x.items()})
if isinstance(x, tuple) and hasattr(x, "_fields"): # namedtuple
return cast(Callable, type(x))(*(apply_to_type(sample, input_type, func) for sample in x))
if isinstance(x, collections.Sequence):
return cast(Callable, type(x))([apply_to_type(sample, input_type, func) for sample in x])
    raise TypeError(f"x must contain {input_type}, dicts or lists; found {type(x)}")
def to_onehot(indices: torch.Tensor, num_classes: int) -> torch.Tensor:
"""Convert a tensor of indices of any shape `(N, ...)` to a
tensor of one-hot indicators of shape `(N, num_classes, ...)` and of type uint8. Output's device is equal to the
    input's device.
Args:
indices: input tensor to convert.
num_classes: number of classes for one-hot tensor.
.. versionchanged:: 0.4.3
        This function is now torchscriptable.
"""
new_shape = (indices.shape[0], num_classes) + indices.shape[1:]
onehot = torch.zeros(new_shape, dtype=torch.uint8, device=indices.device)
return onehot.scatter_(1, indices.unsqueeze(1), 1)
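# Illustrative example (assumed values):
#   to_onehot(torch.tensor([0, 2]), num_classes=3)
#   -> tensor([[1, 0, 0],
#              [0, 0, 1]], dtype=torch.uint8)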
def setup_logger(
name: Optional[str] = "ignite",
level: int = logging.INFO,
stream: Optional[TextIO] = None,
format: str = "%(asctime)s %(name)s %(levelname)s: %(message)s",
filepath: Optional[str] = None,
distributed_rank: Optional[int] = None,
reset: bool = False,
) -> logging.Logger:
"""Setups logger: name, level, format etc.
Args:
name: new name for the logger. If None, the standard logger is used.
level: logging level, e.g. CRITICAL, ERROR, WARNING, INFO, DEBUG.
stream: logging stream. If None, the standard stream is used (sys.stderr).
format: logging format. By default, `%(asctime)s %(name)s %(levelname)s: %(message)s`.
filepath: Optional logging file path. If not None, logs are written to the file.
distributed_rank: Optional, rank in distributed configuration to avoid logger setup for workers.
If None, distributed_rank is initialized to the rank of process.
reset: if True, reset an existing logger rather than keep format, handlers, and level.
Returns:
logging.Logger
Examples:
Improve logs readability when training with a trainer and evaluator:
.. code-block:: python
from ignite.utils import setup_logger
trainer = ...
evaluator = ...
trainer.logger = setup_logger("trainer")
evaluator.logger = setup_logger("evaluator")
trainer.run(data, max_epochs=10)
# Logs will look like
# 2020-01-21 12:46:07,356 trainer INFO: Engine run starting with max_epochs=5.
# 2020-01-21 12:46:07,358 trainer INFO: Epoch[1] Complete. Time taken: 00:5:23
# 2020-01-21 12:46:07,358 evaluator INFO: Engine run starting with max_epochs=1.
# 2020-01-21 12:46:07,358 evaluator INFO: Epoch[1] Complete. Time taken: 00:01:02
# ...
Every existing logger can be reset if needed
.. code-block:: python
logger = setup_logger(name="my-logger", format="=== %(name)s %(message)s")
logger.info("first message")
setup_logger(name="my-logger", format="+++ %(name)s %(message)s", reset=True)
logger.info("second message")
# Logs will look like
# === my-logger first message
# +++ my-logger second message
Change the level of an existing internal logger
.. code-block:: python
setup_logger(
name="ignite.distributed.launcher.Parallel",
level=logging.WARNING
)
.. versionchanged:: 0.4.3
Added ``stream`` parameter.
.. versionchanged:: 0.4.5
Added ``reset`` parameter.
"""
# check if the logger already exists
existing = name is None or name in logging.root.manager.loggerDict
# if existing, get the logger otherwise create a new one
logger = logging.getLogger(name)
if distributed_rank is None:
import ignite.distributed as idist
distributed_rank = idist.get_rank()
# Remove previous handlers
if distributed_rank > 0 or reset:
if logger.hasHandlers():
for h in list(logger.handlers):
logger.removeHandler(h)
if distributed_rank > 0:
# Add null handler to avoid multiple parallel messages
logger.addHandler(logging.NullHandler())
# Keep the existing configuration if not reset
if existing and not reset:
return logger
if distributed_rank == 0:
logger.setLevel(level)
formatter = logging.Formatter(format)
ch = logging.StreamHandler(stream=stream)
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
if filepath is not None:
fh = logging.FileHandler(filepath)
fh.setLevel(level)
fh.setFormatter(formatter)
logger.addHandler(fh)
    # Don't propagate to ancestor loggers.
    # The problem here is that handlers are attached directly to this logger;
    # should we provide a less open default configuration?
if name is not None:
logger.propagate = False
return logger
def manual_seed(seed: int) -> None:
"""Setup random state from a seed for `torch`, `random` and optionally `numpy` (if can be imported).
Args:
seed: Random state seed
.. versionchanged:: 0.4.3
Added ``torch.cuda.manual_seed_all(seed)``.
.. versionchanged:: 0.4.5
Added ``torch_xla.core.xla_model.set_rng_state(seed)``.
"""
random.seed(seed)
torch.manual_seed(seed)
try:
import torch_xla.core.xla_model as xm
xm.set_rng_state(seed)
except ImportError:
pass
try:
import numpy as np
np.random.seed(seed)
except ImportError:
pass
def deprecated(
deprecated_in: str, removed_in: str = "", reasons: Tuple[str, ...] = (), raise_exception: bool = False
) -> Callable:
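    """Decorator factory that marks a function as deprecated.
    A sketch of the behavior, as implemented below: the wrapped function emits a
    ``DeprecationWarning`` (or raises it when ``raise_exception=True``) noting the
    version it was deprecated in and, optionally, the version it will be removed in;
    the given ``reasons`` are appended to the wrapped function's docstring.
    Illustrative usage (the decorated function is hypothetical):
        @deprecated("0.4.2", "0.6.0", reasons=("use new_fn instead",))
        def old_fn(): ...
    """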
F = TypeVar("F", bound=Callable[..., Any])
def decorator(func: F) -> F:
func_doc = func.__doc__ if func.__doc__ else ""
deprecation_warning = (
f"This function has been deprecated since version {deprecated_in}"
+ (f" and will be removed in version {removed_in}" if removed_in else "")
+ ".\n Please refer to the documentation for more details."
)
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Callable:
if raise_exception:
raise DeprecationWarning(deprecation_warning)
warnings.warn(deprecation_warning, DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
appended_doc = f".. deprecated:: {deprecated_in}" + ("\n\n\t" if len(reasons) > 0 else "")
for reason in reasons:
appended_doc += "\n\t- " + reason
wrapper.__doc__ = f"**Deprecated function**.\n\n {func_doc}{appended_doc}"
return cast(F, wrapper)
return decorator
def hash_checkpoint(checkpoint_path: Union[str, Path], output_dir: Union[str, Path]) -> Tuple[Path, str]:
"""
Hash the checkpoint file in the format of ``<filename>-<hash>.<ext>``
to be used with ``check_hash`` of :func:`torch.hub.load_state_dict_from_url`.
Args:
checkpoint_path: Path to the checkpoint file.
output_dir: Output directory to store the hashed checkpoint file
(will be created if not exist).
Returns:
Path to the hashed checkpoint file, the first 8 digits of SHA256 hash.
.. versionadded:: 0.4.8
"""
if isinstance(checkpoint_path, str):
checkpoint_path = Path(checkpoint_path)
if not checkpoint_path.exists():
raise FileNotFoundError(f"{checkpoint_path.name} does not exist in {checkpoint_path.parent}.")
if isinstance(output_dir, str):
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
hash_obj = hashlib.sha256()
# taken from https://github.com/pytorch/vision/blob/main/references/classification/utils.py
with checkpoint_path.open("rb") as f:
# Read and update hash string value in blocks of 4KB
for byte_block in iter(lambda: f.read(4096), b""):
hash_obj.update(byte_block)
sha_hash = hash_obj.hexdigest()
old_filename = checkpoint_path.stem
new_filename = "-".join((old_filename, sha_hash[:8])) + ".pt"
hash_checkpoint_path = output_dir / new_filename
shutil.move(str(checkpoint_path), hash_checkpoint_path)
return hash_checkpoint_path, sha_hash
|
__all__ = ["NotComputableError"]
class NotComputableError(RuntimeError):
"""
Exception class to raise if Metric cannot be computed.
"""
|
# For compatibility
from ignite.utils import apply_to_tensor, apply_to_type, convert_tensor, to_onehot
__all__ = ["apply_to_tensor", "apply_to_type", "convert_tensor", "to_onehot"]
|
from typing import Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["MeanSquaredError"]
class MeanSquaredError(Metric):
r"""Calculates the `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.
.. math:: \text{MSE} = \frac{1}{N} \sum_{i=1}^N \|y_{i} - x_{i}\|^2
    where :math:`y_{i}` is the prediction tensor and :math:`x_{i}` is the ground truth tensor.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanSquaredError()
metric.attach(default_evaluator, 'mse')
preds = torch.tensor([
[1, 2, 4, 1],
[2, 3, 1, 5],
[1, 3, 5, 1],
[1, 5, 1 ,11]
])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['mse'])
.. testoutput::
3.828125
"""
_state_dict_all_req_keys = ("_sum_of_squared_errors", "_num_examples")
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_squared_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
squared_errors = torch.pow(y_pred - y.view_as(y_pred), 2)
self._sum_of_squared_errors += torch.sum(squared_errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_squared_errors", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError("MeanSquaredError must have at least one example before it can be computed.")
return self._sum_of_squared_errors.item() / self._num_examples
|
from typing import Callable, Union
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.handlers.timing import Timer
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
class Frequency(Metric):
"""Provides metrics for the number of examples processed per second.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. code-block:: python
# Compute number of tokens processed
wps_metric = Frequency(output_transform=lambda x: x['ntokens'])
wps_metric.attach(trainer, name='wps')
# Logging with TQDM
ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
# Progress bar will look like
# Epoch [2/10]: [12/24] 50%|█████ , wps=400 [00:17<1:23]
To compute examples processed per second every 50th iteration:
.. code-block:: python
# Compute number of tokens processed
wps_metric = Frequency(output_transform=lambda x: x['ntokens'])
wps_metric.attach(trainer, name='wps', event_name=Events.ITERATION_COMPLETED(every=50))
# Logging with TQDM
ProgressBar(persist=True).attach(trainer, metric_names=['wps'])
# Progress bar will look like
# Epoch [2/10]: [50/100] 50%|█████ , wps=400 [00:17<00:35]
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
) -> None:
super(Frequency, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self._timer = Timer()
self._acc = 0
self._n = 0
self._elapsed = 0.0
super(Frequency, self).reset()
@reinit__is_reduced
def update(self, output: int) -> None:
self._acc += output
self._n = self._acc
self._elapsed = self._timer.value()
@sync_all_reduce("_n", "_elapsed")
def compute(self) -> float:
time_divisor = 1.0
if idist.get_world_size() > 1:
time_divisor *= idist.get_world_size()
# Returns the average processed objects per second across all workers
return self._n / self._elapsed * time_divisor
def completed(self, engine: Engine, name: str) -> None:
engine.state.metrics[name] = int(self.compute())
# TODO: see issue https://github.com/pytorch/ignite/issues/1405
def attach( # type: ignore
self, engine: Engine, name: str, event_name: Events = Events.ITERATION_COMPLETED
) -> None:
engine.add_event_handler(Events.EPOCH_STARTED, self.started)
engine.add_event_handler(Events.ITERATION_COMPLETED, self.iteration_completed)
engine.add_event_handler(event_name, self.completed, name)
|
from typing import Callable, Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["MultiLabelConfusionMatrix"]
class MultiLabelConfusionMatrix(Metric):
"""Calculates a confusion matrix for multi-labelled, multi-class data.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must contain 0s and 1s and has the following shape (batch_size, num_classes, ...).
For example, `y_pred[i, j]` = 1 denotes that the j'th class is one of the labels of the i'th sample as predicted.
- `y` should have the following shape (batch_size, num_classes, ...) with 0s and 1s. For example,
`y[i, j]` = 1 denotes that the j'th class is one of the labels of the i'th sample according to the ground truth.
- both `y` and `y_pred` must be torch Tensors having any of the following types:
{torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64}. They must have the same dimensions.
- The confusion matrix 'M' is of dimension (num_classes, 2, 2).
* M[i, 0, 0] corresponds to count/rate of true negatives of class i
* M[i, 0, 1] corresponds to count/rate of false positives of class i
* M[i, 1, 0] corresponds to count/rate of false negatives of class i
* M[i, 1, 1] corresponds to count/rate of true positives of class i
- The classes present in M are indexed as 0, ... , num_classes-1 as can be inferred from above.
Args:
num_classes: Number of classes, should be > 1.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
normalized: whether to normalize confusion matrix by its sum or not.
Example:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MultiLabelConfusionMatrix(num_classes=3)
metric.attach(default_evaluator, "mlcm")
y_true = torch.tensor([
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 1],
])
y_pred = torch.tensor([
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["mlcm"])
.. testoutput::
tensor([[[0, 4],
[0, 1]],
[[3, 1],
[0, 1]],
[[1, 2],
[2, 0]]])
.. versionadded:: 0.4.5
"""
_state_dict_all_req_keys = ("confusion_matrix", "_num_examples")
def __init__(
self,
num_classes: int,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
normalized: bool = False,
):
if num_classes <= 1:
raise ValueError("Argument num_classes needs to be > 1")
self.num_classes = num_classes
self._num_examples = 0
self.normalized = normalized
super(MultiLabelConfusionMatrix, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self.confusion_matrix = torch.zeros(self.num_classes, 2, 2, dtype=torch.int64, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_input(output)
y_pred, y = output[0].detach(), output[1].detach()
self._num_examples += y.shape[0]
y_reshaped = y.transpose(0, 1).reshape(self.num_classes, -1)
y_pred_reshaped = y_pred.transpose(0, 1).reshape(self.num_classes, -1)
y_total = y_reshaped.sum(dim=1)
y_pred_total = y_pred_reshaped.sum(dim=1)
tp = (y_reshaped * y_pred_reshaped).sum(dim=1)
fp = y_pred_total - tp
fn = y_total - tp
tn = y_reshaped.shape[1] - tp - fp - fn
self.confusion_matrix += torch.stack([tn, fp, fn, tp], dim=1).reshape(-1, 2, 2).to(self._device)
@sync_all_reduce("confusion_matrix", "_num_examples")
def compute(self) -> torch.Tensor:
if self._num_examples == 0:
raise NotComputableError("Confusion matrix must have at least one example before it can be computed.")
if self.normalized:
conf = self.confusion_matrix.to(dtype=torch.float64)
sums = conf.sum(dim=(1, 2))
return conf / sums[:, None, None]
return self.confusion_matrix
def _check_input(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.ndimension() < 2:
raise ValueError(
f"y_pred must at least have shape (batch_size, num_classes (currently set to {self.num_classes}), ...)"
)
if y.ndimension() < 2:
raise ValueError(
f"y must at least have shape (batch_size, num_classes (currently set to {self.num_classes}), ...)"
)
if y_pred.shape[0] != y.shape[0]:
raise ValueError(f"y_pred and y have different batch size: {y_pred.shape[0]} vs {y.shape[0]}")
if y_pred.shape[1] != self.num_classes:
raise ValueError(f"y_pred does not have correct number of classes: {y_pred.shape[1]} vs {self.num_classes}")
if y.shape[1] != self.num_classes:
raise ValueError(f"y does not have correct number of classes: {y.shape[1]} vs {self.num_classes}")
if y.shape != y_pred.shape:
raise ValueError("y and y_pred shapes must match.")
valid_types = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
if y_pred.dtype not in valid_types:
raise ValueError(f"y_pred must be of any type: {valid_types}")
if y.dtype not in valid_types:
raise ValueError(f"y must be of any type: {valid_types}")
if not torch.equal(y_pred, y_pred**2):
raise ValueError("y_pred must be a binary tensor")
if not torch.equal(y, y**2):
raise ValueError("y must be a binary tensor")
|
from typing import Callable, Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["TopKCategoricalAccuracy"]
class TopKCategoricalAccuracy(Metric):
"""
Calculates the top-k categorical accuracy.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
k: the k in “top-k”.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
def process_function(engine, batch):
y_pred, y = batch
return y_pred, y
def one_hot_to_binary_output_transform(output):
y_pred, y = output
y = torch.argmax(y, dim=1) # one-hot vector to label index vector
return y_pred, y
engine = Engine(process_function)
metric = TopKCategoricalAccuracy(
k=2, output_transform=one_hot_to_binary_output_transform)
metric.attach(engine, 'top_k_accuracy')
preds = torch.tensor([
[0.7, 0.2, 0.05, 0.05], # 1 is in the top 2
[0.2, 0.3, 0.4, 0.1], # 0 is not in the top 2
[0.4, 0.4, 0.1, 0.1], # 0 is in the top 2
[0.7, 0.05, 0.2, 0.05] # 2 is in the top 2
])
target = torch.tensor([ # targets as one-hot vectors
[0, 1, 0, 0],
[1, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 1, 0]
])
state = engine.run([[preds, target]])
print(state.metrics['top_k_accuracy'])
.. testoutput::
0.75
"""
_state_dict_all_req_keys = ("_num_correct", "_num_examples")
def __init__(
self,
k: int = 5,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(TopKCategoricalAccuracy, self).__init__(output_transform, device=device)
self._k = k
@reinit__is_reduced
def reset(self) -> None:
self._num_correct = torch.tensor(0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
sorted_indices = torch.topk(y_pred, self._k, dim=1)[1]
expanded_y = y.view(-1, 1).expand(-1, self._k)
correct = torch.sum(torch.eq(sorted_indices, expanded_y), dim=1)
self._num_correct += torch.sum(correct).to(self._device)
self._num_examples += correct.shape[0]
@sync_all_reduce("_num_correct", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError(
"TopKCategoricalAccuracy must have at least one example before it can be computed."
)
return self._num_correct.item() / self._num_examples
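# --- Hedged usage sketch (not part of the library) ---------------------------
# A minimal, torch-only illustration of the bookkeeping in ``update``: the
# indices of the k largest scores are compared against the target index
# broadcast over k columns. The sample scores and targets are made up.
if __name__ == "__main__":
    k = 2
    y_pred = torch.tensor([[0.7, 0.2, 0.05, 0.05],
                           [0.2, 0.3, 0.4, 0.1]])
    y = torch.tensor([1, 0])

    topk_indices = torch.topk(y_pred, k, dim=1)[1]                  # (batch, k)
    hits = torch.eq(topk_indices, y.view(-1, 1).expand(-1, k)).sum(dim=1)
    print(hits.sum().item() / y.shape[0])  # fraction of samples with target in top-k (0.5 here)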
|
from typing import Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["MeanAbsoluteError"]
class MeanAbsoluteError(Metric):
r"""Calculates `the mean absolute error <https://en.wikipedia.org/wiki/Mean_absolute_error>`_.
.. math:: \text{MAE} = \frac{1}{N} \sum_{i=1}^N \lvert y_{i} - x_{i} \rvert
    where :math:`y_{i}` is the prediction tensor and :math:`x_{i}` is the ground truth tensor.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanAbsoluteError()
metric.attach(default_evaluator, 'mae')
preds = torch.tensor([
[1, 2, 4, 1],
[2, 3, 1, 5],
[1, 3, 5, 1],
[1, 5, 1 ,11]
])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['mae'])
.. testoutput::
2.9375
"""
_state_dict_all_req_keys = ("_sum_of_absolute_errors", "_num_examples")
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_absolute_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
absolute_errors = torch.abs(y_pred - y.view_as(y_pred))
self._sum_of_absolute_errors += torch.sum(absolute_errors).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_absolute_errors", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError("MeanAbsoluteError must have at least one example before it can be computed.")
return self._sum_of_absolute_errors.item() / self._num_examples
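# --- Hedged usage sketch (not part of the library) ---------------------------
# A minimal, torch-only check of the docstring example above: the sum of
# absolute errors over all elements divided by the number of samples
# (the batch dimension), which is exactly what ``update``/``compute`` track.
if __name__ == "__main__":
    preds = torch.tensor([[1.0, 2.0, 4.0, 1.0],
                          [2.0, 3.0, 1.0, 5.0],
                          [1.0, 3.0, 5.0, 1.0],
                          [1.0, 5.0, 1.0, 11.0]])
    target = preds * 0.75
    sum_abs_err = torch.abs(preds - target).sum()
    print((sum_abs_err / preds.shape[0]).item())  # 2.9375, matching the docstring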
|
import warnings
from typing import Callable, Optional, Sequence, Union
import torch
import torch.nn.functional as F
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["SSIM"]
class SSIM(Metric):
"""
Computes Structural Similarity Index Measure
- ``update`` must receive output of the form ``(y_pred, y)``. They have to be of the same type.
Valid :class:`torch.dtype` are the following:
- on CPU: `torch.float32`, `torch.float64`.
- on CUDA: `torch.float16`, `torch.bfloat16`, `torch.float32`, `torch.float64`.
Args:
data_range: Range of the image. Typically, ``1.0`` or ``255``.
kernel_size: Size of the kernel. Default: (11, 11)
sigma: Standard deviation of the gaussian kernel.
Argument is used if ``gaussian=True``. Default: (1.5, 1.5)
k1: Parameter of SSIM. Default: 0.01
k2: Parameter of SSIM. Default: 0.03
gaussian: ``True`` to use gaussian kernel, ``False`` to use uniform kernel
output_transform: A callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` can be un-normalized or normalized image tensors. Depending on that, the user might need
to adjust ``data_range``. ``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = SSIM(data_range=1.0)
metric.attach(default_evaluator, 'ssim')
preds = torch.rand([4, 3, 16, 16])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['ssim'])
.. testoutput::
0.9218971...
.. versionadded:: 0.4.2
"""
_state_dict_all_req_keys = ("_sum_of_ssim", "_num_examples", "_kernel")
def __init__(
self,
data_range: Union[int, float],
kernel_size: Union[int, Sequence[int]] = (11, 11),
sigma: Union[float, Sequence[float]] = (1.5, 1.5),
k1: float = 0.01,
k2: float = 0.03,
gaussian: bool = True,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if isinstance(kernel_size, int):
self.kernel_size: Sequence[int] = [kernel_size, kernel_size]
elif isinstance(kernel_size, Sequence):
self.kernel_size = kernel_size
else:
raise ValueError("Argument kernel_size should be either int or a sequence of int.")
if isinstance(sigma, float):
self.sigma: Sequence[float] = [sigma, sigma]
elif isinstance(sigma, Sequence):
self.sigma = sigma
else:
raise ValueError("Argument sigma should be either float or a sequence of float.")
if any(x % 2 == 0 or x <= 0 for x in self.kernel_size):
raise ValueError(f"Expected kernel_size to have odd positive number. Got {kernel_size}.")
if any(y <= 0 for y in self.sigma):
raise ValueError(f"Expected sigma to have positive number. Got {sigma}.")
super(SSIM, self).__init__(output_transform=output_transform, device=device)
self.gaussian = gaussian
self.data_range = data_range
self.c1 = (k1 * data_range) ** 2
self.c2 = (k2 * data_range) ** 2
self.pad_h = (self.kernel_size[0] - 1) // 2
self.pad_w = (self.kernel_size[1] - 1) // 2
self._kernel_2d = self._gaussian_or_uniform_kernel(kernel_size=self.kernel_size, sigma=self.sigma)
self._kernel: Optional[torch.Tensor] = None
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_ssim = torch.tensor(0.0, dtype=torch.float64, device=self._device)
self._num_examples = 0
def _uniform(self, kernel_size: int) -> torch.Tensor:
kernel = torch.zeros(kernel_size)
start_uniform_index = max(kernel_size // 2 - 2, 0)
end_uniform_index = min(kernel_size // 2 + 3, kernel_size)
min_, max_ = -2.5, 2.5
kernel[start_uniform_index:end_uniform_index] = 1 / (max_ - min_)
return kernel.unsqueeze(dim=0) # (1, kernel_size)
def _gaussian(self, kernel_size: int, sigma: float) -> torch.Tensor:
ksize_half = (kernel_size - 1) * 0.5
kernel = torch.linspace(-ksize_half, ksize_half, steps=kernel_size, device=self._device)
gauss = torch.exp(-0.5 * (kernel / sigma).pow(2))
return (gauss / gauss.sum()).unsqueeze(dim=0) # (1, kernel_size)
def _gaussian_or_uniform_kernel(self, kernel_size: Sequence[int], sigma: Sequence[float]) -> torch.Tensor:
if self.gaussian:
kernel_x = self._gaussian(kernel_size[0], sigma[0])
kernel_y = self._gaussian(kernel_size[1], sigma[1])
else:
kernel_x = self._uniform(kernel_size[0])
kernel_y = self._uniform(kernel_size[1])
return torch.matmul(kernel_x.t(), kernel_y) # (kernel_size, 1) * (1, kernel_size)
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.dtype != y.dtype:
raise TypeError(
f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
)
if y_pred.shape != y.shape:
raise ValueError(
f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
)
if len(y_pred.shape) != 4 or len(y.shape) != 4:
raise ValueError(
f"Expected y_pred and y to have BxCxHxW shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
)
# converts potential integer tensor to fp
if not y.is_floating_point():
y = y.float()
if not y_pred.is_floating_point():
y_pred = y_pred.float()
nb_channel = y_pred.size(1)
if self._kernel is None or self._kernel.shape[0] != nb_channel:
self._kernel = self._kernel_2d.expand(nb_channel, 1, -1, -1)
if y_pred.device != self._kernel.device:
if self._kernel.device == torch.device("cpu"):
self._kernel = self._kernel.to(device=y_pred.device)
elif y_pred.device == torch.device("cpu"):
warnings.warn(
"y_pred tensor is on cpu device but previous computation was on another device: "
f"{self._kernel.device}. To avoid having a performance hit, please ensure that all "
"y and y_pred tensors are on the same device.",
)
y_pred = y_pred.to(device=self._kernel.device)
y = y.to(device=self._kernel.device)
y_pred = F.pad(y_pred, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
y = F.pad(y, [self.pad_w, self.pad_w, self.pad_h, self.pad_h], mode="reflect")
if y_pred.dtype != self._kernel.dtype:
self._kernel = self._kernel.to(dtype=y_pred.dtype)
input_list = [y_pred, y, y_pred * y_pred, y * y, y_pred * y]
outputs = F.conv2d(torch.cat(input_list), self._kernel, groups=nb_channel)
batch_size = y_pred.size(0)
output_list = [outputs[x * batch_size : (x + 1) * batch_size] for x in range(len(input_list))]
mu_pred_sq = output_list[0].pow(2)
mu_target_sq = output_list[1].pow(2)
mu_pred_target = output_list[0] * output_list[1]
sigma_pred_sq = output_list[2] - mu_pred_sq
sigma_target_sq = output_list[3] - mu_target_sq
sigma_pred_target = output_list[4] - mu_pred_target
a1 = 2 * mu_pred_target + self.c1
a2 = 2 * sigma_pred_target + self.c2
b1 = mu_pred_sq + mu_target_sq + self.c1
b2 = sigma_pred_sq + sigma_target_sq + self.c2
ssim_idx = (a1 * a2) / (b1 * b2)
self._sum_of_ssim += torch.mean(ssim_idx, (1, 2, 3), dtype=torch.float64).sum().to(device=self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_ssim", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("SSIM must have at least one example before it can be computed.")
return (self._sum_of_ssim / self._num_examples).item()
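# --- Hedged usage sketch (not part of the library) ---------------------------
# A minimal, torch-only illustration of how the separable gaussian window used
# above is built: two 1D gaussians combined by an outer product, plus the
# stabilising constants c1/c2 derived from k1, k2 and data_range. The helper
# name ``gaussian_1d`` is local to this sketch.
if __name__ == "__main__":
    kernel_size, sigma, data_range, k1, k2 = (11, 11), (1.5, 1.5), 1.0, 0.01, 0.03

    def gaussian_1d(size: int, s: float) -> torch.Tensor:
        half = (size - 1) * 0.5
        coords = torch.linspace(-half, half, steps=size)
        g = torch.exp(-0.5 * (coords / s).pow(2))
        return (g / g.sum()).unsqueeze(0)  # (1, size), normalized to sum to 1

    kernel_2d = torch.matmul(gaussian_1d(kernel_size[0], sigma[0]).t(),
                             gaussian_1d(kernel_size[1], sigma[1]))  # (11, 11)
    c1, c2 = (k1 * data_range) ** 2, (k2 * data_range) ** 2
    print(kernel_2d.shape, round(kernel_2d.sum().item(), 6), c1, c2)  # kernel sums to ~1.0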
|
from typing import Sequence
import torch
from ignite.metrics.metric import reinit__is_reduced
from ignite.metrics.precision import _BasePrecisionRecall
__all__ = ["Recall"]
class Recall(_BasePrecisionRecall):
r"""Calculates recall for binary, multiclass and multilabel data.
.. math:: \text{Recall} = \frac{ TP }{ TP + FN }
where :math:`\text{TP}` is true positives and :math:`\text{FN}` is false negatives.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must be in the following shape (batch_size, num_categories, ...) or (batch_size, ...).
- `y` must be in the following shape (batch_size, ...).
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
average: available options are
False
              default option. For multiclass and multilabel inputs, per class and per label
metric is returned respectively.
None
like `False` option except that per class metric is returned for binary data as well.
For compatibility with Scikit-Learn api.
'micro'
Metric is computed counting stats of classes/labels altogether.
.. math::
\text{Micro Recall} = \frac{\sum_{k=1}^C TP_k}{\sum_{k=1}^C TP_k+FN_k}
where :math:`C` is the number of classes/labels (2 in binary case). :math:`k` in
              :math:`TP_k` and :math:`FN_k` means that the measures are computed for class/label :math:`k` (in
a one-vs-rest sense in multiclass case).
For binary and multiclass inputs, this is equivalent with accuracy,
so use :class:`~ignite.metrics.accuracy.Accuracy`.
'samples'
for multilabel input, at first, recall is computed on a
per sample basis and then average across samples is returned.
.. math::
\text{Sample-averaged Recall} = \frac{\sum_{n=1}^N \frac{TP_n}{TP_n+FN_n}}{N}
where :math:`N` is the number of samples. :math:`n` in :math:`TP_n` and :math:`FN_n`
means that the measures are computed for sample :math:`n`, across labels.
Incompatible with binary and multiclass inputs.
'weighted'
like macro recall but considers class/label imbalance. For binary and multiclass
input, it computes metric for each class then returns average of them weighted by
support of classes (number of actual samples in each class). For multilabel input,
it computes recall for each label then returns average of them weighted by support
of labels (number of actual positive samples in each label).
.. math::
Recall_k = \frac{TP_k}{TP_k+FN_k}
.. math::
\text{Weighted Recall} = \frac{\sum_{k=1}^C P_k * Recall_k}{N}
where :math:`C` is the number of classes (2 in binary case). :math:`P_k` is the number
of samples belonged to class :math:`k` in binary and multiclass case, and the number of
positive samples belonged to label :math:`k` in multilabel case.
Note that for binary and multiclass data, weighted recall is equivalent
with accuracy, so use :class:`~ignite.metrics.accuracy.Accuracy`.
macro
computes macro recall which is unweighted average of metric computed across
classes or labels.
.. math::
\text{Macro Recall} = \frac{\sum_{k=1}^C Recall_k}{C}
where :math:`C` is the number of classes (2 in binary case).
True
like macro option. For backward compatibility.
is_multilabel: flag to use in multilabel case. By default, value is False.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Binary case. In binary and multilabel cases, the elements of
`y` and `y_pred` should have 0 or 1 values.
.. testcode:: 1
metric = Recall()
two_class_metric = Recall(average=None) # Returns recall for both classes
metric.attach(default_evaluator, "recall")
two_class_metric.attach(default_evaluator, "both classes recall")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Recall: {state.metrics['recall']}")
print(f"Recall for class 0 and class 1: {state.metrics['both classes recall']}")
.. testoutput:: 1
Recall: 0.75
Recall for class 0 and class 1: tensor([0.5000, 0.7500], dtype=torch.float64)
Multiclass case
.. testcode:: 2
metric = Recall()
macro_metric = Recall(average=True)
metric.attach(default_evaluator, "recall")
macro_metric.attach(default_evaluator, "macro recall")
y_true = torch.tensor([2, 0, 2, 1, 0])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288]
])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Recall: {state.metrics['recall']}")
print(f"Macro Recall: {state.metrics['macro recall']}")
.. testoutput:: 2
Recall: tensor([0.5000, 0.0000, 0.5000], dtype=torch.float64)
Macro Recall: 0.3333333333333333
Multilabel case, the shapes must be (batch_size, num_categories, ...)
.. testcode:: 3
metric = Recall(is_multilabel=True)
micro_metric = Recall(is_multilabel=True, average='micro')
macro_metric = Recall(is_multilabel=True, average=True)
samples_metric = Recall(is_multilabel=True, average='samples')
metric.attach(default_evaluator, "recall")
micro_metric.attach(default_evaluator, "micro recall")
macro_metric.attach(default_evaluator, "macro recall")
samples_metric.attach(default_evaluator, "samples recall")
y_true = torch.tensor([
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 1],
])
y_pred = torch.tensor([
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Recall: {state.metrics['recall']}")
print(f"Micro Recall: {state.metrics['micro recall']}")
print(f"Macro Recall: {state.metrics['macro recall']}")
print(f"Samples Recall: {state.metrics['samples recall']}")
.. testoutput:: 3
Recall: tensor([1., 1., 0.], dtype=torch.float64)
Micro Recall: 0.5
Macro Recall: 0.6666666666666666
Samples Recall: 0.3
Thresholding of predictions can be done as below:
.. testcode:: 4
def thresholded_output_transform(output):
y_pred, y = output
y_pred = torch.round(y_pred)
return y_pred, y
metric = Recall(output_transform=thresholded_output_transform)
metric.attach(default_evaluator, "recall")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['recall'])
.. testoutput:: 4
0.75
.. versionchanged:: 0.4.10
Some new options were added to `average` parameter.
"""
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape(output)
self._check_type(output)
_, y, correct = self._prepare_output(output)
if self._average == "samples":
actual_positives = y.sum(dim=1)
true_positives = correct.sum(dim=1)
self._numerator += torch.sum(true_positives / (actual_positives + self.eps))
self._denominator += y.size(0)
elif self._average == "micro":
self._denominator += y.sum()
self._numerator += correct.sum()
else: # _average in [False, 'macro', 'weighted']
self._denominator += y.sum(dim=0)
self._numerator += correct.sum(dim=0)
if self._average == "weighted":
self._weight += y.sum(dim=0)
self._updated = True
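# --- Hedged usage sketch (not part of the library) ---------------------------
# A minimal, torch-only illustration of the bookkeeping in ``update`` for
# multilabel input of shape (batch_size, num_labels): "micro" pools true
# positives and actual positives over all labels, while per-label counts give
# the unaveraged result whose mean is the "macro" value. Sample tensors are
# made up; clamp(min=1) stands in for the eps guard against empty labels.
if __name__ == "__main__":
    y_true = torch.tensor([[0, 0, 1], [1, 0, 0], [0, 1, 1]])
    y_pred = torch.tensor([[1, 0, 1], [1, 0, 1], [0, 1, 0]])
    correct = y_true * y_pred                            # true positives per entry

    per_label = correct.sum(dim=0) / y_true.sum(dim=0).clamp(min=1)
    micro = correct.sum() / y_true.sum()
    print(per_label, micro.item(), per_label.mean().item())  # per-label, micro, macro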
|
from typing import Callable, Optional, Union
import torch
from ignite.metrics.metrics_lambda import MetricsLambda
from ignite.metrics.precision import Precision
from ignite.metrics.recall import Recall
__all__ = ["Fbeta"]
def Fbeta(
beta: float,
average: bool = True,
precision: Optional[Precision] = None,
recall: Optional[Recall] = None,
output_transform: Optional[Callable] = None,
device: Union[str, torch.device] = torch.device("cpu"),
) -> MetricsLambda:
r"""Calculates F-beta score.
.. math::
F_\beta = \left( 1 + \beta^2 \right) * \frac{ \text{precision} * \text{recall} }
{ \left( \beta^2 * \text{precision} \right) + \text{recall} }
where :math:`\beta` is a positive real factor.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must be in the following shape (batch_size, num_categories, ...) or (batch_size, ...).
- `y` must be in the following shape (batch_size, ...).
Args:
beta: weight of precision in harmonic mean
average: if True, F-beta score is computed as the unweighted average (across all classes
in multiclass case), otherwise, returns a tensor with F-beta score for each class in multiclass case.
precision: precision object metric with `average=False` to compute F-beta score
recall: recall object metric with `average=False` to compute F-beta score
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. It is used only if precision or recall are not provided.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Returns:
MetricsLambda, F-beta metric
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Binary case
.. testcode:: 1
P = Precision(average=False)
R = Recall(average=False)
metric = Fbeta(beta=1.0, precision=P, recall=R)
metric.attach(default_evaluator, "f-beta")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["f-beta"])
.. testoutput:: 1
0.7499...
Multiclass case
.. testcode:: 2
P = Precision(average=False)
R = Recall(average=False)
metric = Fbeta(beta=1.0, precision=P, recall=R)
metric.attach(default_evaluator, "f-beta")
y_true = torch.tensor([2, 0, 2, 1, 0, 1])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288],
[0.7748, 0.9542, 0.8573],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["f-beta"])
.. testoutput:: 2
0.5222...
F-beta can be computed for each class as done below:
.. testcode:: 3
P = Precision(average=False)
R = Recall(average=False)
metric = Fbeta(beta=1.0, average=False, precision=P, recall=R)
metric.attach(default_evaluator, "f-beta")
y_true = torch.tensor([2, 0, 2, 1, 0, 1])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288],
[0.7748, 0.9542, 0.8573],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["f-beta"])
.. testoutput:: 3
tensor([0.5000, 0.6667, 0.4000], dtype=torch.float64)
The elements of `y` and `y_pred` should have 0 or 1 values. Thresholding of predictions can
be done as below:
.. testcode:: 4
def thresholded_output_transform(output):
y_pred, y = output
y_pred = torch.round(y_pred)
return y_pred, y
P = Precision(average=False, output_transform=thresholded_output_transform)
R = Recall(average=False, output_transform=thresholded_output_transform)
metric = Fbeta(beta=1.0, precision=P, recall=R)
metric.attach(default_evaluator, "f-beta")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["f-beta"])
.. testoutput:: 4
0.7499...
"""
if not (beta > 0):
raise ValueError(f"Beta should be a positive integer, but given {beta}")
if precision is not None and output_transform is not None:
raise ValueError("If precision argument is provided, output_transform should be None")
if recall is not None and output_transform is not None:
raise ValueError("If recall argument is provided, output_transform should be None")
if precision is None:
precision = Precision(
output_transform=(lambda x: x) if output_transform is None else output_transform, # type: ignore[arg-type]
average=False,
device=device,
)
elif precision._average:
raise ValueError("Input precision metric should have average=False")
if recall is None:
recall = Recall(
output_transform=(lambda x: x) if output_transform is None else output_transform, # type: ignore[arg-type]
average=False,
device=device,
)
elif recall._average:
raise ValueError("Input recall metric should have average=False")
fbeta = (1.0 + beta**2) * precision * recall / (beta**2 * precision + recall + 1e-15)
if average:
fbeta = fbeta.mean().item()
return fbeta
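# --- Hedged usage sketch (not part of the library) ---------------------------
# A minimal, torch-only illustration of the closing formula above: given
# per-class precision and recall (made-up values here), F-beta is their
# weighted harmonic mean, and averaging the per-class scores corresponds to
# ``average=True``.
if __name__ == "__main__":
    beta = 1.0
    precision = torch.tensor([0.50, 0.75, 1.00])
    recall = torch.tensor([1.00, 0.60, 0.50])
    fbeta = (1.0 + beta**2) * precision * recall / (beta**2 * precision + recall + 1e-15)
    print(fbeta, fbeta.mean().item())  # per-class F-beta and its unweighted average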
|
import math
from typing import Union
import torch
from ignite.metrics.mean_squared_error import MeanSquaredError
__all__ = ["RootMeanSquaredError"]
class RootMeanSquaredError(MeanSquaredError):
r"""Calculates the `root mean squared error <https://en.wikipedia.org/wiki/Root-mean-square_deviation>`_.
.. math:: \text{RMSE} = \sqrt{ \frac{1}{N} \sum_{i=1}^N \|y_{i} - x_{i} \|^2 }
    where :math:`y_{i}` is the prediction tensor and :math:`x_{i}` is the ground truth tensor.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
        ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = RootMeanSquaredError()
metric.attach(default_evaluator, 'rmse')
preds = torch.tensor([
[1, 2, 4, 1],
[2, 3, 1, 5],
[1, 3, 5, 1],
[1, 5, 1 ,11]
])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['rmse'])
.. testoutput::
1.956559480312316
"""
def compute(self) -> Union[torch.Tensor, float]:
mse = super(RootMeanSquaredError, self).compute()
return math.sqrt(mse)
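# --- Hedged usage sketch (not part of the library) ---------------------------
# A minimal, torch-only check of the docstring example above: RMSE is the
# square root of the MSE accumulated by the parent class, i.e. the sum of
# squared errors over all elements divided by the number of samples.
if __name__ == "__main__":
    preds = torch.tensor([[1.0, 2.0, 4.0, 1.0],
                          [2.0, 3.0, 1.0, 5.0],
                          [1.0, 3.0, 5.0, 1.0],
                          [1.0, 5.0, 1.0, 11.0]])
    target = preds * 0.75
    mse = torch.pow(preds - target, 2).sum().item() / preds.shape[0]
    print(math.sqrt(mse))  # ~1.9566, matching the docstring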
|
from ignite.metrics.accumulation import Average, GeometricAverage, VariableAccumulation
from ignite.metrics.accuracy import Accuracy
from ignite.metrics.classification_report import ClassificationReport
from ignite.metrics.confusion_matrix import ConfusionMatrix, DiceCoefficient, IoU, JaccardIndex, mIoU
from ignite.metrics.epoch_metric import EpochMetric
from ignite.metrics.fbeta import Fbeta
from ignite.metrics.frequency import Frequency
from ignite.metrics.gan.fid import FID
from ignite.metrics.gan.inception_score import InceptionScore
from ignite.metrics.loss import Loss
from ignite.metrics.mean_absolute_error import MeanAbsoluteError
from ignite.metrics.mean_pairwise_distance import MeanPairwiseDistance
from ignite.metrics.mean_squared_error import MeanSquaredError
from ignite.metrics.metric import BatchFiltered, BatchWise, EpochWise, Metric, MetricUsage
from ignite.metrics.metrics_lambda import MetricsLambda
from ignite.metrics.multilabel_confusion_matrix import MultiLabelConfusionMatrix
from ignite.metrics.nlp.bleu import Bleu
from ignite.metrics.nlp.rouge import Rouge, RougeL, RougeN
from ignite.metrics.precision import Precision
from ignite.metrics.psnr import PSNR
from ignite.metrics.recall import Recall
from ignite.metrics.root_mean_squared_error import RootMeanSquaredError
from ignite.metrics.running_average import RunningAverage
from ignite.metrics.ssim import SSIM
from ignite.metrics.top_k_categorical_accuracy import TopKCategoricalAccuracy
__all__ = [
"Metric",
"Accuracy",
"Loss",
"MetricsLambda",
"MeanAbsoluteError",
"MeanPairwiseDistance",
"MeanSquaredError",
"ConfusionMatrix",
"ClassificationReport",
"TopKCategoricalAccuracy",
"Average",
"DiceCoefficient",
"EpochMetric",
"Fbeta",
"FID",
"GeometricAverage",
"IoU",
"InceptionScore",
"mIoU",
"JaccardIndex",
"MultiLabelConfusionMatrix",
"Precision",
"PSNR",
"Recall",
"RootMeanSquaredError",
"RunningAverage",
"VariableAccumulation",
"Frequency",
"SSIM",
"Bleu",
"Rouge",
"RougeN",
"RougeL",
]
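# --- Hedged usage sketch (not part of the package) ----------------------------
# A minimal sketch, assuming ignite is installed, of attaching metrics exported
# above to an evaluation Engine and composing them arithmetically. Arithmetic
# on metrics produces a MetricsLambda (the same mechanism Fbeta relies on);
# the toy batch below is made up.
if __name__ == "__main__":
    import torch

    from ignite.engine import Engine

    def eval_step(engine, batch):
        return batch  # batch is already (y_pred, y)

    evaluator = Engine(eval_step)

    precision = Precision(average=False)
    recall = Recall(average=False)
    f1 = precision * recall * 2 / (precision + recall + 1e-20)
    f1 = MetricsLambda(lambda t: torch.mean(t).item(), f1)
    f1.attach(evaluator, "f1")

    y_pred = torch.tensor([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
    y_true = torch.tensor([0, 1, 1])
    state = evaluator.run([[y_pred, y_true]])
    print(state.metrics["f1"])  # ~0.667 for this toy batch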
|
import numbers
from typing import Callable, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["VariableAccumulation", "GeometricAverage", "Average"]
class VariableAccumulation(Metric):
"""Single variable accumulator helper to compute (arithmetic, geometric, harmonic) average of a single variable.
- ``update`` must receive output of the form `x`.
- `x` can be a number or `torch.Tensor`.
Note:
The class stores input into two public variables: `accumulator` and `num_examples`.
Number of samples is updated following the rule:
- `+1` if input is a number
- `+1` if input is a 1D `torch.Tensor`
- `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
Args:
op: a callable to update accumulator. Method's signature is `(accumulator, output)`.
For example, to compute arithmetic mean value, `op = lambda a, x: a + x`.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
"""
required_output_keys = None
_state_dict_all_req_keys = ("accumulator", "num_examples")
def __init__(
self,
op: Callable,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if not callable(op):
raise TypeError(f"Argument op should be a callable, but given {type(op)}")
self._op = op
super(VariableAccumulation, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self.accumulator = torch.tensor(0.0, dtype=torch.float64, device=self._device)
self.num_examples = 0
def _check_output_type(self, output: Union[float, torch.Tensor]) -> None:
if not isinstance(output, (numbers.Number, torch.Tensor)):
raise TypeError(f"Output should be a number or torch.Tensor, but given {type(output)}")
@reinit__is_reduced
def update(self, output: Union[float, torch.Tensor]) -> None:
self._check_output_type(output)
if isinstance(output, torch.Tensor):
output = output.detach()
if not (output.device == self._device and output.dtype == self.accumulator.dtype):
output = output.to(self.accumulator)
self.accumulator = self._op(self.accumulator, output)
if isinstance(output, torch.Tensor):
self.num_examples += output.shape[0] if len(output.shape) > 1 else 1
else:
self.num_examples += 1
@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> Tuple[torch.Tensor, int]:
return self.accumulator, self.num_examples
class Average(VariableAccumulation):
"""Helper class to compute arithmetic average of a single variable.
- ``update`` must receive output of the form `x`.
- `x` can be a number or `torch.Tensor`.
Note:
Number of samples is updated following the rule:
- `+1` if input is a number
- `+1` if input is a 1D `torch.Tensor`
- `+batch_size` if input is an ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
is summed up and added to the accumulator: `accumulator += x.sum(dim=0)`
        ``output_transform`` can be added to the metric to transform the output into the form expected by the metric.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = Average()
metric.attach(default_evaluator, 'avg')
            # Case 1. input is a number
data = torch.tensor([0, 1, 2, 3, 4])
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
2.0
.. testcode::
metric = Average()
metric.attach(default_evaluator, 'avg')
# Case 2. input is a 1D torch.Tensor
data = torch.tensor([
[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[3, 3, 3]
])
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
tensor([1.5000, 1.5000, 1.5000], dtype=torch.float64)
.. testcode::
metric = Average()
metric.attach(default_evaluator, 'avg')
# Case 3. input is a ND torch.Tensor
data = [
torch.tensor([[0, 0, 0], [1, 1, 1]]),
torch.tensor([[2, 2, 2], [3, 3, 3]])
]
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
tensor([1.5000, 1.5000, 1.5000], dtype=torch.float64)
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
def _mean_op(a: Union[float, torch.Tensor], x: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]:
if isinstance(x, torch.Tensor) and x.ndim > 1:
x = x.sum(dim=0)
return a + x
super(Average, self).__init__(op=_mean_op, output_transform=output_transform, device=device)
@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self.num_examples < 1:
raise NotComputableError(
f"{self.__class__.__name__} must have at least one example before it can be computed."
)
return self.accumulator / self.num_examples
class GeometricAverage(VariableAccumulation):
"""Helper class to compute geometric average of a single variable.
- ``update`` must receive output of the form `x`.
- `x` can be a positive number or a positive `torch.Tensor`, such that ``torch.log(x)`` is not `nan`.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Note:
Number of samples is updated following the rule:
- `+1` if input is a number
- `+1` if input is a 1D `torch.Tensor`
- `+batch_size` if input is a ND `torch.Tensor`. Batch size is the first dimension (`shape[0]`).
For input `x` being an ND `torch.Tensor` with N > 1, the first dimension is seen as the number of samples and
is aggregated and added to the accumulator: `accumulator *= prod(x, dim=0)`
        ``output_transform`` can be added to the metric to transform the output into the form expected by the metric.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = GeometricAverage()
metric.attach(default_evaluator, 'avg')
            # Case 1. input is a number
data = torch.tensor([1, 2, 3])
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
1.8171...
.. testcode::
metric = GeometricAverage()
metric.attach(default_evaluator, 'avg')
# Case 2. input is a 1D torch.Tensor
data = torch.tensor([
[1, 1, 1],
[2, 2, 2],
[3, 3, 3],
[4, 4, 4],
])
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
tensor([2.2134, 2.2134, 2.2134], dtype=torch.float64)
.. testcode::
metric = GeometricAverage()
metric.attach(default_evaluator, 'avg')
# Case 3. input is a ND torch.Tensor
data = [
torch.tensor([[1, 1, 1], [2, 2, 2]]),
torch.tensor([[3, 3, 3], [4, 4, 4]])
]
state = default_evaluator.run(data)
print(state.metrics['avg'])
.. testoutput::
tensor([2.2134, 2.2134, 2.2134], dtype=torch.float64)
"""
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
def _geom_op(a: torch.Tensor, x: Union[float, torch.Tensor]) -> torch.Tensor:
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
x = torch.log(x)
if x.ndim > 1:
x = x.sum(dim=0)
return a + x
super(GeometricAverage, self).__init__(op=_geom_op, output_transform=output_transform, device=device)
@sync_all_reduce("accumulator", "num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self.num_examples < 1:
raise NotComputableError(
f"{self.__class__.__name__} must have at least one example before it can be computed."
)
tensor = torch.exp(self.accumulator / self.num_examples)
if tensor.numel() == 1:
return tensor.item()
return tensor
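# --- Hedged usage sketch (not part of the library) ---------------------------
# A minimal sketch of using the accumulators above directly, without an Engine:
# VariableAccumulation applies the given ``op`` on every update, while Average
# and GeometricAverage turn the accumulated value into a mean on compute.
# The update values are made up.
if __name__ == "__main__":
    acc = VariableAccumulation(op=lambda a, x: a + x)
    for v in [1.0, 2.0, 3.0]:
        acc.update(v)
    total, n = acc.compute()
    print(total.item(), n)                     # 6.0, 3

    avg = Average()
    geo = GeometricAverage()
    for v in [1.0, 2.0, 4.0]:
        avg.update(v)
        geo.update(v)
    print(avg.compute().item(), geo.compute())  # arithmetic mean ~2.333, geometric mean 2.0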
|
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
from collections.abc import Mapping
from functools import wraps
from numbers import Number
from typing import Any, Callable, cast, Dict, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union
import torch
import ignite.distributed as idist
from ignite.base.mixins import Serializable
from ignite.engine import CallableEventWithFilter, Engine, Events
if TYPE_CHECKING:
from ignite.metrics.metrics_lambda import MetricsLambda
__all__ = [
"Metric",
"MetricUsage",
"EpochWise",
"BatchWise",
"BatchFiltered",
"RunningEpochWise",
"RunningBatchWise",
"SingleEpochRunningBatchWise",
]
class MetricUsage:
"""
Base class for all usages of metrics.
A usage of metric defines the events when a metric starts to compute, updates and completes.
Valid events are from :class:`~ignite.engine.events.Events`.
Args:
started: event when the metric starts to compute. This event will be associated to
:meth:`~ignite.metrics.metric.Metric.started`.
completed: event when the metric completes. This event will be associated to
:meth:`~ignite.metrics.metric.Metric.completed`.
iteration_completed: event when the metric updates. This event will be associated to
:meth:`~ignite.metrics.metric.Metric.iteration_completed`.
"""
usage_name: str
def __init__(self, started: Events, completed: Events, iteration_completed: CallableEventWithFilter) -> None:
self.__started = started
self.__completed = completed
self.__iteration_completed = iteration_completed
@property
def STARTED(self) -> Events:
return self.__started
@property
def COMPLETED(self) -> Events:
return self.__completed
@property
def ITERATION_COMPLETED(self) -> CallableEventWithFilter:
return self.__iteration_completed
class EpochWise(MetricUsage):
"""
Epoch-wise usage of Metrics. It's the default and most common usage of metrics.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``EPOCH_STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``EPOCH_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "epoch_wise"
def __init__(self) -> None:
super(EpochWise, self).__init__(
started=Events.EPOCH_STARTED,
completed=Events.EPOCH_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED,
)
class RunningEpochWise(EpochWise):
"""
Running epoch-wise usage of Metrics. It's the running version of the :class:`~.metrics.metric.EpochWise` metric
usage. A metric with such a usage most likely accompanies an :class:`~.metrics.metric.EpochWise` one to compute
a running measure of it e.g. running average.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``EPOCH_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``EPOCH_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "running_epoch_wise"
def __init__(self) -> None:
super(EpochWise, self).__init__(
started=Events.STARTED,
completed=Events.EPOCH_COMPLETED,
iteration_completed=Events.EPOCH_COMPLETED,
)
class BatchWise(MetricUsage):
"""
Batch-wise usage of Metrics.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``ITERATION_STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``ITERATION_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "batch_wise"
def __init__(self) -> None:
super(BatchWise, self).__init__(
started=Events.ITERATION_STARTED,
completed=Events.ITERATION_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED,
)
class RunningBatchWise(BatchWise):
"""
    Running batch-wise usage of Metrics. It's the running version of the :class:`~.metrics.metric.BatchWise` metric
usage. A metric with such a usage could for example accompany a :class:`~.metrics.metric.BatchWise` one to compute
a running measure of it e.g. running average.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``ITERATION_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "running_batch_wise"
def __init__(self) -> None:
super(BatchWise, self).__init__(
started=Events.STARTED,
completed=Events.ITERATION_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED,
)
class SingleEpochRunningBatchWise(BatchWise):
"""
Running batch-wise usage of Metrics in a single epoch. It's like :class:`~.metrics.metric.RunningBatchWise` metric
    usage with the difference that it is used during a single epoch.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``EPOCH_STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on every ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``ITERATION_COMPLETED``.
Attributes:
usage_name: usage name string
"""
usage_name: str = "single_epoch_running_batch_wise"
def __init__(self) -> None:
super(BatchWise, self).__init__(
started=Events.EPOCH_STARTED,
completed=Events.ITERATION_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED,
)
class BatchFiltered(MetricUsage):
"""
    Batch filtered usage of Metrics. This usage is similar to epoch-wise but the update event is filtered.
Metric's methods are triggered on the following engine events:
- :meth:`~ignite.metrics.metric.Metric.started` on every ``EPOCH_STARTED``
(See :class:`~ignite.engine.events.Events`).
- :meth:`~ignite.metrics.metric.Metric.iteration_completed` on filtered ``ITERATION_COMPLETED``.
- :meth:`~ignite.metrics.metric.Metric.completed` on every ``EPOCH_COMPLETED``.
Args:
args: Positional arguments to setup :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED`
kwargs: Keyword arguments to setup :attr:`~ignite.engine.events.Events.ITERATION_COMPLETED`
handled by :meth:`~ignite.metrics.metric.Metric.iteration_completed`.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super(BatchFiltered, self).__init__(
started=Events.EPOCH_STARTED,
completed=Events.EPOCH_COMPLETED,
iteration_completed=Events.ITERATION_COMPLETED(*args, **kwargs),
)
class Metric(Serializable, metaclass=ABCMeta):
"""
Base class for all Metrics.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Attributes:
        required_output_keys: tuple of keys required to be found in ``engine.state.output`` if the
            latter is a dictionary. By default, ``("y_pred", "y")``. This is useful with custom metrics that can require
            other arguments than predictions ``y_pred`` and targets ``y``. See an example below.
Examples:
Let's implement a custom metric that requires ``y_pred``, ``y`` and ``x`` as input for ``update`` function.
        In the example below we show how to set up a standard metric like Accuracy and the custom metric, used by an
        ``evaluator`` created with the :meth:`~ignite.engine.create_supervised_evaluator` method.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. code-block:: python
# https://discuss.pytorch.org/t/how-access-inputs-in-custom-ignite-metric/91221/5
import torch
import torch.nn as nn
from ignite.metrics import Metric, Accuracy
from ignite.engine import create_supervised_evaluator
class CustomMetric(Metric):
required_output_keys = ("y_pred", "y", "x")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def update(self, output):
y_pred, y, x = output
# ...
def reset(self):
# ...
pass
def compute(self):
# ...
pass
model = ...
metrics = {
"Accuracy": Accuracy(),
"CustomMetric": CustomMetric()
}
evaluator = create_supervised_evaluator(
model,
metrics=metrics,
output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred}
)
res = evaluator.run(data)
.. versionchanged:: 0.4.2
``required_output_keys`` became public attribute.
"""
# public class attribute
required_output_keys: Optional[Tuple] = ("y_pred", "y")
# for backward compatibility
_required_output_keys = required_output_keys
def __init__(
self, output_transform: Callable = lambda x: x, device: Union[str, torch.device] = torch.device("cpu")
):
self._output_transform = output_transform
# Some metrics have a large performance regression when run on XLA devices, so for now, we disallow it.
if torch.device(device).type == "xla":
raise ValueError("Cannot create metric on an XLA device. Use device='cpu' instead.")
self._device = torch.device(device)
self.reset()
@abstractmethod
def reset(self) -> None:
"""
Resets the metric to its initial state.
By default, this is called at the start of each epoch.
"""
pass
@abstractmethod
def update(self, output: Any) -> None:
"""
Updates the metric's state using the passed batch output.
By default, this is called once for each batch.
Args:
            output: this is the output from the engine's process function.
"""
pass
@abstractmethod
def compute(self) -> Any:
"""
Computes the metric based on its accumulated state.
By default, this is called at the end of each epoch.
Returns:
Any: | the actual quantity of interest. However, if a :class:`~collections.abc.Mapping` is returned,
it will be (shallow) flattened into `engine.state.metrics` when
:func:`~ignite.metrics.metric.Metric.completed` is called.
Raises:
NotComputableError: raised when the metric cannot be computed.
"""
pass
def started(self, engine: Engine) -> None:
"""Helper method to start data gathering for metric's computation. It is automatically attached to the
`engine` with :meth:`~ignite.metrics.metric.Metric.attach`.
Args:
engine: the engine to which the metric must be attached
"""
self.reset()
@torch.no_grad()
def iteration_completed(self, engine: Engine) -> None:
"""Helper method to update metric's computation. It is automatically attached to the
`engine` with :meth:`~ignite.metrics.metric.Metric.attach`.
Args:
engine: the engine to which the metric must be attached
Note:
``engine.state.output`` is used to compute metric values.
The majority of implemented metrics accept the following formats for ``engine.state.output``:
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``. ``y_pred`` and ``y`` can be torch tensors or
list of tensors/numbers if applicable.
.. versionchanged:: 0.4.5
``y_pred`` and ``y`` can be torch tensors or list of tensors/numbers
"""
output = self._output_transform(engine.state.output)
if isinstance(output, Mapping):
if self.required_output_keys is None:
raise TypeError(
f"Transformed engine output for {self.__class__.__name__} metric should be a tuple/list, "
f"but given {type(output)}"
)
if not all([k in output for k in self.required_output_keys]):
raise ValueError(
"When transformed engine's output is a mapping, "
f"it should contain {self.required_output_keys} keys, but given {list(output.keys())}"
)
output = tuple(output[k] for k in self.required_output_keys)
if isinstance(output, Sequence) and all([_is_list_of_tensors_or_numbers(o) for o in output]):
if not (len(output) == 2 and len(output[0]) == len(output[1])):
raise ValueError(
f"Output should have 2 items of the same length, "
f"got {len(output)} and {len(output[0])}, {len(output[1])}"
)
for o1, o2 in zip(output[0], output[1]):
# o1 and o2 are list of tensors or numbers
tensor_o1 = _to_batched_tensor(o1)
tensor_o2 = _to_batched_tensor(o2, device=tensor_o1.device)
self.update((tensor_o1, tensor_o2))
else:
self.update(output)
def completed(self, engine: Engine, name: str) -> None:
"""Helper method to compute metric's value and put into the engine. It is automatically attached to the
`engine` with :meth:`~ignite.metrics.metric.Metric.attach`. If metrics' value is torch tensor, it is
explicitly sent to CPU device.
Args:
engine: the engine to which the metric must be attached
name: the name of the metric used as key in dict `engine.state.metrics`
.. versionchanged:: 0.4.3
Added dict in metrics results.
.. versionchanged:: 0.4.5
metric's value is put on CPU if torch tensor.
"""
result = self.compute()
if isinstance(result, Mapping):
if name in result.keys():
raise ValueError(f"Argument name '{name}' is conflicting with mapping keys: {list(result.keys())}")
for key, value in result.items():
engine.state.metrics[key] = value
engine.state.metrics[name] = result
else:
if isinstance(result, torch.Tensor):
if len(result.size()) == 0:
result = result.item()
elif "cpu" not in result.device.type:
result = result.cpu()
engine.state.metrics[name] = result
def _check_usage(self, usage: Union[str, MetricUsage]) -> MetricUsage:
if isinstance(usage, str):
usages = [EpochWise, RunningEpochWise, BatchWise, RunningBatchWise, SingleEpochRunningBatchWise]
for usage_cls in usages:
if usage == usage_cls.usage_name:
usage = usage_cls()
break
if not isinstance(usage, MetricUsage):
raise ValueError(
"Argument usage should be '(Running)EpochWise.usage_name' or "
f"'((SingleEpoch)Running)BatchWise.usage_name', got {usage}"
)
if not isinstance(usage, MetricUsage):
raise TypeError(f"Unhandled usage type {type(usage)}")
return usage
def attach(self, engine: Engine, name: str, usage: Union[str, MetricUsage] = EpochWise()) -> None:
"""
Attaches current metric to provided engine. On the end of engine's run, `engine.state.metrics` dictionary will
contain computed metric's value under provided name.
Args:
engine: the engine to which the metric must be attached
name: the name of the metric to attach
usage: the usage of the metric. Valid string values should be
:attr:`ignite.metrics.metric.EpochWise.usage_name` (default) or
:attr:`ignite.metrics.metric.BatchWise.usage_name`.
Examples:
.. code-block:: python
metric = ...
metric.attach(engine, "mymetric")
assert "mymetric" in engine.run(data).metrics
assert metric.is_attached(engine)
Example with usage:
.. code-block:: python
metric = ...
metric.attach(engine, "mymetric", usage=BatchWise.usage_name)
assert "mymetric" in engine.run(data).metrics
assert metric.is_attached(engine, usage=BatchWise.usage_name)
"""
usage = self._check_usage(usage)
if not engine.has_event_handler(self.started, usage.STARTED):
engine.add_event_handler(usage.STARTED, self.started)
if not engine.has_event_handler(self.iteration_completed, usage.ITERATION_COMPLETED):
engine.add_event_handler(usage.ITERATION_COMPLETED, self.iteration_completed)
engine.add_event_handler(usage.COMPLETED, self.completed, name)
def detach(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) -> None:
"""
        Detaches current metric from the engine so that no metric computation is done during the run.
This method in conjunction with :meth:`~ignite.metrics.metric.Metric.attach` can be useful if several
metrics need to be computed with different periods. For example, one metric is computed every training epoch
        and another metric (e.g. a more expensive one) is computed every n-th training epoch.
Args:
engine: the engine from which the metric must be detached
usage: the usage of the metric. Valid string values should be
'epoch_wise' (default) or 'batch_wise'.
Examples:
.. code-block:: python
metric = ...
engine = ...
metric.detach(engine)
assert "mymetric" not in engine.run(data).metrics
assert not metric.is_attached(engine)
Example with usage:
.. code-block:: python
metric = ...
engine = ...
metric.detach(engine, usage="batch_wise")
assert "mymetric" not in engine.run(data).metrics
assert not metric.is_attached(engine, usage="batch_wise")
"""
usage = self._check_usage(usage)
if engine.has_event_handler(self.completed, usage.COMPLETED):
engine.remove_event_handler(self.completed, usage.COMPLETED)
if engine.has_event_handler(self.started, usage.STARTED):
engine.remove_event_handler(self.started, usage.STARTED)
if engine.has_event_handler(self.iteration_completed, usage.ITERATION_COMPLETED):
engine.remove_event_handler(self.iteration_completed, usage.ITERATION_COMPLETED)
def is_attached(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) -> bool:
"""
Checks if current metric is attached to provided engine. If attached, metric's computed
value is written to `engine.state.metrics` dictionary.
Args:
            engine: the engine to check whether the metric is attached to
usage: the usage of the metric. Valid string values should be
'epoch_wise' (default) or 'batch_wise'.
"""
usage = self._check_usage(usage)
return engine.has_event_handler(self.completed, usage.COMPLETED)
def state_dict(self) -> OrderedDict:
"""Method returns state dict with attributes of the metric specified in its
`_state_dict_all_req_keys` attribute. Can be used to save internal state of the class.
If there's an active distributed configuration, some collective operations is done and
the list of values across ranks is saved under each attribute's name in the dict.
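A minimal sketch of saving and restoring a metric's state (illustrative only; it assumes the
:class:`~ignite.metrics.accuracy.Accuracy` metric, whose state keys are ``_num_correct`` and ``_num_examples``):
.. code-block:: python
    acc = Accuracy()
    acc.update((torch.tensor([1, 0, 1]), torch.tensor([1, 1, 1])))
    saved = acc.state_dict()
    acc.reset()
    acc.load_state_dict(saved)
    assert acc.compute() == 2 / 3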
"""
state = OrderedDict()
for attr_name in self._state_dict_all_req_keys:
if attr_name not in self.__dict__:
raise ValueError(
f"Found a value in _state_dict_all_req_keys that is not among metric attributes: {attr_name}"
)
attr = getattr(self, attr_name)
if not isinstance(attr, (int, float, torch.Tensor)):
raise TypeError(
"Currently, only numeric or tensor-typed attributes of the metric"
" could be added to its state_dict."
)
if idist.get_world_size() == 1:
state[attr_name] = [attr]
else:
if isinstance(attr, (int, float)):
attr_type = type(attr)
attr = float(attr)
gathered_attr = cast(List[Any], idist.all_gather(attr))
if isinstance(attr, float):
gathered_attr = [attr_type(process_attr) for process_attr in gathered_attr]
state[attr_name] = gathered_attr
return state
def load_state_dict(self, state_dict: Mapping) -> None:
"""Method replaces internal state of the class with provided state dict data.
If there's an active distributed configuration, the process uses its rank to pick the proper value from
the list of values saved under each attribute's name in the dict.
Args:
state_dict: a dict containing attributes of the metric specified in its `_state_dict_all_req_keys`
attribute.
"""
super().load_state_dict(state_dict)
rank = idist.get_rank()
for attr in self._state_dict_all_req_keys:
setattr(self, attr, state_dict[attr][rank])
def __add__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x + y, self, other)
def __radd__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x + y, other, self)
def __sub__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x - y, self, other)
def __rsub__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x - y, other, self)
def __mul__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x * y, self, other)
def __rmul__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x * y, other, self)
def __pow__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x**y, self, other)
def __rpow__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x**y, other, self)
def __mod__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x % y, self, other)
def __truediv__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x.__truediv__(y), self, other)
def __rtruediv__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x.__truediv__(y), other, self)
def __floordiv__(self, other: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x, y: x // y, self, other)
def __getattr__(self, attr: str) -> Callable:
from ignite.metrics.metrics_lambda import MetricsLambda
def fn(x: Metric, *args: Any, **kwargs: Any) -> Any:
return getattr(x, attr)(*args, **kwargs)
def wrapper(*args: Any, **kwargs: Any) -> "MetricsLambda":
return MetricsLambda(fn, self, *args, **kwargs)
return wrapper
def __getitem__(self, index: Any) -> "MetricsLambda":
from ignite.metrics.metrics_lambda import MetricsLambda
return MetricsLambda(lambda x: x[index], self)
def __getstate__(self) -> Dict:
return self.__dict__
def __setstate__(self, d: Dict) -> None:
self.__dict__.update(d)
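# Illustrative sketch (not part of the library code): the arithmetic operators and
# __getattr__/__getitem__ above lazily compose metrics into a MetricsLambda. For example,
# assuming `Precision` and `Recall` from ignite.metrics and an existing `engine`:
#
#     precision = Precision(average=False)
#     recall = Recall(average=False)
#     f1 = (precision * recall * 2 / (precision + recall)).mean()
#     f1.attach(engine, "f1")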
def sync_all_reduce(*attrs: Any) -> Callable:
"""Helper decorator for distributed configuration to collect instance attribute value
across all participating processes and apply the specified reduction operation.
See :doc:`metrics` on how to use it.
Args:
attrs: attribute names of decorated class
.. versionchanged:: 0.4.5
- Ability to handle different reduction operations (SUM, MAX, MIN, PRODUCT).
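A minimal sketch (custom metric assumed, not part of the library) of decorating ``compute`` so that
the accumulators are reduced across processes before the final value is computed:
.. code-block:: python
    class MyMeanMetric(Metric):
        def reset(self) -> None:
            self._sum = torch.tensor(0.0, device=self._device)
            self._n = 0
        def update(self, output) -> None:
            self._sum += output.sum().to(self._device)
            self._n += output.numel()
        @sync_all_reduce("_sum", "_n:SUM")
        def compute(self) -> float:
            return self._sum.item() / self._n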
"""
def wrapper(func: Callable) -> Callable:
@wraps(func)
def another_wrapper(self: Metric, *args: Any, **kwargs: Any) -> Callable:
if not isinstance(self, Metric):
raise RuntimeError(
"Decorator sync_all_reduce should be used on ignite.metric.Metric class methods only"
)
ws = idist.get_world_size()
unreduced_attrs = {}
if len(attrs) > 0 and ws > 1:
for attr in attrs:
op_kwargs = {}
if ":" in attr:
attr, op = attr.split(":")
valid_ops = ["MIN", "MAX", "SUM", "PRODUCT"]
if op not in valid_ops:
raise ValueError(f"Reduction operation is not valid (expected : {valid_ops}, got: {op}")
op_kwargs["op"] = op
if attr not in self.__dict__:
raise ValueError(f"Metric {type(self)} has no attribute named `{attr}`.")
t = getattr(self, attr)
if not isinstance(t, (Number, torch.Tensor)):
raise TypeError(
"Attribute provided to sync_all_reduce should be a "
f"number or tensor but `{attr}` has type {type(t)}"
)
unreduced_attrs[attr] = t
# Here `clone` is necessary since `idist.all_reduce` modifies `t` inplace in the case
# `t` is a tensor and its `device` is same as that of the process.
# TODO: Remove this dual behavior of `all_reduce` to always either return a new tensor or
# modify it in-place.
t_reduced = idist.all_reduce(cast(float, t) if isinstance(t, Number) else t.clone(), **op_kwargs)
setattr(self, attr, t_reduced)
result = func(self, *args, **kwargs)
for attr, value in unreduced_attrs.items():
setattr(self, attr, value)
return result
return another_wrapper
setattr(wrapper, "_decorated", True)
return wrapper
def reinit__is_reduced(func: Callable) -> Callable:
"""Helper decorator for distributed configuration.
See :doc:`metrics` on how to use it.
Args:
func: A callable to reinit.
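A minimal sketch (custom metric assumed) showing the usual placement on ``reset`` and ``update``:
.. code-block:: python
    class MyMetric(Metric):
        @reinit__is_reduced
        def reset(self) -> None:
            self._sum = torch.tensor(0.0, device=self._device)
        @reinit__is_reduced
        def update(self, output) -> None:
            self._sum += output.to(self._device)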
"""
@wraps(func)
def wrapper(self: Metric, *args: Any, **kwargs: Any) -> None:
func(self, *args, **kwargs)
if "_result" in self.__dict__:
self._result = None # type: ignore[attr-defined]
setattr(wrapper, "_decorated", True)
return wrapper
def _is_list_of_tensors_or_numbers(x: Sequence[Union[torch.Tensor, float]]) -> bool:
return isinstance(x, Sequence) and all([isinstance(t, (torch.Tensor, Number)) for t in x])
def _to_batched_tensor(x: Union[torch.Tensor, float], device: Optional[torch.device] = None) -> torch.Tensor:
if isinstance(x, torch.Tensor):
return x.unsqueeze(dim=0)
return torch.tensor([x], device=device)
|
import warnings
from typing import Callable, cast, Optional, Sequence, Union
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.accuracy import _BaseClassification
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
from ignite.utils import to_onehot
__all__ = ["Precision"]
class _BasePrecisionRecall(_BaseClassification):
_state_dict_all_req_keys = ("_numerator", "_denominator", "_weight")
def __init__(
self,
output_transform: Callable = lambda x: x,
average: Optional[Union[bool, str]] = False,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
if not (average is None or isinstance(average, bool) or average in ["macro", "micro", "weighted", "samples"]):
raise ValueError(
"Argument average should be None or a boolean or one of values"
" 'macro', 'micro', 'weighted' and 'samples'."
)
if average is True:
self._average: Optional[Union[bool, str]] = "macro"
else:
self._average = average
self.eps = 1e-20
self._updated = False
super(_BasePrecisionRecall, self).__init__(
output_transform=output_transform, is_multilabel=is_multilabel, device=device
)
def _check_type(self, output: Sequence[torch.Tensor]) -> None:
super()._check_type(output)
if self._type in ["binary", "multiclass"] and self._average == "samples":
raise ValueError("Argument average='samples' is incompatible with binary and multiclass input data.")
y_pred, y = output
if self._type == "multiclass" and y.dtype != torch.long:
warnings.warn("`y` should be of dtype long when entry type is multiclass", RuntimeWarning)
if (
self._type == "binary"
and self._average is not False
and (y.dtype != torch.long or y_pred.dtype != torch.long)
):
warnings.warn(
"`y` and `y_pred` should be of dtype long when entry type is binary and average!=False", RuntimeWarning
)
def _prepare_output(self, output: Sequence[torch.Tensor]) -> Sequence[torch.Tensor]:
y_pred, y = output[0].detach(), output[1].detach()
if self._type == "binary" or self._type == "multiclass":
num_classes = 2 if self._type == "binary" else y_pred.size(1)
if self._type == "multiclass" and y.max() + 1 > num_classes:
raise ValueError(
f"y_pred contains fewer classes than y. Number of classes in the prediction is {num_classes}"
f" and an element in y has invalid class = {y.max().item() + 1}."
)
y = y.view(-1)
if self._type == "binary" and self._average is False:
y_pred = y_pred.view(-1)
else:
y = to_onehot(y.long(), num_classes=num_classes)
indices = torch.argmax(y_pred, dim=1) if self._type == "multiclass" else y_pred.long()
y_pred = to_onehot(indices.view(-1), num_classes=num_classes)
elif self._type == "multilabel":
# if y, y_pred shape is (N, C, ...) -> (N * ..., C)
num_labels = y_pred.size(1)
y_pred = torch.transpose(y_pred, 1, -1).reshape(-1, num_labels)
y = torch.transpose(y, 1, -1).reshape(-1, num_labels)
# Convert from int cuda/cpu to double on self._device
y_pred = y_pred.to(dtype=torch.float64, device=self._device)
y = y.to(dtype=torch.float64, device=self._device)
correct = y * y_pred
return y_pred, y, correct
@reinit__is_reduced
def reset(self) -> None:
"""
`numerator`, `denominator` and `weight` are three variables chosen as abstract
representatives of the quantities measured for the different `average` options.
`weight` is only used when `average='weighted'`. The actual values of these three variables are
as follows.
average='samples':
numerator (torch.Tensor): sum of metric value for samples
denominator (int): number of samples
average='weighted':
numerator (torch.Tensor): number of true positives per class/label
denominator (torch.Tensor): number of predicted (for precision) or actual (for recall) positives per
class/label.
weight (torch.Tensor): number of actual positives per class
average='micro':
numerator (torch.Tensor): sum of number of true positives for classes/labels
denominator (torch.Tensor): sum of number of predicted (for precision) or actual (for recall) positives for
classes/labels.
average='macro' or boolean or None:
numerator (torch.Tensor): number of true positives per class/label
denominator (torch.Tensor): number of predicted (for precision) or actual (for recall) positives per
class/label.
"""
self._numerator: Union[int, torch.Tensor] = 0
self._denominator: Union[int, torch.Tensor] = 0
self._weight: Union[int, torch.Tensor] = 0
self._updated = False
super(_BasePrecisionRecall, self).reset()
@sync_all_reduce("_numerator", "_denominator")
def compute(self) -> Union[torch.Tensor, float]:
r"""
Return value of the metric for `average` options `'weighted'` and `'macro'` is computed as follows.
.. math::
\text{Precision/Recall} = \frac{ numerator }{ denominator } \cdot weight
wherein `weight` is the internal variable `_weight` for `'weighted'` option and :math:`1/C`
for the `macro` one. :math:`C` is the number of classes/labels.
Return value of the metric for `average` options `'micro'`, `'samples'`, `False` and None is as follows.
.. math::
\text{Precision/Recall} = \frac{ numerator }{ denominator }
"""
if not self._updated:
raise NotComputableError(
f"{self.__class__.__name__} must have at least one example before it can be computed."
)
fraction = self._numerator / (self._denominator + (self.eps if self._average != "samples" else 0))
if self._average == "weighted":
_weight = idist.all_reduce(self._weight.clone()) # type: ignore[union-attr]
sum_of_weights = cast(torch.Tensor, _weight).sum() + self.eps
return ((fraction @ _weight) / sum_of_weights).item() # type: ignore
elif self._average == "micro" or self._average == "samples":
return cast(torch.Tensor, fraction).item()
elif self._average == "macro":
return cast(torch.Tensor, fraction).mean().item()
else:
return fraction
class Precision(_BasePrecisionRecall):
r"""Calculates precision for binary, multiclass and multilabel data.
.. math:: \text{Precision} = \frac{ TP }{ TP + FP }
where :math:`\text{TP}` is true positives and :math:`\text{FP}` is false positives.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must be in the following shape (batch_size, num_categories, ...) or (batch_size, ...).
- `y` must be in the following shape (batch_size, ...).
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
average: available options are
False
default option. For multiclass and multilabel inputs, per-class and per-label
metrics are returned respectively.
None
Like the `False` option, except that a per-class metric is returned for binary data as well.
For compatibility with the scikit-learn API.
'micro'
Metric is computed counting stats of classes/labels altogether.
.. math::
\text{Micro Precision} = \frac{\sum_{k=1}^C TP_k}{\sum_{k=1}^C TP_k+FP_k}
where :math:`C` is the number of classes/labels (2 in binary case). :math:`k` in :math:`TP_k`
and :math:`FP_k` means that the measures are computed for class/label :math:`k` (in a one-vs-rest
sense in multiclass case).
For binary and multiclass inputs, this is equivalent to accuracy,
so use :class:`~ignite.metrics.accuracy.Accuracy`.
'samples'
For multilabel input, precision is first computed on a
per-sample basis and then the average across samples is returned.
.. math::
\text{Sample-averaged Precision} = \frac{\sum_{n=1}^N \frac{TP_n}{TP_n+FP_n}}{N}
where :math:`N` is the number of samples. :math:`n` in :math:`TP_n` and :math:`FP_n`
means that the measures are computed for sample :math:`n`, across labels.
Incompatible with binary and multiclass inputs.
'weighted'
Like macro precision but accounts for class/label imbalance. For binary and multiclass
input, it computes the metric for each class then returns their average weighted by
the support of the classes (number of actual samples in each class). For multilabel input,
it computes precision for each label then returns their average weighted by the support
of the labels (number of actual positive samples in each label).
.. math::
Precision_k = \frac{TP_k}{TP_k+FP_k}
.. math::
\text{Weighted Precision} = \frac{\sum_{k=1}^C P_k * Precision_k}{N}
where :math:`C` is the number of classes (2 in the binary case). :math:`P_k` is the number
of samples belonging to class :math:`k` in the binary and multiclass cases, and the number of
positive samples belonging to label :math:`k` in the multilabel case.
'macro'
computes macro precision, which is the unweighted average of the metric computed across
classes/labels.
.. math::
\text{Macro Precision} = \frac{\sum_{k=1}^C Precision_k}{C}
where :math:`C` is the number of classes (2 in binary case).
True
Like the 'macro' option. Kept for backward compatibility.
is_multilabel: flag to use in multilabel case. By default, value is False.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Binary case. In binary and multilabel cases, the elements of
`y` and `y_pred` should have 0 or 1 values.
.. testcode:: 1
metric = Precision()
weighted_metric = Precision(average='weighted')
two_class_metric = Precision(average=None) # Returns precision for both classes
metric.attach(default_evaluator, "precision")
weighted_metric.attach(default_evaluator, "weighted precision")
two_class_metric.attach(default_evaluator, "both classes precision")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Precision: {state.metrics['precision']}")
print(f"Weighted Precision: {state.metrics['weighted precision']}")
print(f"Precision for class 0 and class 1: {state.metrics['both classes precision']}")
.. testoutput:: 1
Precision: 0.75
Weighted Precision: 0.6666666666666666
Precision for class 0 and class 1: tensor([0.5000, 0.7500], dtype=torch.float64)
Multiclass case
.. testcode:: 2
metric = Precision()
macro_metric = Precision(average=True)
weighted_metric = Precision(average='weighted')
metric.attach(default_evaluator, "precision")
macro_metric.attach(default_evaluator, "macro precision")
weighted_metric.attach(default_evaluator, "weighted precision")
y_true = torch.tensor([2, 0, 2, 1, 0])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288]
])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Precision: {state.metrics['precision']}")
print(f"Macro Precision: {state.metrics['macro precision']}")
print(f"Weighted Precision: {state.metrics['weighted precision']}")
.. testoutput:: 2
Precision: tensor([0.5000, 0.0000, 0.3333], dtype=torch.float64)
Macro Precision: 0.27777777777777773
Weighted Precision: 0.3333333333333333
Multilabel case. The shapes must be (batch_size, num_labels, ...)
.. testcode:: 3
metric = Precision(is_multilabel=True)
micro_metric = Precision(is_multilabel=True, average='micro')
macro_metric = Precision(is_multilabel=True, average=True)
weighted_metric = Precision(is_multilabel=True, average='weighted')
samples_metric = Precision(is_multilabel=True, average='samples')
metric.attach(default_evaluator, "precision")
micro_metric.attach(default_evaluator, "micro precision")
macro_metric.attach(default_evaluator, "macro precision")
weighted_metric.attach(default_evaluator, "weighted precision")
samples_metric.attach(default_evaluator, "samples precision")
y_true = torch.tensor([
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 1],
])
y_pred = torch.tensor([
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(f"Precision: {state.metrics['precision']}")
print(f"Micro Precision: {state.metrics['micro precision']}")
print(f"Macro Precision: {state.metrics['macro precision']}")
print(f"Weighted Precision: {state.metrics['weighted precision']}")
print(f"Samples Precision: {state.metrics['samples precision']}")
.. testoutput:: 3
Precision: tensor([0.2000, 0.5000, 0.0000], dtype=torch.float64)
Micro Precision: 0.2222222222222222
Macro Precision: 0.2333333333333333
Weighted Precision: 0.175
Samples Precision: 0.2
Thresholding of predictions can be done as below:
.. testcode:: 4
def thresholded_output_transform(output):
y_pred, y = output
y_pred = torch.round(y_pred)
return y_pred, y
metric = Precision(output_transform=thresholded_output_transform)
metric.attach(default_evaluator, "precision")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["precision"])
.. testoutput:: 4
0.75
.. versionchanged:: 0.4.10
Some new options were added to the `average` parameter.
"""
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
r"""
Update the metric state using prediction and target.
Args:
output: a binary tuple of tensors (y_pred, y) whose shapes follow the table below. N stands for the batch
dimension, `...` for possible additional dimensions and C for class dimension.
.. list-table::
:widths: 20 10 10 10
:header-rows: 1
* - Output member\\Data type
- Binary
- Multiclass
- Multilabel
* - y_pred
- (N, ...)
- (N, C, ...)
- (N, C, ...)
* - y
- (N, ...)
- (N, ...)
- (N, C, ...)
For binary and multilabel data, both y and y_pred should consist of 0's and 1's, but for multiclass
data, y_pred and y should consist of probabilities and integers respectively.
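A minimal sketch (values are illustrative) of using the ``reset``/``update``/``compute`` API directly,
without attaching to an engine:
.. code-block:: python
    precision = Precision(average='macro')
    precision.reset()
    y_pred = torch.tensor([[0.6, 0.4], [0.3, 0.7], [0.8, 0.2]])
    y = torch.tensor([0, 1, 1])
    precision.update((y_pred, y))
    print(precision.compute())  # 0.75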
"""
self._check_shape(output)
self._check_type(output)
y_pred, y, correct = self._prepare_output(output)
if self._average == "samples":
all_positives = y_pred.sum(dim=1)
true_positives = correct.sum(dim=1)
self._numerator += torch.sum(true_positives / (all_positives + self.eps))
self._denominator += y.size(0)
elif self._average == "micro":
self._denominator += y_pred.sum()
self._numerator += correct.sum()
else: # _average in [False, None, 'macro', 'weighted']
self._denominator += y_pred.sum(dim=0)
self._numerator += correct.sum(dim=0)
if self._average == "weighted":
self._weight += y.sum(dim=0)
self._updated = True
|
import warnings
from typing import Callable, cast, List, Optional, Tuple, Union
import torch
import ignite.distributed as idist
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced
__all__ = ["EpochMetric"]
class EpochMetric(Metric):
"""Class for metrics that should be computed on the entire output history of a model.
Model's output and targets are restricted to be of shape ``(batch_size, n_targets)``. Output
datatype should be `float32`. Target datatype should be `long` for classification and `float` for regression.
.. warning::
Current implementation stores all input data (output and target) as tensors before computing a metric.
This can potentially lead to a memory error if the input data is larger than available RAM.
In distributed configuration, all stored data (output and target) is collected across all processes
using the all-gather collective operation. This can potentially lead to a memory error.
The compute method executes ``compute_fn`` on the rank-0 process only, and the final result is broadcast to
all processes.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
compute_fn: a callable which receives two tensors as the `predictions` and `targets`
and returns a scalar. Input tensors will be on specified ``device`` (see arg below).
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
check_compute_fn: if True, ``compute_fn`` is run on the first batch of data to ensure there are no
issues. If issues exist, user is warned that there might be an issue with the ``compute_fn``.
Default, True.
device: optional device specification for internal storage.
Example:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
def mse_fn(y_preds, y_targets):
return torch.mean(((y_preds - y_targets.type_as(y_preds)) ** 2)).item()
metric = EpochMetric(mse_fn)
metric.attach(default_evaluator, "mse")
y_true = torch.tensor([0, 1, 2, 3, 4, 5])
y_pred = y_true * 0.75
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["mse"])
.. testoutput::
0.5729...
Warnings:
EpochMetricWarning: User is warned that there are issues with ``compute_fn`` on a batch of data processed.
To disable the warning, set ``check_compute_fn=False``.
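As another illustrative sketch (assuming scikit-learn is available; ``roc_auc_score`` is not part of ignite),
an epoch-level ROC AUC can be computed from the accumulated predictions and targets:
.. code-block:: python
    from sklearn.metrics import roc_auc_score
    def roc_auc_compute_fn(y_preds, y_targets):
        return roc_auc_score(y_targets.numpy(), y_preds.numpy())
    roc_auc = EpochMetric(roc_auc_compute_fn, check_compute_fn=False)
    roc_auc.attach(default_evaluator, "roc_auc")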
"""
_state_dict_all_req_keys = ("_predictions", "_targets")
def __init__(
self,
compute_fn: Callable[[torch.Tensor, torch.Tensor], float],
output_transform: Callable = lambda x: x,
check_compute_fn: bool = True,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
if not callable(compute_fn):
raise TypeError("Argument compute_fn should be callable.")
self.compute_fn = compute_fn
self._check_compute_fn = check_compute_fn
super(EpochMetric, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self._predictions: List[torch.Tensor] = []
self._targets: List[torch.Tensor] = []
self._result: Optional[float] = None
def _check_shape(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
if y_pred.ndimension() not in (1, 2):
raise ValueError("Predictions should be of shape (batch_size, n_targets) or (batch_size, ).")
if y.ndimension() not in (1, 2):
raise ValueError("Targets should be of shape (batch_size, n_targets) or (batch_size, ).")
def _check_type(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
y_pred, y = output
if len(self._predictions) < 1:
return
dtype_preds = self._predictions[-1].dtype
if dtype_preds != y_pred.dtype:
raise ValueError(
f"Incoherent types between input y_pred and stored predictions: {dtype_preds} vs {y_pred.dtype}"
)
dtype_targets = self._targets[-1].dtype
if dtype_targets != y.dtype:
raise ValueError(f"Incoherent types between input y and stored targets: {dtype_targets} vs {y.dtype}")
@reinit__is_reduced
def update(self, output: Tuple[torch.Tensor, torch.Tensor]) -> None:
self._check_shape(output)
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.ndimension() == 2 and y_pred.shape[1] == 1:
y_pred = y_pred.squeeze(dim=-1)
if y.ndimension() == 2 and y.shape[1] == 1:
y = y.squeeze(dim=-1)
y_pred = y_pred.clone().to(self._device)
y = y.clone().to(self._device)
self._check_type((y_pred, y))
self._predictions.append(y_pred)
self._targets.append(y)
# Check once the signature and execution of compute_fn
if len(self._predictions) == 1 and self._check_compute_fn:
try:
self.compute_fn(self._predictions[0], self._targets[0])
except Exception as e:
warnings.warn(f"Probably, there can be a problem with `compute_fn`:\n {e}.", EpochMetricWarning)
def compute(self) -> float:
if len(self._predictions) < 1 or len(self._targets) < 1:
raise NotComputableError("EpochMetric must have at least one example before it can be computed.")
if self._result is None:
_prediction_tensor = torch.cat(self._predictions, dim=0)
_target_tensor = torch.cat(self._targets, dim=0)
ws = idist.get_world_size()
if ws > 1:
# All gather across all processes
_prediction_tensor = cast(torch.Tensor, idist.all_gather(_prediction_tensor))
_target_tensor = cast(torch.Tensor, idist.all_gather(_target_tensor))
self._result = 0.0
if idist.get_rank() == 0:
# Run compute_fn on zero rank only
self._result = self.compute_fn(_prediction_tensor, _target_tensor)
if ws > 1:
# broadcast result to all processes
self._result = cast(float, idist.broadcast(self._result, src=0))
return self._result
class EpochMetricWarning(UserWarning):
pass
|
from typing import Callable, cast, Dict, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["Loss"]
class Loss(Metric):
"""
Calculates the average loss according to the passed loss_fn.
Args:
loss_fn: a callable taking a prediction tensor, a target
tensor, optionally other arguments, and returns the average loss
over all observations in the batch.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric.
This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
The output is expected to be a tuple ``(prediction, target)`` or
``(prediction, target, kwargs)`` where ``kwargs`` is a dictionary of extra
keyword arguments. If extra keyword arguments are provided, they are passed to `loss_fn`.
batch_size: a callable taking a target tensor that returns the
first dimension size (usually the batch size).
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Attributes:
required_output_keys: dictionary defines required keys to be found in ``engine.state.output`` if the
latter is a dictionary. Default, ``("y_pred", "y", "criterion_kwargs")``. This is useful when the
criterion function requires additional arguments, which can be passed using ``criterion_kwargs``.
See an example below.
Examples:
The example below shows how to set up the Loss metric and attach it to an evaluator.
When the criterion function requires additional arguments, they can be passed through the
``criterion_kwargs`` entry of the output (see the sketch after the example).
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
model = default_model
criterion = nn.NLLLoss()
metric = Loss(criterion)
metric.attach(default_evaluator, 'loss')
y_pred = torch.tensor([[0.1, 0.4, 0.5], [0.1, 0.7, 0.2]])
y_true = torch.tensor([2, 2]).long()
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['loss'])
.. testoutput::
-0.3499999...
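A minimal sketch of passing extra keyword arguments through the third element of the output tuple
(``weighted_mse`` is a made-up criterion, not part of the library):
.. code-block:: python
    def weighted_mse(y_pred, y, weight=1.0):
        return torch.mean(weight * (y_pred - y) ** 2)
    metric = Loss(weighted_mse)
    metric.reset()
    metric.update((torch.tensor([0.0, 1.0]), torch.tensor([0.5, 0.5]), {"weight": 2.0}))
    print(metric.compute())  # 0.5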
"""
required_output_keys = ("y_pred", "y", "criterion_kwargs")
_state_dict_all_req_keys = ("_sum", "_num_examples")
def __init__(
self,
loss_fn: Callable,
output_transform: Callable = lambda x: x,
batch_size: Callable = len,
device: Union[str, torch.device] = torch.device("cpu"),
):
super(Loss, self).__init__(output_transform, device=device)
self._loss_fn = loss_fn
self._batch_size = batch_size
@reinit__is_reduced
def reset(self) -> None:
self._sum = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[Union[torch.Tensor, Dict]]) -> None:
if len(output) == 2:
y_pred, y = cast(Tuple[torch.Tensor, torch.Tensor], output)
kwargs: Dict = {}
else:
y_pred, y, kwargs = cast(Tuple[torch.Tensor, torch.Tensor, Dict], output)
average_loss = self._loss_fn(y_pred, y, **kwargs).detach()
if len(average_loss.shape) != 0:
raise ValueError("loss_fn did not return the average loss.")
n = self._batch_size(y)
self._sum += average_loss.to(self._device) * n
self._num_examples += n
@sync_all_reduce("_sum", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("Loss must have at least one example before it can be computed.")
return self._sum.item() / self._num_examples
|
from typing import Callable, Sequence, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["PSNR"]
class PSNR(Metric):
r"""Computes average
`Peak signal-to-noise ratio (PSNR) <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
.. math::
\text{PSNR}(I, J) = 10 * \log_{10}\left(\frac{ MAX_{I}^2 }{ \text{ MSE } }\right)
where :math:`\text{MSE}` is `mean squared error <https://en.wikipedia.org/wiki/Mean_squared_error>`_.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` and `y` **must** have (batch_size, ...) shape.
- `y_pred` and `y` **must** have same dtype and same shape.
Args:
data_range: The data range of the target image (distance between minimum
and maximum possible values), e.g. 255 for 8-bit images or 1.0 for images
scaled to the [0, 1] range.
output_transform: A callable that is used to transform the Engine’s
process_function’s output into the form expected by the metric.
device: specifies which device updates are accumulated on.
Setting the metric’s device to be the same as your update arguments ensures
the update method is non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`,
visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
psnr = PSNR(data_range=1.0)
psnr.attach(default_evaluator, 'psnr')
preds = torch.rand([4, 3, 16, 16])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['psnr'])
.. testoutput::
16.8671405...
By default, this metric accepts grayscale or RGB images. If you have YCbCr or YUV images, only the
Y channel is needed to compute PSNR, and this can be done with ``output_transform``. For instance,
.. testcode::
def get_y_channel(output):
y_pred, y = output
# y_pred and y are (B, 3, H, W) and YCbCr or YUV images
# let's select y channel
return y_pred[:, 0, ...], y[:, 0, ...]
psnr = PSNR(data_range=219, output_transform=get_y_channel)
psnr.attach(default_evaluator, 'psnr')
preds = 219 * torch.rand([4, 3, 16, 16])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['psnr'])
.. testoutput::
16.7027966...
.. versionadded:: 0.4.3
"""
_state_dict_all_req_keys = ("_sum_of_batchwise_psnr", "_num_examples")
def __init__(
self,
data_range: Union[int, float],
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
super().__init__(output_transform=output_transform, device=device)
self.data_range = data_range
def _check_shape_dtype(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if y_pred.dtype != y.dtype:
raise TypeError(
f"Expected y_pred and y to have the same data type. Got y_pred: {y_pred.dtype} and y: {y.dtype}."
)
if y_pred.shape != y.shape:
raise ValueError(
f"Expected y_pred and y to have the same shape. Got y_pred: {y_pred.shape} and y: {y.shape}."
)
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_batchwise_psnr = torch.tensor(0.0, dtype=torch.float64, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape_dtype(output)
y_pred, y = output[0].detach(), output[1].detach()
dim = tuple(range(1, y.ndim))
mse_error = torch.pow(y_pred.double() - y.view_as(y_pred).double(), 2).mean(dim=dim)
self._sum_of_batchwise_psnr += torch.sum(10.0 * torch.log10(self.data_range**2 / (mse_error + 1e-10))).to(
device=self._device
)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_batchwise_psnr", "_num_examples")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("PSNR must have at least one example before it can be computed.")
return (self._sum_of_batchwise_psnr / self._num_examples).item()
|
import warnings
from typing import Any, Callable, cast, Optional, Union
import torch
import ignite.distributed as idist
from ignite.engine import Engine, Events
from ignite.metrics.metric import Metric, MetricUsage, reinit__is_reduced, RunningBatchWise, SingleEpochRunningBatchWise
__all__ = ["RunningAverage"]
class RunningAverage(Metric):
"""Compute running average of a metric or the output of process function.
Args:
src: input source: an instance of :class:`~ignite.metrics.metric.Metric` or None. The latter
corresponds to `engine.state.output` which holds the output of process function.
alpha: running average decay factor, default 0.98
output_transform: a function to use to transform the output if `src` is None and
corresponds to the output of the process function. Otherwise it should be None.
epoch_bound: whether the running average should be reset after each epoch. It is deprecated in favor of the
``usage`` argument in the :meth:`attach` method. Setting ``epoch_bound`` to ``True`` is equivalent to
``usage=SingleEpochRunningBatchWise()`` and setting it to ``False`` is equivalent to
``usage=RunningBatchWise()`` in the :meth:`attach` method. Default None.
device: specifies which device updates are accumulated on. Should be
None when ``src`` is an instance of :class:`~ignite.metrics.metric.Metric`, as the running average will
use the ``src``'s device. Otherwise, defaults to CPU. Only applicable when the computed value
from the metric is a tensor.
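Internally, the first value initializes the average and every subsequent update follows a plain
exponential moving average, ``value = alpha * value + (1 - alpha) * new_value``, where ``new_value`` is
either the freshly computed ``src`` value or the (all-reduced) ``engine.state.output``.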
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
default_trainer = get_default_trainer()
accuracy = Accuracy()
metric = RunningAverage(accuracy)
metric.attach(default_trainer, 'running_avg_accuracy')
@default_trainer.on(Events.ITERATION_COMPLETED)
def log_running_avg_metrics():
print(default_trainer.state.metrics['running_avg_accuracy'])
y_true = [torch.tensor(y) for y in [[0], [1], [0], [1], [0], [1]]]
y_pred = [torch.tensor(y) for y in [[0], [0], [0], [1], [1], [1]]]
state = default_trainer.run(zip(y_pred, y_true))
.. testoutput:: 1
1.0
0.98
0.98039...
0.98079...
0.96117...
0.96195...
.. testcode:: 2
default_trainer = get_default_trainer()
metric = RunningAverage(output_transform=lambda x: x.item())
metric.attach(default_trainer, 'running_avg_accuracy')
@default_trainer.on(Events.ITERATION_COMPLETED)
def log_running_avg_metrics():
print(default_trainer.state.metrics['running_avg_accuracy'])
y = [torch.tensor(y) for y in [[0], [1], [0], [1], [0], [1]]]
state = default_trainer.run(y)
.. testoutput:: 2
0.0
0.020000...
0.019600...
0.039208...
0.038423...
0.057655...
"""
required_output_keys = None
# TODO Shall we put `src` here? Then we should add a new branch for metric-typed attributes in `state_dict`
# and `load_state_dict`. Examples; This class; `Rouge` which has a `List[_BaseRouge]`.
_state_dict_all_req_keys = ("_value",)
def __init__(
self,
src: Optional[Metric] = None,
alpha: float = 0.98,
output_transform: Optional[Callable] = None,
epoch_bound: Optional[bool] = None,
device: Optional[Union[str, torch.device]] = None,
):
if not (isinstance(src, Metric) or src is None):
raise TypeError("Argument src should be a Metric or None.")
if not (0.0 < alpha <= 1.0):
raise ValueError("Argument alpha should be a float between 0.0 and 1.0.")
if isinstance(src, Metric):
if output_transform is not None:
raise ValueError("Argument output_transform should be None if src is a Metric.")
def output_transform(x: Any) -> Any:
return x
if device is not None:
raise ValueError("Argument device should be None if src is a Metric.")
self.src: Union[Metric, None] = src
device = src._device
else:
if output_transform is None:
raise ValueError(
"Argument output_transform should not be None if src corresponds "
"to the output of process function."
)
self.src = None
if device is None:
device = torch.device("cpu")
if epoch_bound is not None:
warnings.warn(
"`epoch_bound` is deprecated and will be removed in the future. Consider using `usage` argument of"
"`attach` method instead. `epoch_bound=True` is equivalent with `usage=SingleEpochRunningBatchWise()`"
" and `epoch_bound=False` is equivalent with `usage=RunningBatchWise()`."
)
self.epoch_bound = epoch_bound
self.alpha = alpha
super(RunningAverage, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self._value: Optional[Union[float, torch.Tensor]] = None
if isinstance(self.src, Metric):
self.src.reset()
@reinit__is_reduced
def update(self, output: Union[torch.Tensor, float]) -> None:
if self.src is None:
output = output.detach().to(self._device, copy=True) if isinstance(output, torch.Tensor) else output
value = idist.all_reduce(output) / idist.get_world_size()
else:
value = self.src.compute()
self.src.reset()
if self._value is None:
self._value = value
else:
self._value = self._value * self.alpha + (1.0 - self.alpha) * value
def compute(self) -> Union[torch.Tensor, float]:
return cast(Union[torch.Tensor, float], self._value)
def attach(self, engine: Engine, name: str, usage: Union[str, MetricUsage] = RunningBatchWise()) -> None:
r"""
Attach the metric to the ``engine`` using the events determined by the ``usage``.
Args:
engine: the engine to get attached to.
name: by which, the metric is inserted into ``engine.state.metrics`` dictionary.
usage: the usage determining on which events the metric is reset, updated and computed. It should be an
instance of the :class:`~ignite.metrics.metric.MetricUsage`\ s in the following table.
======================================================= ===========================================
``usage`` **class** **Description**
======================================================= ===========================================
:class:`~.metrics.metric.RunningBatchWise` Running average of the ``src`` metric or
``engine.state.output`` is computed across
batches. In the former case, on each batch,
``src`` is reset, updated and computed then
its value is retrieved. Default.
:class:`~.metrics.metric.SingleEpochRunningBatchWise` Same as above but the running average is
computed across batches in an epoch so it
is reset at the end of the epoch.
:class:`~.metrics.metric.RunningEpochWise` Running average of the ``src`` metric or
``engine.state.output`` is computed across
epochs. In the former case, ``src`` works
as if it was attached in a
:class:`~ignite.metrics.metric.EpochWise`
manner and its computed value is retrieved
at the end of the epoch. The latter case
doesn't make much sense for this usage as
the ``engine.state.output`` of the last
batch is retrieved then.
======================================================= ===========================================
If ``src`` is not given, ``RunningAverage`` is updated from ``engine.state.output`` at the
``usage.ITERATION_COMPLETED`` event; otherwise it is computed and updated from ``src`` by manually calling
its ``compute`` method. The running value itself is stored in ``engine.state.metrics`` at the
``usage.COMPLETED`` event.
Also, if ``src`` is given, it is updated at ``usage.ITERATION_COMPLETED``, but its reset event is determined
by the ``usage`` type. If ``isinstance(usage, BatchWise)`` holds true, ``src`` is reset on
``BatchWise().STARTED``, otherwise on ``EpochWise().STARTED`` if ``isinstance(usage, EpochWise)``.
.. versionchanged:: 0.5.1
Added `usage` argument
"""
usage = self._check_usage(usage)
if self.epoch_bound is not None:
usage = SingleEpochRunningBatchWise() if self.epoch_bound else RunningBatchWise()
if isinstance(self.src, Metric) and not engine.has_event_handler(
self.src.iteration_completed, Events.ITERATION_COMPLETED
):
engine.add_event_handler(Events.ITERATION_COMPLETED, self.src.iteration_completed)
super().attach(engine, name, usage)
def detach(self, engine: Engine, usage: Union[str, MetricUsage] = RunningBatchWise()) -> None:
usage = self._check_usage(usage)
if self.epoch_bound is not None:
usage = SingleEpochRunningBatchWise() if self.epoch_bound else RunningBatchWise()
if isinstance(self.src, Metric) and engine.has_event_handler(
self.src.iteration_completed, Events.ITERATION_COMPLETED
):
engine.remove_event_handler(self.src.iteration_completed, Events.ITERATION_COMPLETED)
super().detach(engine, usage)
|
import itertools
from typing import Any, Callable, Optional, Union
import torch
from ignite.engine import Engine
from ignite.metrics.metric import EpochWise, Metric, MetricUsage, reinit__is_reduced
__all__ = ["MetricsLambda"]
class MetricsLambda(Metric):
"""
Applies a function to other metrics to obtain a new metric.
The result of the new metric is defined to be the result
of applying the function to the results of the argument metrics.
On update, this metric recursively updates the metrics
it depends on. On reset, all its dependency metrics are
reset as well. On attach, all its dependency metrics are attached
automatically (but partially, e.g. :meth:`~ignite.metrics.metric.Metric.is_attached()` will return False).
Args:
f: the function that defines the computation
args: Sequence of other metrics or any other values
that will be fed to ``f`` as positional arguments.
kwargs: Mapping of other metrics or any other values
that will be fed to ``f`` as keyword arguments.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
precision = Precision(average=False)
recall = Recall(average=False)
def Fbeta(r, p, beta):
return torch.mean((1 + beta ** 2) * p * r / (beta ** 2 * p + r + 1e-20)).item()
F1 = MetricsLambda(Fbeta, recall, precision, 1)
F2 = MetricsLambda(Fbeta, recall, precision, 2)
F3 = MetricsLambda(Fbeta, recall, precision, 3)
F4 = MetricsLambda(Fbeta, recall, precision, 4)
F1.attach(default_evaluator, "F1")
F2.attach(default_evaluator, "F2")
F3.attach(default_evaluator, "F3")
F4.attach(default_evaluator, "F4")
y_true = torch.tensor([1, 0, 1, 0, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["F1"])
print(state.metrics["F2"])
print(state.metrics["F3"])
print(state.metrics["F4"])
.. testoutput::
0.8571...
0.9375...
0.9677...
0.9807...
When checking whether the metric is attached, if one of its dependency
metrics is detached, the metric is considered detached too.
.. code-block:: python
engine = ...
precision = Precision(average=False)
aP = precision.mean()
aP.attach(engine, "aP")
assert aP.is_attached(engine)
# partially attached
assert not precision.is_attached(engine)
precision.detach(engine)
assert not aP.is_attached(engine)
# fully detached
assert not precision.is_attached(engine)
"""
def __init__(self, f: Callable, *args: Any, **kwargs: Any) -> None:
self.function = f
self.args = args
self.kwargs = kwargs
self.engine: Optional[Engine] = None
self._updated = False
super(MetricsLambda, self).__init__(device="cpu")
@reinit__is_reduced
def reset(self) -> None:
for i in itertools.chain(self.args, self.kwargs.values()):
if isinstance(i, Metric):
i.reset()
self._updated = False
@reinit__is_reduced
def update(self, output: Any) -> None:
if self.engine:
raise ValueError(
"MetricsLambda is already attached to an engine, "
"and MetricsLambda can't use update API while it's attached."
)
for i in itertools.chain(self.args, self.kwargs.values()):
if isinstance(i, Metric):
i.update(output)
self._updated = True
def compute(self) -> Any:
materialized = [_get_value_on_cpu(i) for i in self.args]
materialized_kwargs = {k: _get_value_on_cpu(v) for k, v in self.kwargs.items()}
return self.function(*materialized, **materialized_kwargs)
def _internal_attach(self, engine: Engine, usage: MetricUsage) -> None:
self.engine = engine
for index, metric in enumerate(itertools.chain(self.args, self.kwargs.values())):
if isinstance(metric, MetricsLambda):
metric._internal_attach(engine, usage)
elif isinstance(metric, Metric):
# NB : metrics is attached partially
# We must not use is_attached() but rather if these events exist
if not engine.has_event_handler(metric.started, usage.STARTED):
engine.add_event_handler(usage.STARTED, metric.started)
if not engine.has_event_handler(metric.iteration_completed, usage.ITERATION_COMPLETED):
engine.add_event_handler(usage.ITERATION_COMPLETED, metric.iteration_completed)
def attach(self, engine: Engine, name: str, usage: Union[str, MetricUsage] = EpochWise()) -> None:
if self._updated:
raise ValueError(
"The underlying metrics are already updated, can't attach while using reset/update/compute API."
)
usage = self._check_usage(usage)
# recursively attach all its dependencies (partially)
self._internal_attach(engine, usage)
# attach only handler on EPOCH_COMPLETED
engine.add_event_handler(usage.COMPLETED, self.completed, name)
def detach(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) -> None:
usage = self._check_usage(usage)
# remove from engine
super(MetricsLambda, self).detach(engine, usage)
self.engine = None
def is_attached(self, engine: Engine, usage: Union[str, MetricUsage] = EpochWise()) -> bool:
usage = self._check_usage(usage)
# check recursively the dependencies
return super(MetricsLambda, self).is_attached(engine, usage) and self._internal_is_attached(engine, usage)
def _internal_is_attached(self, engine: Engine, usage: MetricUsage) -> bool:
# if no engine, metrics is not attached
if engine is None:
return False
# check recursively if metrics are attached
is_detached = False
for metric in itertools.chain(self.args, self.kwargs.values()):
if isinstance(metric, MetricsLambda):
if not metric._internal_is_attached(engine, usage):
is_detached = True
elif isinstance(metric, Metric):
if not engine.has_event_handler(metric.started, usage.STARTED):
is_detached = True
if not engine.has_event_handler(metric.iteration_completed, usage.ITERATION_COMPLETED):
is_detached = True
return not is_detached
def _get_value_on_cpu(v: Any) -> Any:
if isinstance(v, Metric):
v = v.compute()
if isinstance(v, torch.Tensor):
v = v.cpu()
return v
|
from typing import Callable, Optional, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["Accuracy"]
class _BaseClassification(Metric):
def __init__(
self,
output_transform: Callable = lambda x: x,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
self._is_multilabel = is_multilabel
self._type: Optional[str] = None
self._num_classes: Optional[int] = None
super(_BaseClassification, self).__init__(output_transform=output_transform, device=device)
def reset(self) -> None:
self._type = None
self._num_classes = None
def _check_shape(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if not (y.ndimension() == y_pred.ndimension() or y.ndimension() + 1 == y_pred.ndimension()):
raise ValueError(
"y must have shape of (batch_size, ...) and y_pred must have "
"shape of (batch_size, num_categories, ...) or (batch_size, ...), "
f"but given {y.shape} vs {y_pred.shape}."
)
y_shape = y.shape
y_pred_shape: Tuple[int, ...] = y_pred.shape
if y.ndimension() + 1 == y_pred.ndimension():
y_pred_shape = (y_pred_shape[0],) + y_pred_shape[2:]
if not (y_shape == y_pred_shape):
raise ValueError("y and y_pred must have compatible shapes.")
if self._is_multilabel and not (y.shape == y_pred.shape and y.ndimension() > 1 and y.shape[1] > 1):
raise ValueError(
"y and y_pred must have same shape of (batch_size, num_categories, ...) and num_categories > 1."
)
def _check_binary_multilabel_cases(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if not torch.equal(y, y**2):
raise ValueError("For binary cases, y must be comprised of 0's and 1's.")
if not torch.equal(y_pred, y_pred**2):
raise ValueError("For binary cases, y_pred must be comprised of 0's and 1's.")
def _check_type(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output
if y.ndimension() + 1 == y_pred.ndimension():
num_classes = y_pred.shape[1]
if num_classes == 1:
update_type = "binary"
self._check_binary_multilabel_cases((y_pred, y))
else:
update_type = "multiclass"
elif y.ndimension() == y_pred.ndimension():
self._check_binary_multilabel_cases((y_pred, y))
if self._is_multilabel:
update_type = "multilabel"
num_classes = y_pred.shape[1]
else:
update_type = "binary"
num_classes = 1
else:
raise RuntimeError(
f"Invalid shapes of y (shape={y.shape}) and y_pred (shape={y_pred.shape}), check documentation."
" for expected shapes of y and y_pred."
)
if self._type is None:
self._type = update_type
self._num_classes = num_classes
else:
if self._type != update_type:
raise RuntimeError(f"Input data type has changed from {self._type} to {update_type}.")
if self._num_classes != num_classes:
raise ValueError(f"Input data number of classes has changed from {self._num_classes} to {num_classes}")
class Accuracy(_BaseClassification):
r"""Calculates the accuracy for binary, multiclass and multilabel data.
.. math:: \text{Accuracy} = \frac{ TP + TN }{ TP + TN + FP + FN }
where :math:`\text{TP}` is true positives, :math:`\text{TN}` is true negatives,
:math:`\text{FP}` is false positives and :math:`\text{FN}` is false negatives.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must be in the following shape (batch_size, num_categories, ...) or (batch_size, ...).
- `y` must be in the following shape (batch_size, ...).
- `y` and `y_pred` must be in the following shape of (batch_size, num_categories, ...) and
num_categories must be greater than 1 for multilabel cases.
Args:
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
is_multilabel: flag to use in multilabel case. By default, False.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Binary case
.. testcode:: 1
metric = Accuracy()
metric.attach(default_evaluator, "accuracy")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([1, 0, 1, 0, 1, 1])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 1
0.6666...
Multiclass case
.. testcode:: 2
metric = Accuracy()
metric.attach(default_evaluator, "accuracy")
y_true = torch.tensor([2, 0, 2, 1, 0, 1])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288],
[0.7748, 0.9542, 0.8573],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 2
0.5
Multilabel case
.. testcode:: 3
metric = Accuracy(is_multilabel=True)
metric.attach(default_evaluator, "accuracy")
y_true = torch.tensor([
[0, 0, 1, 0, 1],
[1, 0, 1, 0, 0],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[0, 1, 1, 0, 1],
])
y_pred = torch.tensor([
[1, 1, 0, 0, 0],
[1, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 1, 1, 1],
[1, 1, 0, 0, 1],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 3
0.2
In binary and multilabel cases, the elements of `y` and `y_pred` should have 0 or 1 values. Thresholding of
predictions can be done as below:
.. testcode:: 4
def thresholded_output_transform(output):
y_pred, y = output
y_pred = torch.round(y_pred)
return y_pred, y
metric = Accuracy(output_transform=thresholded_output_transform)
metric.attach(default_evaluator, "accuracy")
y_true = torch.tensor([1, 0, 1, 1, 0, 1])
y_pred = torch.tensor([0.6, 0.2, 0.9, 0.4, 0.7, 0.65])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["accuracy"])
.. testoutput:: 4
0.6666...
"""
_state_dict_all_req_keys = ("_num_correct", "_num_examples")
def __init__(
self,
output_transform: Callable = lambda x: x,
is_multilabel: bool = False,
device: Union[str, torch.device] = torch.device("cpu"),
):
super(Accuracy, self).__init__(output_transform=output_transform, is_multilabel=is_multilabel, device=device)
@reinit__is_reduced
def reset(self) -> None:
self._num_correct = torch.tensor(0, device=self._device)
self._num_examples = 0
super(Accuracy, self).reset()
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape(output)
self._check_type(output)
y_pred, y = output[0].detach(), output[1].detach()
if self._type == "binary":
correct = torch.eq(y_pred.view(-1).to(y), y.view(-1))
elif self._type == "multiclass":
indices = torch.argmax(y_pred, dim=1)
correct = torch.eq(indices, y).view(-1)
elif self._type == "multilabel":
# if y, y_pred shape is (N, C, ...) -> (N x ..., C)
num_classes = y_pred.size(1)
last_dim = y_pred.ndimension()
y_pred = torch.transpose(y_pred, 1, last_dim - 1).reshape(-1, num_classes)
y = torch.transpose(y, 1, last_dim - 1).reshape(-1, num_classes)
correct = torch.all(y == y_pred.type_as(y), dim=-1)
self._num_correct += torch.sum(correct).to(self._device)
self._num_examples += correct.shape[0]
@sync_all_reduce("_num_examples", "_num_correct")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("Accuracy must have at least one example before it can be computed.")
return self._num_correct.item() / self._num_examples
|
from typing import Callable, Sequence, Union
import torch
from torch.nn.functional import pairwise_distance
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
__all__ = ["MeanPairwiseDistance"]
class MeanPairwiseDistance(Metric):
"""Calculates the mean :class:`~torch.nn.PairwiseDistance`.
Average of pairwise distances computed on provided batches.
- ``update`` must receive output of the form ``(y_pred, y)``.
Args:
p: the norm degree. Default: 2
eps: Small value to avoid division by zero. Default: 1e-6
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
To use with ``Engine`` and ``process_function``, simply attach the metric instance to the engine.
The output of the engine's ``process_function`` needs to be in the format of
``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y, ...}``. If not, ``output_transform`` can be added
to the metric to transform the output into the form expected by the metric.
``y_pred`` and ``y`` should have the same shape.
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = MeanPairwiseDistance(p=4)
metric.attach(default_evaluator, 'mpd')
preds = torch.tensor([
[1, 2, 4, 1],
[2, 3, 1, 5],
[1, 3, 5, 1],
[1, 5, 1 ,11]
])
target = preds * 0.75
state = default_evaluator.run([[preds, target]])
print(state.metrics['mpd'])
.. testoutput::
1.5955...
"""
_state_dict_all_req_keys = ("_sum_of_distances", "_num_examples")
def __init__(
self,
p: int = 2,
eps: float = 1e-6,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(MeanPairwiseDistance, self).__init__(output_transform, device=device)
self._p = p
self._eps = eps
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_distances = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
distances = pairwise_distance(y_pred, y, p=self._p, eps=self._eps)
self._sum_of_distances += torch.sum(distances).to(self._device)
self._num_examples += y.shape[0]
@sync_all_reduce("_sum_of_distances", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError("MeanAbsoluteError must have at least one example before it can be computed.")
return self._sum_of_distances.item() / self._num_examples
|
import numbers
from typing import Callable, Optional, Sequence, Tuple, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
from ignite.metrics.metrics_lambda import MetricsLambda
__all__ = ["ConfusionMatrix", "mIoU", "IoU", "DiceCoefficient", "cmAccuracy", "cmPrecision", "cmRecall", "JaccardIndex"]
class ConfusionMatrix(Metric):
"""Calculates confusion matrix for multi-class data.
- ``update`` must receive output of the form ``(y_pred, y)``.
- `y_pred` must contain logits and have the following shape (batch_size, num_classes, ...).
If you are doing binary classification, see Note for an example on how to get this.
- `y` should have the following shape (batch_size, ...) and contains ground-truth class indices
with or without the background class. During the computation, argmax of `y_pred` is taken to determine
predicted classes.
Args:
num_classes: Number of classes, should be > 1. See notes for more details.
average: confusion matrix values averaging schema: None, "samples", "recall", "precision".
Default is None. If `average="samples"` then confusion matrix values are normalized by the number of seen
samples. If `average="recall"` then confusion matrix values are normalized such that diagonal values
represent class recalls. If `average="precision"` then confusion matrix values are normalized such that
diagonal values represent class precisions.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
device: specifies which device updates are accumulated on. Setting the metric's
device to be the same as your ``update`` arguments ensures the ``update`` method is non-blocking. By
default, CPU.
Note:
The confusion matrix is formatted such that columns are predictions and rows are targets.
For example, if you were to plot the matrix, you could correctly assign to the horizontal axis
the label "predicted values" and to the vertical axis the label "actual values".
Note:
In case of the targets `y` in `(batch_size, ...)` format, only target indices between 0 and `num_classes` - 1
contribute to the confusion matrix; other indices are ignored. For example, if `num_classes=20` and a target index
equal to 255 is encountered, then it is filtered out.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode:: 1
metric = ConfusionMatrix(num_classes=3)
metric.attach(default_evaluator, 'cm')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['cm'])
.. testoutput:: 1
tensor([[1, 1, 0],
[0, 2, 0],
[0, 1, 0]])
If you are doing binary classification with a single output unit, you may have to transform your network output,
so that you have one value for each class. E.g. you can transform your network output into a one-hot vector
with:
.. testcode:: 2
def binary_one_hot_output_transform(output):
y_pred, y = output
y_pred = torch.sigmoid(y_pred).round().long()
y_pred = ignite.utils.to_onehot(y_pred, 2)
y = y.long()
return y_pred, y
metric = ConfusionMatrix(num_classes=2, output_transform=binary_one_hot_output_transform)
metric.attach(default_evaluator, 'cm')
y_true = torch.tensor([0, 1, 0, 1, 0])
y_pred = torch.tensor([0, 0, 1, 1, 0])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['cm'])
.. testoutput:: 2
tensor([[2, 1],
[1, 1]])
"""
_state_dict_all_req_keys = ("confusion_matrix", "_num_examples")
def __init__(
self,
num_classes: int,
average: Optional[str] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
):
if average is not None and average not in ("samples", "recall", "precision"):
raise ValueError("Argument average can None or one of 'samples', 'recall', 'precision'")
if num_classes <= 1:
raise ValueError("Argument num_classes needs to be > 1")
self.num_classes = num_classes
self._num_examples = 0
self.average = average
super(ConfusionMatrix, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self) -> None:
self.confusion_matrix = torch.zeros(self.num_classes, self.num_classes, dtype=torch.int64, device=self._device)
self._num_examples = 0
def _check_shape(self, output: Sequence[torch.Tensor]) -> None:
y_pred, y = output[0].detach(), output[1].detach()
if y_pred.ndimension() < 2:
raise ValueError(
f"y_pred must have shape (batch_size, num_classes (currently set to {self.num_classes}), ...), "
f"but given {y_pred.shape}"
)
if y_pred.shape[1] != self.num_classes:
raise ValueError(f"y_pred does not have correct number of classes: {y_pred.shape[1]} vs {self.num_classes}")
if not (y.ndimension() + 1 == y_pred.ndimension()):
raise ValueError(
f"y_pred must have shape (batch_size, num_classes (currently set to {self.num_classes}), ...) "
"and y must have shape of (batch_size, ...), "
f"but given {y.shape} vs {y_pred.shape}."
)
y_shape = y.shape
y_pred_shape: Tuple[int, ...] = y_pred.shape
if y.ndimension() + 1 == y_pred.ndimension():
y_pred_shape = (y_pred_shape[0],) + y_pred_shape[2:]
if y_shape != y_pred_shape:
raise ValueError("y and y_pred must have compatible shapes.")
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._check_shape(output)
y_pred, y = output[0].detach(), output[1].detach()
self._num_examples += y_pred.shape[0]
# target is (batch_size, ...)
y_pred = torch.argmax(y_pred, dim=1).flatten()
y = y.flatten()
target_mask = (y >= 0) & (y < self.num_classes)
y = y[target_mask]
y_pred = y_pred[target_mask]
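# Encode each (target, prediction) pair as a single index target * num_classes + prediction,
# count the indices with bincount and reshape the counts into a num_classes x num_classes
# matrix: rows correspond to target classes, columns to predicted classes.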
indices = self.num_classes * y + y_pred
m = torch.bincount(indices, minlength=self.num_classes**2).reshape(self.num_classes, self.num_classes)
self.confusion_matrix += m.to(self.confusion_matrix)
@sync_all_reduce("confusion_matrix", "_num_examples")
def compute(self) -> torch.Tensor:
if self._num_examples == 0:
raise NotComputableError("Confusion matrix must have at least one example before it can be computed.")
if self.average:
self.confusion_matrix = self.confusion_matrix.float()
if self.average == "samples":
return self.confusion_matrix / self._num_examples
else:
return self.normalize(self.confusion_matrix, self.average)
return self.confusion_matrix
@staticmethod
def normalize(matrix: torch.Tensor, average: str) -> torch.Tensor:
"""Normalize given `matrix` with given `average`."""
if average == "recall":
return matrix / (matrix.sum(dim=1).unsqueeze(1) + 1e-15)
elif average == "precision":
return matrix / (matrix.sum(dim=0) + 1e-15)
else:
raise ValueError("Argument average should be one of 'samples', 'recall', 'precision'")
def IoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
r"""Calculates Intersection over Union using :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
.. math:: \text{J}(A, B) = \frac{ \lvert A \cap B \rvert }{ \lvert A \cup B \rvert }
Args:
cm: instance of confusion matrix metric
ignore_index: index to ignore, e.g. background index
Returns:
MetricsLambda
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
cm = ConfusionMatrix(num_classes=3)
metric = IoU(cm)
metric.attach(default_evaluator, 'iou')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['iou'])
.. testoutput::
tensor([0.5000, 0.5000, 0.0000], dtype=torch.float64)
"""
if not isinstance(cm, ConfusionMatrix):
raise TypeError(f"Argument cm should be instance of ConfusionMatrix, but given {type(cm)}")
if not (cm.average in (None, "samples")):
raise ValueError("ConfusionMatrix should have average attribute either None or 'samples'")
if ignore_index is not None:
if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes):
raise ValueError(
f"ignore_index should be integer and in the range of [0, {cm.num_classes}), but given {ignore_index}"
)
# Increase floating point precision
cm = cm.to(torch.double)
iou: MetricsLambda = cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) - cm.diag() + 1e-15)
if ignore_index is not None:
ignore_idx: int = ignore_index # used due to typing issues with mypy
def ignore_index_fn(iou_vector: torch.Tensor) -> torch.Tensor:
if ignore_idx >= len(iou_vector):
raise ValueError(f"ignore_index {ignore_idx} is larger than the length of IoU vector {len(iou_vector)}")
indices = list(range(len(iou_vector)))
indices.remove(ignore_idx)
return iou_vector[indices]
return MetricsLambda(ignore_index_fn, iou)
else:
return iou
def mIoU(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
"""Calculates mean Intersection over Union using :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
ignore_index: index to ignore, e.g. background index
Returns:
MetricsLambda
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
cm = ConfusionMatrix(num_classes=3)
metric = mIoU(cm, ignore_index=0)
metric.attach(default_evaluator, 'miou')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['miou'])
.. testoutput::
0.24999...
"""
iou: MetricsLambda = IoU(cm=cm, ignore_index=ignore_index).mean()
return iou
def cmAccuracy(cm: ConfusionMatrix) -> MetricsLambda:
"""Calculates accuracy using :class:`~ignite.metrics.metric.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
Returns:
MetricsLambda
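Examples:
A minimal usage sketch, assuming the same ``default_evaluator`` used by the other examples in this
module; the output is intentionally not asserted here:
.. code-block:: python
cm = ConfusionMatrix(num_classes=3)
metric = cmAccuracy(cm)
metric.attach(default_evaluator, 'cm_accuracy')
y_true = torch.tensor([0, 1, 2])
y_pred = torch.tensor([
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['cm_accuracy'])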
"""
# Increase floating point precision
cm = cm.to(torch.double)
accuracy: MetricsLambda = cm.diag().sum() / (cm.sum() + 1e-15)
return accuracy
def cmPrecision(cm: ConfusionMatrix, average: bool = True) -> MetricsLambda:
"""Calculates precision using :class:`~ignite.metrics.metric.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
average: if True metric value is averaged over all classes
Returns:
MetricsLambda
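Examples:
A minimal usage sketch, assuming the same ``default_evaluator`` used by the other examples in this
module; the output is intentionally not asserted here:
.. code-block:: python
cm = ConfusionMatrix(num_classes=2)
metric = cmPrecision(cm, average=False)  # per-class precision vector
metric.attach(default_evaluator, 'cm_precision')
y_true = torch.tensor([0, 1, 0, 1])
y_pred = torch.tensor([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['cm_precision'])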
"""
# Increase floating point precision
cm = cm.to(torch.double)
precision: MetricsLambda = cm.diag() / (cm.sum(dim=0) + 1e-15)
if average:
mean: MetricsLambda = precision.mean()
return mean
return precision
def cmRecall(cm: ConfusionMatrix, average: bool = True) -> MetricsLambda:
"""
Calculates recall using :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
average: if True metric value is averaged over all classes
Returns:
MetricsLambda
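Examples:
A minimal usage sketch, assuming the same ``default_evaluator`` used by the other examples in this
module; the output is intentionally not asserted here:
.. code-block:: python
cm = ConfusionMatrix(num_classes=2)
metric = cmRecall(cm)  # average=True (default) returns the mean recall over classes
metric.attach(default_evaluator, 'cm_recall')
y_true = torch.tensor([0, 1, 0, 1])
y_pred = torch.tensor([[1.0, 0.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['cm_recall'])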
"""
# Increase floating point precision
cm = cm.to(torch.double)
recall: MetricsLambda = cm.diag() / (cm.sum(dim=1) + 1e-15)
if average:
mean: MetricsLambda = recall.mean()
return mean
return recall
def DiceCoefficient(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
"""Calculates Dice Coefficient for a given :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
Args:
cm: instance of confusion matrix metric
ignore_index: index to ignore, e.g. background index
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
cm = ConfusionMatrix(num_classes=3)
metric = DiceCoefficient(cm, ignore_index=0)
metric.attach(default_evaluator, 'dice')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['dice'])
.. testoutput::
tensor([0.6667, 0.0000], dtype=torch.float64)
"""
if not isinstance(cm, ConfusionMatrix):
raise TypeError(f"Argument cm should be instance of ConfusionMatrix, but given {type(cm)}")
if ignore_index is not None:
if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes):
raise ValueError(
f"ignore_index should be integer and in the range of [0, {cm.num_classes}), but given {ignore_index}"
)
# Increase floating point precision
cm = cm.to(torch.double)
dice: MetricsLambda = 2.0 * cm.diag() / (cm.sum(dim=1) + cm.sum(dim=0) + 1e-15)
if ignore_index is not None:
ignore_idx: int = ignore_index # used due to typing issues with mypy
def ignore_index_fn(dice_vector: torch.Tensor) -> torch.Tensor:
if ignore_idx >= len(dice_vector):
raise ValueError(
f"ignore_index {ignore_idx} is larger than the length of Dice vector {len(dice_vector)}"
)
indices = list(range(len(dice_vector)))
indices.remove(ignore_idx)
return dice_vector[indices]
return MetricsLambda(ignore_index_fn, dice)
else:
return dice
def JaccardIndex(cm: ConfusionMatrix, ignore_index: Optional[int] = None) -> MetricsLambda:
r"""Calculates the Jaccard Index using :class:`~ignite.metrics.confusion_matrix.ConfusionMatrix` metric.
Implementation is based on :meth:`~ignite.metrics.IoU`.
.. math:: \text{J}(A, B) = \frac{ \lvert A \cap B \rvert }{ \lvert A \cup B \rvert }
Args:
cm: instance of confusion matrix metric
ignore_index: index to ignore, e.g. background index
Returns:
MetricsLambda
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
cm = ConfusionMatrix(num_classes=3)
metric = JaccardIndex(cm, ignore_index=0)
metric.attach(default_evaluator, 'jac')
y_true = torch.tensor([0, 1, 0, 1, 2])
y_pred = torch.tensor([
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 1.0, 0.0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics['jac'])
.. testoutput::
tensor([0.5000, 0.0000], dtype=torch.float64)
"""
return IoU(cm, ignore_index)
|
import json
from typing import Callable, Collection, Dict, List, Optional, Union
import torch
from ignite.metrics.fbeta import Fbeta
from ignite.metrics.metrics_lambda import MetricsLambda
from ignite.metrics.precision import Precision
from ignite.metrics.recall import Recall
__all__ = ["ClassificationReport"]
def ClassificationReport(
beta: int = 1,
output_dict: bool = False,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
is_multilabel: bool = False,
labels: Optional[List[str]] = None,
) -> MetricsLambda:
r"""Build a text report showing the main classification metrics. The report resembles in functionality to
`scikit-learn classification_report
<https://scikit-learn.org/stable/modules/generated/sklearn.metrics.classification_report.html#sklearn.metrics.classification_report>`_
The underlying implementation doesn't use the sklearn function.
Args:
beta: weight of precision in harmonic mean
output_dict: If True, return output as dict, otherwise return a str
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
is_multilabel: If True, the tensors are assumed to be multilabel.
device: optional device specification for internal storage.
labels: Optional list of label indices to include in the report
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
Multiclass case
.. testcode:: 1
metric = ClassificationReport(output_dict=True)
metric.attach(default_evaluator, "cr")
y_true = torch.tensor([2, 0, 2, 1, 0, 1])
y_pred = torch.tensor([
[0.0266, 0.1719, 0.3055],
[0.6886, 0.3978, 0.8176],
[0.9230, 0.0197, 0.8395],
[0.1785, 0.2670, 0.6084],
[0.8448, 0.7177, 0.7288],
[0.7748, 0.9542, 0.8573],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["cr"].keys())
print(state.metrics["cr"]["0"])
print(state.metrics["cr"]["1"])
print(state.metrics["cr"]["2"])
print(state.metrics["cr"]["macro avg"])
.. testoutput:: 1
dict_keys(['0', '1', '2', 'macro avg'])
{'precision': 0.5, 'recall': 0.5, 'f1-score': 0.4999...}
{'precision': 1.0, 'recall': 0.5, 'f1-score': 0.6666...}
{'precision': 0.3333..., 'recall': 0.5, 'f1-score': 0.3999...}
{'precision': 0.6111..., 'recall': 0.5, 'f1-score': 0.5222...}
Multilabel case, the shapes must be (batch_size, num_categories, ...)
.. testcode:: 2
metric = ClassificationReport(output_dict=True, is_multilabel=True)
metric.attach(default_evaluator, "cr")
y_true = torch.tensor([
[0, 0, 1],
[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 1],
])
y_pred = torch.tensor([
[1, 1, 0],
[1, 0, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
])
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["cr"].keys())
print(state.metrics["cr"]["0"])
print(state.metrics["cr"]["1"])
print(state.metrics["cr"]["2"])
print(state.metrics["cr"]["macro avg"])
.. testoutput:: 2
dict_keys(['0', '1', '2', 'macro avg'])
{'precision': 0.2, 'recall': 1.0, 'f1-score': 0.3333...}
{'precision': 0.5, 'recall': 1.0, 'f1-score': 0.6666...}
{'precision': 0.0, 'recall': 0.0, 'f1-score': 0.0}
{'precision': 0.2333..., 'recall': 0.6666..., 'f1-score': 0.3333...}
"""
# setup all the underlying metrics
precision = Precision(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device)
recall = Recall(average=False, is_multilabel=is_multilabel, output_transform=output_transform, device=device)
fbeta = Fbeta(beta, average=False, precision=precision, recall=recall)
averaged_precision = precision.mean()
averaged_recall = recall.mean()
averaged_fbeta = fbeta.mean()
def _wrapper(
re: torch.Tensor, pr: torch.Tensor, f: torch.Tensor, a_re: torch.Tensor, a_pr: torch.Tensor, a_f: torch.Tensor
) -> Union[Collection[str], Dict]:
if pr.shape != re.shape:
raise ValueError(
"Internal error: Precision and Recall have mismatched shapes: "
f"{pr.shape} vs {re.shape}. Please, open an issue "
"with a reference on this error. Thank you!"
)
dict_obj = {}
for idx, p_label in enumerate(pr):
dict_obj[_get_label_for_class(idx)] = {
"precision": p_label.item(),
"recall": re[idx].item(),
"f{0}-score".format(beta): f[idx].item(),
}
dict_obj["macro avg"] = {
"precision": a_pr.item(),
"recall": a_re.item(),
"f{0}-score".format(beta): a_f.item(),
}
return dict_obj if output_dict else json.dumps(dict_obj)
# helper method to get a label for a given class
def _get_label_for_class(idx: int) -> str:
return labels[idx] if labels else str(idx)
return MetricsLambda(_wrapper, recall, precision, fbeta, averaged_recall, averaged_precision, averaged_fbeta)
|
from ignite.metrics.gan.fid import FID
from ignite.metrics.gan.inception_score import InceptionScore
__all__ = [
"InceptionScore",
"FID",
]
|
from typing import Callable, Optional, Union
import torch
from packaging.version import Version
from ignite.metrics.metric import Metric
class InceptionModel(torch.nn.Module):
r"""Inception Model pre-trained on the ImageNet Dataset.
Args:
return_features: set it to `True` if you want the model to return features from the last pooling
layer instead of prediction probabilities.
device: specifies the device on which the model is stored and on which input tensors are
evaluated (inputs are moved to this device in ``forward``). By default, CPU.
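Examples:
A minimal sketch; it assumes ``torchvision`` is available and note that constructing the model
downloads the pretrained ImageNet weights:
.. code-block:: python
model = InceptionModel(return_features=True)
features = model(torch.rand(2, 3, 299, 299))  # expected shape: (2, 2048)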
"""
def __init__(self, return_features: bool, device: Union[str, torch.device] = "cpu") -> None:
try:
import torchvision
from torchvision import models
except ImportError:
raise ModuleNotFoundError("This module requires torchvision to be installed.")
super(InceptionModel, self).__init__()
self._device = device
if Version(torchvision.__version__) < Version("0.13.0"):
model_kwargs = {"pretrained": True}
else:
model_kwargs = {"weights": models.Inception_V3_Weights.DEFAULT}
self.model = models.inception_v3(**model_kwargs).to(self._device)
if return_features:
self.model.fc = torch.nn.Identity()
else:
self.model.fc = torch.nn.Sequential(self.model.fc, torch.nn.Softmax(dim=1))
self.model.eval()
@torch.no_grad()
def forward(self, data: torch.Tensor) -> torch.Tensor:
if data.dim() != 4:
raise ValueError(f"Inputs should be a tensor of dim 4, got {data.dim()}")
if data.shape[1] != 3:
raise ValueError(f"Inputs should be a tensor with 3 channels, got {data.shape}")
if data.device != torch.device(self._device):
data = data.to(self._device)
return self.model(data)
class _BaseInceptionMetric(Metric):
def __init__(
self,
num_features: Optional[int],
feature_extractor: Optional[torch.nn.Module],
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
if num_features is None:
raise ValueError("Argument num_features must be provided, if feature_extractor is specified.")
if feature_extractor is None:
feature_extractor = torch.nn.Identity()
if num_features <= 0:
raise ValueError(f"Argument num_features must be greater to zero, got: {num_features}")
if not isinstance(feature_extractor, torch.nn.Module):
raise TypeError(
f"Argument feature_extractor must be of type torch.nn.Module, got {type(self._feature_extractor)}"
)
self._num_features = num_features
self._feature_extractor = feature_extractor.to(device)
super(_BaseInceptionMetric, self).__init__(output_transform=output_transform, device=device)
def _check_feature_shapes(self, samples: torch.Tensor) -> None:
if samples.dim() != 2:
raise ValueError(f"feature_extractor output must be a tensor of dim 2, got: {samples.dim()}")
if samples.shape[0] == 0:
raise ValueError(f"Batch size should be greater than one, got: {samples.shape[0]}")
if samples.shape[1] != self._num_features:
raise ValueError(
f"num_features returned by feature_extractor should be {self._num_features}, got: {samples.shape[1]}"
)
def _extract_features(self, inputs: torch.Tensor) -> torch.Tensor:
inputs = inputs.detach()
if inputs.device != torch.device(self._device):
inputs = inputs.to(self._device)
with torch.no_grad():
outputs = self._feature_extractor(inputs).to(self._device, dtype=torch.float64)
self._check_feature_shapes(outputs)
return outputs
|
import warnings
from typing import Callable, Optional, Sequence, Union
import torch
from packaging.version import Version
from ignite.metrics.gan.utils import _BaseInceptionMetric, InceptionModel
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
__all__ = [
"FID",
]
if Version(torch.__version__) <= Version("1.7.0"):
torch_outer = torch.ger
else:
torch_outer = torch.outer
def fid_score(
mu1: torch.Tensor, mu2: torch.Tensor, sigma1: torch.Tensor, sigma2: torch.Tensor, eps: float = 1e-6
) -> float:
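"""Compute the Frechet distance between two Gaussians given their means and covariances
(this is the quantity reported as the FID score).
Args:
mu1: mean feature vector of the first distribution, shape ``(num_features,)``.
mu2: mean feature vector of the second distribution, shape ``(num_features,)``.
sigma1: covariance matrix of the first distribution, shape ``(num_features, num_features)``.
sigma2: covariance matrix of the second distribution, shape ``(num_features, num_features)``.
eps: value used in a fallback computation when the product of the covariances is nearly singular.
A minimal standalone sketch (the values are illustrative only):
.. code-block:: python
mu = torch.zeros(4, dtype=torch.float64)
sigma = torch.eye(4, dtype=torch.float64)
print(fid_score(mu, mu, sigma, sigma))  # identical statistics give (numerically) ~0.0
"""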
try:
import numpy as np
except ImportError:
raise ModuleNotFoundError("fid_score requires numpy to be installed.")
try:
import scipy.linalg
except ImportError:
raise ModuleNotFoundError("fid_score requires scipy to be installed.")
mu1, mu2 = mu1.cpu(), mu2.cpu()
sigma1, sigma2 = sigma1.cpu(), sigma2.cpu()
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = scipy.linalg.sqrtm(sigma1.mm(sigma2), disp=False)
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
if not np.isfinite(covmean).all():
tr_covmean = np.sum(np.sqrt(((np.diag(sigma1) * eps) * (np.diag(sigma2) * eps)) / (eps * eps)))
return float(diff.dot(diff).item() + torch.trace(sigma1) + torch.trace(sigma2) - 2 * tr_covmean)
class FID(_BaseInceptionMetric):
r"""Calculates Frechet Inception Distance.
.. math::
\text{FID} = \|\mu_{1} - \mu_{2}\|^{2} + \text{Tr}(\sigma_{1} + \sigma_{2} - 2\sqrt{\sigma_1 \cdot \sigma_2})
where :math:`\mu_1` and :math:`\sigma_1` refer to the mean and covariance of the train data and
:math:`\mu_2` and :math:`\sigma_2` refer to the mean and covariance of the test data.
More details can be found in `Heusel et al. 2017`__
__ https://arxiv.org/pdf/1706.08500.pdf
In addition, a faster and online computation approach can be found in `Chen et al. 2020`__
__ https://arxiv.org/pdf/2009.14075.pdf
Remark:
This implementation is inspired by `pytorch_fid` package which can be found `here`__
__ https://github.com/mseitzer/pytorch-fid
.. note::
The default Inception model requires the `torchvision` module to be installed.
FID also requires `scipy` library for matrix square root calculations.
Args:
num_features: number of features predicted by the model or the reduced feature vector of the image.
Default value is 2048.
feature_extractor: a torch Module for extracting the features from the input data.
It returns a tensor of shape (batch_size, num_features).
If neither ``num_features`` nor ``feature_extractor`` are defined, by default we use an ImageNet
pretrained Inception Model. If only ``num_features`` is defined but ``feature_extractor`` is not
defined, ``feature_extractor`` is assigned Identity Function.
Please note that the model will be implicitly converted to the device mentioned in the ``device``
argument.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``(y_pred, y)`` or ``{'y_pred': y_pred, 'y': y}``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. testcode::
metric = FID(num_features=1, feature_extractor=default_model)
metric.attach(default_evaluator, "fid")
y_true = torch.ones(10, 4)
y_pred = torch.ones(10, 4)
state = default_evaluator.run([[y_pred, y_true]])
print(state.metrics["fid"])
.. testoutput::
0.0
.. note::
The default `torchvision` model used is InceptionV3 pretrained on ImageNet.
This can lead to differences in results with `pytorch_fid`. To find comparable results,
the following model wrapper should be used:
.. code::
import torch.nn as nn
# wrapper class as feature_extractor
class WrapperInceptionV3(nn.Module):
def __init__(self, fid_incv3):
super().__init__()
self.fid_incv3 = fid_incv3
@torch.no_grad()
def forward(self, x):
y = self.fid_incv3(x)
y = y[0]
y = y[:, :, 0, 0]
return y
# use cpu rather than cuda to get comparable results
device = "cpu"
# pytorch_fid model
dims = 2048
block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[dims]
model = InceptionV3([block_idx]).to(device)
# wrapper model to pytorch_fid model
wrapper_model = WrapperInceptionV3(model)
wrapper_model.eval();
# comparable metric
pytorch_fid_metric = FID(num_features=dims, feature_extractor=wrapper_model)
Important: `pytorch_fid` results depend on the batch size if the device is `cuda`.
.. versionadded:: 0.4.6
"""
_state_dict_all_req_keys = ("_num_examples", "_train_total", "_test_total", "_train_sigma", "_test_sigma")
def __init__(
self,
num_features: Optional[int] = None,
feature_extractor: Optional[torch.nn.Module] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
try:
import numpy as np # noqa: F401
except ImportError:
raise ModuleNotFoundError("This module requires numpy to be installed.")
try:
import scipy # noqa: F401
except ImportError:
raise ModuleNotFoundError("This module requires scipy to be installed.")
if num_features is None and feature_extractor is None:
num_features = 1000
feature_extractor = InceptionModel(return_features=False, device=device)
self._eps = 1e-6
super(FID, self).__init__(
num_features=num_features,
feature_extractor=feature_extractor,
output_transform=output_transform,
device=device,
)
@staticmethod
def _online_update(features: torch.Tensor, total: torch.Tensor, sigma: torch.Tensor) -> None:
total += features
sigma += torch_outer(features, features)
def _get_covariance(self, sigma: torch.Tensor, total: torch.Tensor) -> torch.Tensor:
r"""
Calculates covariance from mean and sum of products of variables
"""
sub_matrix = torch_outer(total, total)
sub_matrix = sub_matrix / self._num_examples
return (sigma - sub_matrix) / (self._num_examples - 1)
@reinit__is_reduced
def reset(self) -> None:
self._train_sigma = torch.zeros(
(self._num_features, self._num_features), dtype=torch.float64, device=self._device
)
self._train_total = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
self._test_sigma = torch.zeros(
(self._num_features, self._num_features), dtype=torch.float64, device=self._device
)
self._test_total = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
self._num_examples: int = 0
super(FID, self).reset()
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
train, test = output
train_features = self._extract_features(train)
test_features = self._extract_features(test)
if train_features.shape[0] != test_features.shape[0] or train_features.shape[1] != test_features.shape[1]:
raise ValueError(
f"""
Number of Training Features and Testing Features should be equal ({train_features.shape} != {test_features.shape})
"""
)
# Updates the mean and covariance for the train features
for features in train_features:
self._online_update(features, self._train_total, self._train_sigma)
# Updates the mean and covariance for the test features
for features in test_features:
self._online_update(features, self._test_total, self._test_sigma)
self._num_examples += train_features.shape[0]
@sync_all_reduce("_num_examples", "_train_total", "_test_total", "_train_sigma", "_test_sigma")
def compute(self) -> float:
fid = fid_score(
mu1=self._train_total / self._num_examples,
mu2=self._test_total / self._num_examples,
sigma1=self._get_covariance(self._train_sigma, self._train_total),
sigma2=self._get_covariance(self._test_sigma, self._test_total),
eps=self._eps,
)
if torch.isnan(torch.tensor(fid)) or torch.isinf(torch.tensor(fid)):
warnings.warn("The product of covariance of train and test features is out of bounds.")
return fid
|
from typing import Callable, Optional, Union
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.gan.utils import _BaseInceptionMetric, InceptionModel
# These decorators help with distributed settings
from ignite.metrics.metric import reinit__is_reduced, sync_all_reduce
__all__ = ["InceptionScore"]
class InceptionScore(_BaseInceptionMetric):
r"""Calculates Inception Score.
.. math::
\text{IS(G)} = \exp\left(\frac{1}{N}\sum_{i=1}^{N} D_{KL}\left(p(y|x^{(i)}) \parallel \hat{p}(y)\right)\right)
where :math:`p(y|x)` is the conditional class distribution predicted for a generated image :math:`x`,
:math:`\hat{p}(y)` is the marginal class distribution over the generated samples, `G` refers to the
generator and :math:`D_{KL}` refers to the KL divergence between the above mentioned distributions.
More details can be found in `Barratt et al. 2018`__.
__ https://arxiv.org/pdf/1801.01973.pdf
Args:
num_features: number of features predicted by the model or number of classes of the model. Default
value is 1000.
feature_extractor: a torch Module for predicting the probabilities from the input data.
It returns a tensor of shape (batch_size, num_features).
If neither ``num_features`` nor ``feature_extractor`` are defined, by default we use an ImageNet
pretrained Inception Model. If only ``num_features`` is defined but ``feature_extractor`` is not
defined, ``feature_extractor`` is assigned Identity Function.
Please note that the class object will be implicitly converted to the device mentioned in the
``device`` argument.
output_transform: a callable that is used to transform the
:class:`~ignite.engine.engine.Engine`'s ``process_function``'s output into the
form expected by the metric. This can be useful if, for example, you have a multi-output model and
you want to compute the metric with respect to one of the outputs.
By default, metrics require the output as ``y_pred``.
device: specifies which device updates are accumulated on. Setting the
metric's device to be the same as your ``update`` arguments ensures the ``update`` method is
non-blocking. By default, CPU.
.. note::
The default Inception model requires the `torchvision` module to be installed.
Examples:
For more information on how metric works with :class:`~ignite.engine.engine.Engine`, visit :ref:`attach-engine`.
.. include:: defaults.rst
:start-after: :orphan:
.. code-block:: python
metric = InceptionScore()
metric.attach(default_evaluator, "is")
y = torch.rand(10, 3, 299, 299)
state = default_evaluator.run([y])
print(state.metrics["is"])
.. testcode::
metric = InceptionScore(num_features=1, feature_extractor=default_model)
metric.attach(default_evaluator, "is")
y = torch.zeros(10, 4)
state = default_evaluator.run([y])
print(state.metrics["is"])
.. testoutput::
1.0
.. versionadded:: 0.4.6
"""
_state_dict_all_req_keys = ("_num_examples", "_prob_total", "_total_kl_d")
def __init__(
self,
num_features: Optional[int] = None,
feature_extractor: Optional[torch.nn.Module] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
if num_features is None and feature_extractor is None:
num_features = 1000
feature_extractor = InceptionModel(return_features=False, device=device)
self._eps = 1e-16
super(InceptionScore, self).__init__(
num_features=num_features,
feature_extractor=feature_extractor,
output_transform=output_transform,
device=device,
)
@reinit__is_reduced
def reset(self) -> None:
self._num_examples = 0
self._prob_total = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
self._total_kl_d = torch.zeros(self._num_features, dtype=torch.float64, device=self._device)
super(InceptionScore, self).reset()
@reinit__is_reduced
def update(self, output: torch.Tensor) -> None:
probabilities = self._extract_features(output)
prob_sum = torch.sum(probabilities, 0, dtype=torch.float64)
log_prob = torch.log(probabilities + self._eps)
if log_prob.dtype != probabilities.dtype:
log_prob = log_prob.to(probabilities)
kl_sum = torch.sum(probabilities * log_prob, 0, dtype=torch.float64)
self._num_examples += probabilities.shape[0]
self._prob_total += prob_sum
self._total_kl_d += kl_sum
@sync_all_reduce("_num_examples", "_prob_total", "_total_kl_d")
def compute(self) -> float:
if self._num_examples == 0:
raise NotComputableError("InceptionScore must have at least one example before it can be computed.")
mean_probs = self._prob_total / self._num_examples
log_mean_probs = torch.log(mean_probs + self._eps)
if log_mean_probs.dtype != self._prob_total.dtype:
log_mean_probs = log_mean_probs.to(self._prob_total)
excess_entropy = self._prob_total * log_mean_probs
avg_kl_d = torch.sum(self._total_kl_d - excess_entropy) / self._num_examples
return torch.exp(avg_kl_d).item()
|