"""
Compute the benchmark score given a frozen score configuration and current benchmark data.
"""
import argparse
import json
import math
import sys
import os
import re
import yaml
import importlib
from tabulate import tabulate
from pathlib import Path
from collections import defaultdict
from .generate_score_config import generate_bench_cfg
TORCHBENCH_V0_REF_DATA = Path(__file__).parent.joinpath("configs/v0/config-v0.yaml")
def _get_model_task(model_name):
"""
    Helper function which extracts the task the model belongs to
    by importing the model's module and reading its Model.task attribute.
"""
try:
module = importlib.import_module(f'torchbenchmark.models.{model_name}', package=__name__)
    except Exception as e:
        raise ValueError(f"Unable to get task for model: {model_name}") from e
Model = getattr(module, 'Model')
return Model.task.value
class TorchBenchScoreV0:
def __init__(self, ref_data, spec, target):
self.spec = spec
self.target = target
if not ref_data:
ref_data = TORCHBENCH_V0_REF_DATA
self.ref_data = ref_data
self.weights = None
self.norm = None
# V0: setup weights and benchmark norms
self._setup_weights()
self._setup_benchmark_norms()
def _setup_weights(self):
"""
        Calculates the static benchmark weights by iterating over the spec
        file and constructing a dictionary that maps each task to the
        weight of a single benchmark within that task.
"""
# Load the spec file
with open(self.spec) as spec_file:
self.spec = yaml.full_load(spec_file)
self.weights = defaultdict(float)
category_spec = self.spec['hierarchy']['model']
        domain_weight = 1.0 / len(category_spec)
for domain in category_spec:
tasks = category_spec[domain]
task_weight = 1.0 / len(tasks)
for task in tasks:
benchmarks = tasks[task]
benchmark_weight = 1.0 / len(benchmarks)
self.weights[task] = domain_weight * task_weight * benchmark_weight
def _setup_benchmark_norms(self):
"""
Helper function which gets the normalization values per benchmark
by going through the reference data file.
"""
if self.ref_data == TORCHBENCH_V0_REF_DATA:
with open(self.ref_data) as ref_file:
ref = yaml.full_load(ref_file)
self.norm = {b: ref['benchmarks'][b]['norm'] for b in ref['benchmarks']}
else:
self.norm = {b['name']: b['stats']['mean'] for b in self.ref_data['benchmarks']}
def get_score_per_config(self, data, weighted_score=False):
"""
        Iterates over the found benchmarks and calculates a weighted
        score for each one. A score_db is then constructed to accumulate
        the score per config, where a config refers to the (test, device,
        mode) combination the benchmark was run on.
        For example, if the benchmark was run in eval mode on a GPU with
        TorchScript JIT, config = (eval, cuda, jit).
        This helper returns the score_db.
"""
found_benchmarks = defaultdict(lambda: defaultdict(list))
score_db = defaultdict(float)
        # Construct a benchmark database by going through the data file
        # for the run and updating the dictionary by task and model_name
for b in data['benchmarks']:
name, mean = b['name'], b['stats']['mean']
test, model_name, device, mode = re.match(r"test_(.*)\[(.*)\-(.*)\-(.*)\]", name).groups()
config = (test, device, mode)
task = _get_model_task(model_name)
found_benchmarks[task][model_name].append((mean, config, name))
for task, models in found_benchmarks.items():
for name, all_configs in models.items():
weight = self.weights[task] * (1.0/len(all_configs))
for mean, config, benchmark in all_configs:
benchmark_score = weight * math.log(self.norm[benchmark] / mean)
score_db[config] += benchmark_score
        # Scale the per-config scores and calibrate them to the
        # target score
        if weighted_score:
            for config, score in score_db.items():
                score_db[config] = self.target * math.exp(score * 0.125)
return score_db
def compute_score(self, data):
"""
        Calculates the total V0 score across all the benchmarks
        that were run, by reading the data (.json) file.
        The accumulated weighted score is then calibrated to the target score.
"""
score = 0.0
score_db = self.get_score_per_config(data)
score = sum(score_db.values())
score = self.target * math.exp(score)
return score
def get_norm(self, data):
return generate_bench_cfg(self.spec, data, self.target)
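
# Minimal usage sketch (illustrative only; the file names below are hypothetical).
# `data` is expected to follow the pytest-benchmark JSON layout consumed above:
# {"benchmarks": [{"name": ..., "stats": {"mean": ...}}, ...]}. The resulting
# score is target * exp(sum_i(w_i * log(norm_i / mean_i))), i.e. a weighted
# geometric mean of speedups relative to the frozen reference norms.
def _example_compute_v0_score(spec_path="score-spec.yaml", data_path="benchmark-run.json", target=1000.0):
    with open(data_path) as data_file:
        run_data = json.load(data_file)
    # Passing ref_data=None falls back to the frozen V0 reference config.
    score_v0 = TorchBenchScoreV0(ref_data=None, spec=spec_path, target=target)
    return score_v0.compute_score(run_data)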
|
from accelerate.utils.dataclasses import DeepSpeedPlugin
import torch
import math
import os
from pathlib import Path
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.utils.data import DataLoader
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from torchbenchmark.tasks import NLP
import evaluate
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
default_data_collator,
get_scheduler,
)
from typing import Optional
from torchbenchmark.util.framework.transformers.text_classification.dataset import prep_dataset, preprocess_dataset, prep_labels
from torchbenchmark.util.framework.transformers.text_classification.args import parse_args, parse_torchbench_args
try:
import torch._dynamo
except ImportError:
pass
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
class Model(E2EBenchmarkModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE: int = 32
DEFAULT_EVAL_BSIZE: int = 1
def __init__(self, test, batch_size=None, extra_args=[]):
super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)
# TODO: currently only support 1 GPU device
self.device = "cuda"
self.device_num = 1
# Parse the extra arguments
self.tb_args = parse_torchbench_args(self.extra_args)
torch.manual_seed(1337)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
# Parameters
model_name = "bert-base-cased"
max_seq_length = "128"
learning_rate = "2e-5"
num_train_epochs = "3"
max_train_steps = "100" # overrides num_train_epochs to run faster
# this benchmark runs on a single GPU
cuda_visible_devices = "0"
output_dir = os.path.join(CURRENT_DIR, ".output")
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
in_arg = ["--model_name_or_path", model_name, "--task_name", self.tb_args.task_name,
"--max_length", max_seq_length,
"--per_device_train_batch_size", str(self.batch_size),
"--per_device_eval_batch_size", str(self.batch_size),
"--learning_rate", learning_rate,
"--num_train_epochs", num_train_epochs,
"--max_train_steps", max_train_steps,
"--output_dir", output_dir]
hf_args = parse_args(in_arg)
self.num_epochs = hf_args.num_train_epochs
# ideally we don't modify the model code directly, but attaching deepspeed
# must be done before self.prep initializes accelerator.
if self.tb_args.distributed not in ["deepspeed", "ddp", "fsdp", "none"]:
            raise RuntimeError(f"Unsupported distributed scheme {self.tb_args.distributed} for this model")
if self.tb_args.distributed == "deepspeed":
zero_opt_cfg = {
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": 2e8,
"overlap_comm": True,
"contiguous_gradients": False
}
}
hf_args.deepspeed_plugin = DeepSpeedPlugin()
hf_args.deepspeed_plugin.deepspeed_config.update(zero_opt_cfg)
hf_args.distributed = self.tb_args.distributed # pass in distributed config to prep as a hf_arg
# setup other members
self.prep(hf_args)
if test == "train":
self.num_examples = len(self.train_dataloader) * self.batch_size
elif test == "eval":
self.num_examples = len(self.eval_dataloader) * self.batch_size
def prep(self, hf_args):
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
if hf_args.distributed == "deepspeed":
# Note: self.tb_args.fp16 could be renamed to better clarify its meaning
assert self.tb_args.fp16=="amp", "deepspeed is only supported with bf16/amp enabled"
accelerator = Accelerator(deepspeed_plugin=hf_args.deepspeed_plugin, mixed_precision='bf16')
else:
accelerator = Accelerator(mixed_precision='fp16' if self.tb_args.fp16=='amp' else 'no')
accelerator.wait_for_everyone()
raw_datasets = prep_dataset(hf_args)
num_labels, label_list, is_regression = prep_labels(hf_args, raw_datasets)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(hf_args.model_name_or_path, num_labels=num_labels, finetuning_task=hf_args.task_name)
tokenizer = AutoTokenizer.from_pretrained(hf_args.model_name_or_path, use_fast=not hf_args.use_slow_tokenizer)
model = AutoModelForSequenceClassification.from_pretrained(
hf_args.model_name_or_path,
from_tf=bool(".ckpt" in hf_args.model_name_or_path),
config=config,)
train_dataset, eval_dataset, self.mnli_eval_dataset = preprocess_dataset(hf_args, config, model, \
tokenizer, raw_datasets, num_labels, label_list, is_regression, accelerator)
# DataLoaders creation:
if hf_args.pad_to_max_length:
            # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
self.data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
self.data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=self.data_collator, batch_size=hf_args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=self.data_collator, batch_size=hf_args.per_device_eval_batch_size)
# transform model for DDP and FSDP
if hf_args.distributed == "ddp":
# prepare before wrap w/ DDP (or else error)
model = accelerator.prepare(model)
local_rank = int(os.getenv("LOCAL_RANK", -1))
model = DDP(
model,
device_ids=[local_rank],
# If buffer broadcast is necessary, specific optimizations might be
# necessary to optimize performance. Disable it by default.
broadcast_buffers=False,
# Set gradient as bucket view to avoid unnecessary copies
gradient_as_bucket_view=True,
# TODO: tune bucket_cap_mb
static_graph=True,
)
elif hf_args.distributed == "fsdp":
# model needs to be prepared and wrapped w/ FSDP before optimizer is created, because FSDP flattens params
model = accelerator.prepare(model)
local_rank = int(os.getenv("LOCAL_RANK", -1))
torch.cuda.set_device(local_rank)
model = FSDP(
model,
device_id = torch.cuda.current_device()
)
# Setup metrics
# Get the metric function
if hf_args.task_name is not None:
self.metric = evaluate.load("glue", hf_args.task_name)
else:
self.metric = evaluate.load("accuracy")
# Setup class members (model and the dataloaders will be updated in _prep_optimizer_and_scheduler() below)
self.hf_args = hf_args
self.is_regression = is_regression
self.accelerator = accelerator
self.model = model
self.train_dataloader = train_dataloader
self.eval_dataloader = eval_dataloader
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": hf_args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
self.optimizer = AdamW(optimizer_grouped_parameters, lr=hf_args.learning_rate)
self._update_everything_with_optimizer()
def _update_everything_with_optimizer(self) -> None:
# Prepare everything with our `accelerator` with deepspeed or non-distributed environment.
if self.hf_args.distributed == "deepspeed" or self.hf_args.distributed == "none":
# deepspeed will error unless all components prepared at the same time
self.model, self.train_dataloader, self.eval_dataloader, self.optimizer = self.accelerator.prepare(
self.model, self.train_dataloader, self.eval_dataloader, self.optimizer)
else:
# ddp and fsdp need model prepared before wrapping.
self.train_dataloader, self.eval_dataloader, self.optimizer = self.accelerator.prepare(
self.train_dataloader, self.eval_dataloader, self.optimizer)
        # Note -> the training dataloader needs to be prepared before we grab its length below (because its length will be
        # shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.hf_args.gradient_accumulation_steps)
if self.hf_args.max_train_steps is None:
self.hf_args.max_train_steps = self.hf_args.num_train_epochs * num_update_steps_per_epoch
else:
self.hf_args.num_train_epochs = math.ceil(self.hf_args.max_train_steps / num_update_steps_per_epoch)
self.lr_scheduler = get_scheduler(
name=self.hf_args.lr_scheduler_type,
optimizer=self.optimizer,
num_warmup_steps=self.hf_args.num_warmup_steps,
num_training_steps=self.hf_args.max_train_steps,
)
def train(self) -> Optional[dict]:
completed_steps = 0
eval_metric = None
for _epoch in range(self.hf_args.num_train_epochs):
self.model.train()
for step, batch in enumerate(self.train_dataloader):
loss = self.run_forward(batch)
loss = loss / self.hf_args.gradient_accumulation_steps
self.run_backward(loss)
if step % self.hf_args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1:
self.run_optimizer_step()
completed_steps += 1
if completed_steps >= self.hf_args.max_train_steps:
break
if self.tb_args.validate_in_train:
self.model.eval()
for step, batch in enumerate(self.eval_dataloader):
outputs = self.run_eval(batch)
predictions = outputs.logits.argmax(dim=-1) if not self.is_regression else outputs.logits.squeeze()
self.metric.add_batch(
predictions=self.accelerator.gather(predictions),
references=self.accelerator.gather(batch["labels"]),
)
eval_metric = self.metric.compute()
if self.tb_args.validate_in_train:
if self.hf_args.task_name == "mnli":
# Final evaluation on mismatched validation set
eval_dataset = self.mnli_eval_dataset
eval_dataloader = DataLoader(
eval_dataset, collate_fn=self.data_collator, batch_size=self.hf_args.per_device_eval_batch_size
)
eval_dataloader = self.accelerator.prepare(eval_dataloader)
self.model.eval()
for step, batch in enumerate(eval_dataloader):
outputs = self.run_eval(batch)
predictions = outputs.logits.argmax(dim=-1)
self.metric.add_batch(
predictions=self.accelerator.gather(predictions),
references=self.accelerator.gather(batch["labels"]),
)
eval_metric = self.metric.compute()
# store accuracy results
if self.hf_args.task_name == "cola" and self.tb_args.validate_in_train:
self.accuracy = eval_metric["matthews_correlation"]
return eval_metric
def eval(self) -> Optional[dict]:
self.model.eval()
for _step, batch in enumerate(self.eval_dataloader):
with torch.no_grad():
outputs = self.run_eval(batch)
predictions = outputs.logits.argmax(dim=-1) if not self.is_regression else outputs.logits.squeeze()
self.metric.add_batch(
predictions=self.accelerator.gather(predictions),
references=self.accelerator.gather(batch["labels"]),
)
eval_metric = self.metric.compute()
return eval_metric
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self._update_everything_with_optimizer()
def next_batch(self):
return next(iter(self.train_dataloader))
def run_forward(self, input):
"""
compute model forward and return loss
"""
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_forward)(input)
else:
return self._run_forward(input)
def _run_forward(self, input):
return self.model(**input).loss
def run_backward(self, loss):
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_backward)(loss)
else:
return self._run_backward(loss)
def _run_backward(self, loss):
self.accelerator.backward(loss)
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
def run_optimizer_step(self):
if self.dynamo and not self.opt_args.dynamo_disable_optimizer_step:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_optimizer_step)()
else:
return self._run_optimizer_step()
def _run_optimizer_step(self):
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
def run_eval(self, input):
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_eval)(input)
else:
return self._run_eval(input)
def _run_eval(self, input):
return self.model(**input)
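
# Minimal usage sketch (illustrative only, not part of the benchmark harness).
# It assumes a single CUDA device and the defaults that parse_torchbench_args
# supplies for the GLUE task name and the distributed scheme.
def _example_train_once():
    model = Model(test="train", batch_size=32)
    eval_metric = model.train()  # runs up to hf_args.max_train_steps optimizer steps
    return eval_metric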
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import torch
import math
import os
from pathlib import Path
from torch.utils.data import DataLoader
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
default_data_collator,
get_scheduler,
)
from torchbenchmark.util.framework.transformers.text_classification.dataset import prep_dataset, preprocess_dataset, prep_labels
from torchbenchmark.util.framework.transformers.text_classification.args import parse_args
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
OUTPUT_DIR = os.path.join(CURRENT_DIR, ".output")
torch.manual_seed(1337)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
def __init__(self, device=None, train_bs=32, task_name="cola"):
super().__init__()
self.device = device
model_name = "bert-base-cased"
max_seq_length = "128"
learning_rate = "2e-5"
num_train_epochs = "3"
# this benchmark runs on a single GPU
cuda_visible_devices = "0"
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
output_dir = OUTPUT_DIR
in_arg = ["--model_name_or_path", model_name, "--task_name", task_name,
"--do_train", "--do_eval", "--max_seq_length", max_seq_length,
"--per_device_train_batch_size", str(train_bs),
"--learning_rate", learning_rate,
"--num_train_epochs", num_train_epochs,
"--output_dir", OUTPUT_DIR]
model_args, data_args, training_args = parse_args(in_arg)
# setup other members
self.prep(model_args, data_args, training_args)
def prep(self, model_args, data_args, training_args):
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
accelerator.wait_for_everyone()
raw_datasets = prep_dataset(data_args, training_args)
num_labels, label_list, is_regression = prep_labels(data_args, raw_datasets)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
# cache_dir=model_args.cache_dir,
# revision=model_args.model_revision,
# use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
# cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
# revision=model_args.model_revision,
# use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
# cache_dir=model_args.cache_dir,
# revision=model_args.model_revision,
# use_auth_token=True if model_args.use_auth_token else None,
)
train_dataset, eval_dataset, _predict_dataset = preprocess_dataset(data_args, training_args, config, model, \
tokenizer, raw_datasets, num_labels, label_list, is_regression)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=training_args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=training_args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": training_args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
# Set class members
self.optimizer = AdamW(optimizer_grouped_parameters, lr=training_args.learning_rate)
self.training_args = training_args
self.is_regression = is_regression
self.model = model
self.train_dataloader = train_dataloader
self.eval_dataloader = eval_dataloader
self.accelerator = accelerator
# Will set self.lr_scheduler
self._prepare_accelerator()
# Prepare everything with our `accelerator` and set the lr_scheduler
def _prepare_accelerator(self):
self.model, self.optimizer, self.train_dataloader, self.eval_dataloader = self.accelerator.prepare(
self.model, self.optimizer, self.train_dataloader, self.eval_dataloader
)
# Note -> the training dataloader needs to be prepared before we grab its length below (since its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.training_args.gradient_accumulation_steps)
if self.training_args.max_steps is None or self.training_args.max_steps == -1:
self.training_args.max_steps = self.training_args.num_train_epochs * num_update_steps_per_epoch
else:
self.training_args.num_train_epochs = math.ceil(self.training_args.max_steps / num_update_steps_per_epoch)
self.training_args.num_train_epochs = int(self.training_args.num_train_epochs)
self.lr_scheduler = get_scheduler(
name=self.training_args.lr_scheduler_type,
optimizer=self.optimizer,
num_warmup_steps=self.training_args.warmup_steps,
num_training_steps=self.training_args.max_steps,
)
def get_module(self):
raise NotImplementedError("get_module is not supported by this model")
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self._prepare_accelerator()
def train(self):
if self.jit:
raise NotImplementedError("JIT is not supported by this model")
if not self.device == "cuda":
raise NotImplementedError("Only CUDA is supported by this model")
assert self.training_args.do_train, "Must train with `do_train` arg being set"
completed_steps = 0
for _epoch in range(self.training_args.num_train_epochs):
self.model.train()
for step, batch in enumerate(self.train_dataloader):
outputs = self.model(**batch)
loss = outputs.loss
loss = loss / self.training_args.gradient_accumulation_steps
self.accelerator.backward(loss)
if step % self.training_args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1:
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
completed_steps += 1
if completed_steps >= self.training_args.max_steps:
break
self.model.eval()
for step, batch in enumerate(self.eval_dataloader):
outputs = self.model(**batch)
                predictions = outputs.logits.argmax(dim=-1) if not self.is_regression else outputs.logits.squeeze()
|
from accelerate.utils.dataclasses import DeepSpeedPlugin
import functools
import torch
import numpy as np
import math
import os
from pathlib import Path
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torch.utils.data import DataLoader
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from torchbenchmark.tasks import NLP
import evaluate
from accelerate import Accelerator
from transformers import (
CONFIG_MAPPING,
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
default_data_collator,
get_scheduler,
MBartTokenizer,
MBartTokenizerFast
)
from transformers.models.t5.modeling_t5 import T5Block
from torchbenchmark.util.framework.transformers.translation.dataset import prep_dataset, preprocess_dataset
from torchbenchmark.util.framework.transformers.translation.args import parse_args, parse_torchbench_args, task_to_keys
try:
import torch._dynamo
except ImportError:
pass
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
class Model(E2EBenchmarkModel):
task = NLP.TRANSLATION
DEFAULT_TRAIN_BSIZE: int = 32
DEFAULT_EVAL_BSIZE: int = 1
def __init__(self, test, batch_size=None, extra_args=[]):
super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)
self.device = "cuda"
self.device_num = 1
# Parse the extra arguments
self.tb_args = parse_torchbench_args(self.extra_args)
torch.manual_seed(1337)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
# Parameters
model_name = "t5-base"
max_source_length = "1024"
max_target_length = "128"
learning_rate = "2e-5"
num_train_epochs = "3" # this takes a rather long time for wmt-en-ro
max_train_steps = "100" # overrides num_train_epochs to run faster
checkpointing_steps = None # set to a string value, like "1000"
task_name = self.tb_args.task_name
task_args = task_to_keys[task_name] # dataset specific hf_args
# T5 requires source prefix to know what to translate
if task_name == "wmt-en-ro":
source_prefix = "translate English to Romanian: "
elif task_name == "wmt-en-de":
source_prefix = "translate English to German: "
else:
raise RuntimeError(f"Unsupported translation task {task_name} for model hf_t5")
task_args.extend(["--source_prefix", source_prefix])
# this benchmark runs on a single GPU
cuda_visible_devices = "0"
output_dir = os.path.join(CURRENT_DIR, ".output")
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
        in_arg = ["--model_name_or_path", model_name,
                  "--max_source_length", max_source_length,
                  "--max_target_length", max_target_length,
                  "--per_device_train_batch_size", str(self.batch_size),
                  "--per_device_eval_batch_size", str(self.batch_size),
                  "--learning_rate", learning_rate,
                  "--num_train_epochs", num_train_epochs,
                  "--max_train_steps", max_train_steps,
                  "--output_dir", output_dir]
        # checkpointing_steps is optional; only pass the flag through when it is set
        if checkpointing_steps is not None:
            in_arg.extend(["--checkpointing_steps", checkpointing_steps])
in_arg.extend(task_args)
hf_args = parse_args(in_arg)
self.num_epochs = hf_args.num_train_epochs
# ideally we don't modify the model code directly, but attaching deepspeed
        # must be done before self.prep initializes accelerator.
hf_args.distributed = self.tb_args.distributed
# supported distributed backends
if hf_args.distributed not in ["deepspeed", "ddp", "fsdp", "none"]:
raise RuntimeError(f"Unsupported distributed scheme {self.tb_args.distributed} for model hf_t5")
# prep args for any distributed backend that needs it
if self.tb_args.distributed == "deepspeed":
zero_opt_cfg = {
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": 2e8,
"overlap_comm": True,
"contiguous_gradients": False
}
}
hf_args.deepspeed_plugin = DeepSpeedPlugin()
hf_args.deepspeed_plugin.deepspeed_config.update(zero_opt_cfg)
# setup other members
self.prep(hf_args)
if test == "train":
self.num_examples = len(self.train_dataloader) * self.batch_size
elif test == "eval":
self.num_examples = len(self.eval_dataloader) * self.batch_size
def prep(self, hf_args):
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
if hf_args.distributed == "deepspeed":
# Note: self.tb_args.fp16 could be renamed to better clarify its meaning
assert self.tb_args.fp16=="amp", "deepspeed is only supported with bf16/amp enabled"
accelerator = Accelerator(deepspeed_plugin=hf_args.deepspeed_plugin, mixed_precision='bf16')
else:
accelerator = Accelerator(mixed_precision='fp16' if self.tb_args.fp16=='amp' else 'no')
# Handle the repository creation
if accelerator.is_main_process:
if hf_args.output_dir is not None:
os.makedirs(hf_args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
raw_datasets = prep_dataset(hf_args)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if hf_args.config_name:
config = AutoConfig.from_pretrained(hf_args.config_name)
elif hf_args.model_name_or_path:
config = AutoConfig.from_pretrained(hf_args.model_name_or_path)
else:
config = CONFIG_MAPPING[hf_args.model_type]()
# logger.warning("You are instantiating a new config instance from scratch.")
if hf_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(hf_args.tokenizer_name, use_fast=not hf_args.use_slow_tokenizer)
elif hf_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(hf_args.model_name_or_path, use_fast=not hf_args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if hf_args.model_name_or_path:
model = AutoModelForSeq2SeqLM.from_pretrained(
hf_args.model_name_or_path,
from_tf=bool(".ckpt" in hf_args.model_name_or_path),
config=config,
)
else:
# logger.info("Training new model from scratch")
model = AutoModelForSeq2SeqLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
assert (
hf_args.target_lang is not None and hf_args.source_lang is not None
), "mBart requires --target_lang and --source_lang"
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[hf_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(hf_args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
# For translation we set the codes of our source and target languages (only useful for mBART, the others will
# ignore those attributes).
if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if hf_args.source_lang is not None:
tokenizer.src_lang = hf_args.source_lang
if hf_args.target_lang is not None:
tokenizer.tgt_lang = hf_args.target_lang
prefix = hf_args.source_prefix if hf_args.source_prefix is not None else ""
train_dataset, eval_dataset = preprocess_dataset(hf_args, raw_datasets, tokenizer, prefix, accelerator)
# # Log a few random samples from the training set:
# for index in random.sample(range(len(train_dataset)), 3):
# logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
label_pad_token_id = -100 if hf_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if hf_args.pad_to_max_length:
            # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
self.data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
self.data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if accelerator.use_fp16 else None,
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=self.data_collator, batch_size=hf_args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=self.data_collator, batch_size=hf_args.per_device_eval_batch_size)
# set distributed strategy before creating optimizer
if hf_args.distributed == "ddp":
model = accelerator.prepare(model)
local_rank = int(os.getenv("LOCAL_RANK", -1))
model = DDP(
model,
device_ids=[local_rank],
# If buffer broadcast is necessary, specific optimizations might be
# necessary to optimize performance. Disable it by default.
broadcast_buffers=False,
# Set gradient as bucket view to avoid unnecessary copies
gradient_as_bucket_view=True,
# TODO: tune bucket_cap_mb
static_graph=True,
)
elif hf_args.distributed == "fsdp":
model = accelerator.prepare(model)
transformer_auto_wrapper_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
T5Block,
},
)
local_rank = int(os.getenv("LOCAL_RANK", -1))
torch.cuda.set_device(local_rank)
model = FSDP(
model,
# TODO: seems to make benchmark slower? and profile doesn't work? investigate
# auto_wrap_policy=transformer_auto_wrapper_policy,
device_id = torch.cuda.current_device()
)
elif hf_args.distributed == "none":
model = accelerator.prepare(model)
        # Figure out how often we should save the Accelerator states
        if isinstance(hf_args.checkpointing_steps, str) and hf_args.checkpointing_steps.isdigit():
            hf_args.checkpointing_steps = int(hf_args.checkpointing_steps)
        else:
            hf_args.checkpointing_steps = None
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
metric = evaluate.load("sacrebleu")
# Setup class members
self.hf_args = hf_args
self.model = model
self.train_dataloader = train_dataloader
self.eval_dataloader = eval_dataloader
self.accelerator = accelerator
self.tokenizer = tokenizer
self.metric = metric
self.config = config
self.postprocess_text = postprocess_text
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": hf_args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
self.optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=hf_args.learning_rate)
self._update_everything_with_optimizer()
def _update_everything_with_optimizer(self):
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.hf_args.gradient_accumulation_steps)
if self.hf_args.max_train_steps is None:
self.hf_args.max_train_steps = self.hf_args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
self.lr_scheduler = get_scheduler(
name=self.hf_args.lr_scheduler_type,
optimizer=self.optimizer,
num_warmup_steps=self.hf_args.num_warmup_steps,
num_training_steps=self.hf_args.max_train_steps,
)
# Prepare everything with our `accelerator`.
if self.hf_args.distributed == "deepspeed":
# deepspeed will error unless all components prepared at the same time
self.model, self.train_dataloader, self.eval_dataloader, self.optimizer, self.lr_scheduler = self.accelerator.prepare(
self.model, self.train_dataloader, self.eval_dataloader, self.optimizer, self.lr_scheduler)
else:
# ddp and fsdp need model prepared before wrapping.
self.train_dataloader, self.eval_dataloader, self.optimizer, self.lr_scheduler = self.accelerator.prepare(
self.train_dataloader, self.eval_dataloader, self.optimizer, self.lr_scheduler)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.hf_args.gradient_accumulation_steps)
if overrode_max_train_steps:
self.hf_args.max_train_steps = self.hf_args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
self.hf_args.num_train_epochs = math.ceil(self.hf_args.max_train_steps / num_update_steps_per_epoch)
def train(self):
completed_steps = 0
eval_metric = None
for epoch in range(self.hf_args.num_train_epochs):
self.model.train()
for step, batch in enumerate(self.train_dataloader):
loss = self.run_forward(batch)
loss = loss / self.hf_args.gradient_accumulation_steps
self.run_backward(loss)
if step % self.hf_args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1:
self.run_optimizer_step()
completed_steps += 1
if isinstance(self.hf_args.checkpointing_steps, int):
if completed_steps % self.hf_args.checkpointing_steps == 0:
output_dir = f"step_{completed_steps }"
if self.hf_args.output_dir is not None:
output_dir = os.path.join(self.hf_args.output_dir, output_dir)
self.accelerator.save_state(output_dir)
if completed_steps >= self.hf_args.max_train_steps:
break
if self.tb_args.validate_in_train:
eval_metric = self.eval() # run evaluation
# store accuracy results
if self.tb_args.validate_in_train:
self.accuracy = eval_metric["score"]
return eval_metric
def eval(self):
self.model.eval()
if self.hf_args.val_max_target_length is None:
self.hf_args.val_max_target_length = self.hf_args.max_target_length
gen_kwargs = {
"max_length": self.hf_args.val_max_target_length if self.hf_args is not None else self.config.max_length,
"num_beams": self.hf_args.num_beams,
}
samples_seen = 0
for step, batch in enumerate(self.eval_dataloader):
with torch.no_grad():
generated_tokens = self.accelerator.unwrap_model(self.model).generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
)
generated_tokens = self.accelerator.pad_across_processes(
generated_tokens, dim=1, pad_index=self.tokenizer.pad_token_id
)
labels = batch["labels"]
if not self.hf_args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = self.accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=self.tokenizer.pad_token_id)
generated_tokens = self.accelerator.gather(generated_tokens).cpu().numpy()
labels = self.accelerator.gather(labels).cpu().numpy()
if self.hf_args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, self.tokenizer.pad_token_id)
decoded_preds = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = self.postprocess_text(decoded_preds, decoded_labels)
# If we are in a multiprocess environment, the last batch has duplicates
if self.accelerator.num_processes > 1:
if step == len(self.eval_dataloader) - 1:
decoded_preds = decoded_preds[: len(self.eval_dataloader.dataset) - samples_seen]
decoded_labels = decoded_labels[: len(self.eval_dataloader.dataset) - samples_seen]
else:
samples_seen += len(decoded_labels)
self.metric.add_batch(predictions=decoded_preds, references=decoded_labels)
eval_metric = self.metric.compute()
# logger.info({"bleu": eval_metric["score"]})
return eval_metric
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self._update_everything_with_optimizer()
def next_batch(self):
return next(iter(self.train_dataloader))
def run_forward(self, input):
"""
compute model forward and return loss
"""
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_forward)(input)
else:
return self._run_forward(input)
def _run_forward(self, input):
return self.model(**input).loss
def run_backward(self, loss):
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_backward)(loss)
else:
return self._run_backward(loss)
def _run_backward(self, loss):
self.accelerator.backward(loss)
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
def run_optimizer_step(self):
if self.dynamo and not self.opt_args.dynamo_disable_optimizer_step:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_optimizer_step)()
else:
return self._run_optimizer_step()
def _run_optimizer_step(self):
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
# upstream repo: https://github.com/kuangliu/pytorch-cifar
import torch
import torchvision
import torchvision.transforms as transforms
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
import os
from tqdm import tqdm
from pathlib import Path
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
class Model(E2EBenchmarkModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE: int = 128
DEFAULT_EVAL_BSIZE: int = 1
def __init__(self, test, batch_size=None, extra_args=[]):
super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)
self.device = "cuda"
self.device_num = 1
data_root = CURRENT_DIR.joinpath(".data")
        assert torch.cuda.is_available(), "This model requires a CUDA device."
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(
root=str(data_root), train=True, download=True, transform=transform_train)
self.trainloader = torch.utils.data.DataLoader(
trainset, batch_size=self.batch_size, shuffle=True, num_workers=2)
self.num_examples = len(trainset)
testset = torchvision.datasets.CIFAR10(
root=str(data_root), train=False, download=True, transform=transform_test)
self.testloader = torch.utils.data.DataLoader(
testset, batch_size=self.batch_size, shuffle=False, num_workers=2)
self.classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
self.lr = 0.1
self.T_max = 200
# initialize accuracy
self.accuracy = 0.0
if self.test == "train":
# by default, run 200 epochs
self.num_epochs = 200
            # use a randomly initialized ResNet-50 (CIFAR variant) for training
            from .resnet import ResNet50
            self.model = ResNet50().to(self.device)
self.model.train()
self.criterion = torch.nn.CrossEntropyLoss()
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr,
momentum=0.9, weight_decay=5e-4)
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=self.T_max)
else:
# use pretrained model for eval
self.model = torchvision.models.resnet50(pretrained=True).to(self.device)
self.model.eval()
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=self.T_max)
def _test_loop(self):
self.model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for _batch_idx, (inputs, targets) in enumerate(self.testloader):
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs = self.model(inputs)
loss = self.criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
self.accuracy = 100. * correct / total
def _train_loop(self):
for _batch_idx, (inputs, targets) in enumerate(self.trainloader):
inputs, targets = inputs.to(self.device), targets.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(inputs)
loss = self.criterion(outputs, targets)
loss.backward()
self.optimizer.step()
def train(self):
self.model.train()
# Train num_epochs
for _epoch in tqdm(range(self.num_epochs), desc = "Training epoch"):
self._train_loop()
# calculate total accuracy
self._test_loop()
def eval(self):
raise NotImplementedError("Eval is not yet implemented for this model.")
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
|
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion *
planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3])
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
def ResNet101():
return ResNet(Bottleneck, [3, 4, 23, 3])
def ResNet152():
return ResNet(Bottleneck, [3, 8, 36, 3])
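
# Quick shape check (illustrative only): these CIFAR-style ResNets expect
# 32x32 RGB inputs and produce 10 logits per image by default.
if __name__ == "__main__":
    import torch
    net = ResNet50()
    logits = net(torch.randn(2, 3, 32, 32))
    print(logits.shape)  # torch.Size([2, 10])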
|
import os
import sys
import torch
import subprocess
from pathlib import Path
from dataclasses import dataclass
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from typing import Optional, List
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
FAMBENCH_ROOT = CURRENT_DIR.parent.parent.parent.joinpath("submodules", "FAMBench")
def _create_data_dir(data_dir: str):
data_dir = Path(data_dir)
data_dir.mkdir(parents=True, exist_ok=True)
return data_dir
def _get_fambench_test_root(name: str):
xlmr_ootb_root = FAMBENCH_ROOT.joinpath("benchmarks")
    assert xlmr_ootb_root.exists(), f"Can't find FAMBench source at {xlmr_ootb_root.absolute()}, " \
                                    "please check out the submodules."
return xlmr_ootb_root
@dataclass
class FAMBenchXLMREvalConfig:
"""
Original config reference:
https://github.com/facebookresearch/FAMBench/blob/main/benchmarks/run_xlmr_ootb.sh
"""
config_name = "default-config"
nbatches = 10
batchsize = 16
seqlength = 16
vocabsize = 250000
warmupbatches = 1
log_dir = os.path.join(CURRENT_DIR, ".data", "logs")
config_flags=["--inference-only", f"--num-batches={nbatches}", f"--batch-size={batchsize}", \
f"--sequence-length={seqlength}", f"--vocab-size={vocabsize}", \
f"--famconfig={config_name}", "--half-model", f"--warmup-batches={warmupbatches}", \
f"--logdir={log_dir}"]
class Model(E2EBenchmarkModel):
DEFAULT_EVAL_BSIZE = FAMBenchXLMREvalConfig.batchsize
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test: str, batch_size: Optional[int]=None, extra_args: List[str]=[]):
super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)
if not torch.cuda.is_available():
            raise NotImplementedError("FAMBench only supports running on NVIDIA GPUs.")
self.device = "cuda"
self.device_num = torch.cuda.device_count()
self.name = "xlmr"
self.implementation = "ootb"
self.code_root = _get_fambench_test_root(self.name)
if test == "eval":
self.config = FAMBenchXLMREvalConfig()
self.config.batchsize = self.batch_size
self.num_examples = self.config.nbatches * self.batch_size
_create_data_dir(self.config.log_dir)
def train(self):
raise NotImplementedError("FAMBench XLMR train is not implemented yet.")
def eval(self):
prog_args = [sys.executable, f"{self.name}/{self.implementation}/{self.name}.py"]
prog_args.extend(self.config.config_flags)
subprocess.check_call(prog_args, cwd=self.code_root)
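
# Minimal usage sketch (illustrative only). It assumes the FAMBench submodule
# is checked out and a CUDA device is available, as asserted above; eval()
# shells out to the FAMBench XLM-R OOTB inference benchmark.
def _example_run_xlmr_eval():
    model = Model(test="eval", batch_size=FAMBenchXLMREvalConfig.batchsize)
    model.eval()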
|
import sys
import subprocess
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import importlib
import sys
from urllib import request
from typing import List, Dict
TORCH_DEPS = ['torch', 'torchvision', 'torchtext', 'torchaudio']
proxy_suggestion = "Unable to verify https connectivity, " \
"required for setup.\n" \
"Do you need to use a proxy?"
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
def get_pkg_versions(packages: List[str], reload: bool=False) -> Dict[str, str]:
versions = {}
for module in packages:
module = importlib.import_module(module)
if reload:
module = importlib.reload(module)
versions[module.__name__] = module.__version__
return versions
def _test_https(test_url: str = 'https://github.com', timeout: float = 0.5) -> bool:
try:
request.urlopen(test_url, timeout=timeout)
except OSError:
return False
return True
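
# Minimal usage sketch (illustrative only; assumes the torch packages listed in
# TORCH_DEPS are importable). Checks HTTPS connectivity before reporting the
# installed versions of the core torch packages.
if __name__ == "__main__":
    if not _test_https():
        print(proxy_suggestion)
        sys.exit(-1)
    print(get_pkg_versions(TORCH_DEPS))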
|
from typing import Any, List, Optional
import boto3
import os
import json
from pathlib import Path
USERBENCHMARK_S3_BUCKET = "ossci-metrics"
USERBENCHMARK_S3_OBJECT = "torchbench-userbenchmark"
class S3Client:
def __init__(self, bucket, object):
self.s3 = boto3.client('s3')
self.bucket = bucket
self.object = object
def download_file(self, key: str, dest_dir: str) -> None:
filename = S3Client.get_filename_from_key(key)
assert filename, f"Expected non-empty filename from key {key}."
with open(os.path.join(dest_dir, filename), 'wb') as f:
self.s3.download_fileobj(self.bucket, key, f)
def upload_file(self, prefix: str, file_path: Path) -> None:
file_name = file_path.name
s3_key = f"{self.object}/{prefix}/{file_name}" if prefix else f"{self.object}/{file_name}"
response = self.s3.upload_file(str(file_path), self.bucket, s3_key)
print(f"S3 client response: {response}")
def get_file_as_json(self, key: str) -> Any:
obj = self.s3.get_object(Bucket=self.bucket, Key=key)
return json.loads(obj['Body'].read().decode('utf-8'))
def exists(self, prefix: str, file_name: str) -> Optional[str]:
"""Test if the key object/prefix/file_name exists in the S3 bucket.
If True, return the S3 object key. Return None otherwise. """
s3_key = f"{self.object}/{prefix}/{file_name}" if prefix else f"{self.object}/{file_name}"
result = self.s3.list_objects_v2(Bucket=self.bucket, Prefix=s3_key)
if 'Contents' in result:
return s3_key
return None
def list_directory(self, directory=None) -> List[str]:
"""List the directory files in the S3 bucket path.
If the directory doesn't exist, report an error. """
prefix = f"{self.object}/{directory}/" if directory else f"{self.object}/"
pages = self.s3.get_paginator("list_objects").paginate(Bucket=self.bucket, Prefix=prefix)
keys = filter(lambda x: not x == prefix, [e['Key'] for p in pages for e in p['Contents']])
return list(keys)
    @staticmethod
    def get_filename_from_key(object_key: str) -> str:
filename = object_key.split('/')[-1]
return filename
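
# Minimal usage sketch (illustrative only; the local file name is hypothetical
# and AWS credentials are assumed to be configured for boto3). Uploads a result
# file under the userbenchmark prefix and lists what is stored there.
def _example_upload_result(json_path: str = "userbenchmark-result.json") -> None:
    client = S3Client(USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT)
    client.upload_file(prefix="nightly", file_path=Path(json_path))
    print(client.list_directory("nightly"))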
|
import argparse
import subprocess
DEFAULT_PYTHON_VERSION = "3.10"
PYTHON_VERSION_MAP = {
"3.8": {
"pytorch_url": "cp38",
},
"3.10": {
"pytorch_url": "cp310",
},
}
def create_conda_env(pyver: str, name: str):
command = [ "conda", "create", "-n", name, "-y", f"python={pyver}" ]
subprocess.check_call(command)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pyver", type=str, default=DEFAULT_PYTHON_VERSION, help="Specify the Python version.")
    parser.add_argument("--create-conda-env", type=str, default=None, help="Create a conda environment with the given name, using the specified Python version.")
args = parser.parse_args()
if args.create_conda_env:
create_conda_env(args.pyver, args.create_conda_env)
|
import os
import re
import importlib
import argparse
import subprocess
from pathlib import Path
from typing import Optional
# defines the default CUDA version to compile against
DEFAULT_CUDA_VERSION = "11.7"
CUDA_VERSION_MAP = {
"11.6": {
"pytorch_url": "cu116",
"magma_version": "magma-cuda116",
},
"11.7": {
"pytorch_url": "cu117",
"magma_version": "magma-cuda117",
},
"11.8": {
"pytorch_url": "cu118",
"magma_version": "magma-cuda118",
}
}
PIN_CMAKE_VERSION = "3.22.*"
TORCHBENCH_TORCH_NIGHTLY_PACKAGES = ["torch", "torchtext", "torchvision", "torchaudio"]
def _nvcc_output_match(nvcc_output, target_cuda_version):
regex = 'release (.*),'
version = re.search(regex, nvcc_output).groups()[0]
return version == target_cuda_version
def prepare_cuda_env(cuda_version: str, dryrun=False):
assert cuda_version in CUDA_VERSION_MAP, f"Required CUDA version {cuda_version} doesn't exist in {CUDA_VERSION_MAP.keys()}."
env = os.environ.copy()
# step 1: setup CUDA path and environment variables
cuda_path = Path("/").joinpath("usr", "local", f"cuda-{cuda_version}")
assert cuda_path.exists() and cuda_path.is_dir(), f"Expected CUDA Library path {cuda_path} doesn't exist."
cuda_path_str = str(cuda_path.resolve())
env["CUDA_ROOT"] = cuda_path_str
env["CUDA_HOME"] = cuda_path_str
env["PATH"] = f"{cuda_path_str}/bin:{env['PATH']}"
env["CMAKE_CUDA_COMPILER"] = str(cuda_path.joinpath('bin', 'nvcc').resolve())
    env["LD_LIBRARY_PATH"] = f"{cuda_path_str}/lib64:{cuda_path_str}/extras/CUPTI/lib64:{env.get('LD_LIBRARY_PATH', '')}"
if dryrun:
print(f"CUDA_HOME is set to {env['CUDA_HOME']}")
# step 2: test call to nvcc to confirm the version is correct
test_nvcc = ["nvcc", "--version"]
if dryrun:
print(f"Checking nvcc version, command {test_nvcc}")
else:
output = subprocess.check_output(test_nvcc, stderr=subprocess.STDOUT, env=env).decode()
print(f"NVCC version output: {output}")
assert _nvcc_output_match(output, cuda_version), f"Expected CUDA version {cuda_version}, getting nvcc test result {output}"
# step 3: install the correct magma version
install_magma_cmd = ["conda", "install", "-c", "pytorch", CUDA_VERSION_MAP[cuda_version]['magma_version']]
    if dryrun:
        print(f"Installing CUDA magma: {install_magma_cmd}")
    else:
        subprocess.check_call(install_magma_cmd, env=env)
return env
def setup_cuda_softlink(cuda_version: str):
assert cuda_version in CUDA_VERSION_MAP, f"Required CUDA version {cuda_version} doesn't exist in {CUDA_VERSION_MAP.keys()}."
cuda_path = Path("/").joinpath("usr", "local", f"cuda-{cuda_version}")
assert cuda_path.exists() and cuda_path.is_dir(), f"Expected CUDA Library path {cuda_path} doesn't exist."
current_cuda_path = Path("/").joinpath("usr", "local", "cuda")
if current_cuda_path.exists():
        assert current_cuda_path.is_symlink(), "Expected /usr/local/cuda to be a symlink."
current_cuda_path.unlink()
os.symlink(str(cuda_path.resolve()), str(current_cuda_path.resolve()))
def install_pytorch_nightly(cuda_version: str, env, dryrun=False):
uninstall_torch_cmd = ["pip", "uninstall", "-y"]
uninstall_torch_cmd.extend(TORCHBENCH_TORCH_NIGHTLY_PACKAGES)
if dryrun:
print(f"Uninstall pytorch: {uninstall_torch_cmd}")
else:
# uninstall multiple times to make sure the env is clean
for _loop in range(3):
subprocess.check_call(uninstall_torch_cmd)
pytorch_nightly_url = f"https://download.pytorch.org/whl/nightly/{CUDA_VERSION_MAP[cuda_version]['pytorch_url']}"
install_torch_cmd = ["pip", "install", "--pre"]
install_torch_cmd.extend(TORCHBENCH_TORCH_NIGHTLY_PACKAGES)
install_torch_cmd.extend(["-i", pytorch_nightly_url])
if dryrun:
print(f"Install pytorch nightly: {install_torch_cmd}")
else:
subprocess.check_call(install_torch_cmd, env=env)
def install_torch_deps(cuda_version: str):
# install magma
magma_pkg = CUDA_VERSION_MAP[cuda_version]["magma_version"]
cmd = ["conda", "install", "-y", magma_pkg, "-c", "pytorch"]
subprocess.check_call(cmd)
# install other dependencies
torch_deps = ["numpy", "requests", "ninja", "pyyaml", "setuptools", "gitpython", "beautifulsoup4", "regex"]
cmd = ["conda", "install", "-y"] + torch_deps
subprocess.check_call(cmd)
# install unittest-xml-reporting
cmd = ["pip", "install", "unittest-xml-reporting"]
subprocess.check_call(cmd)
def install_torch_build_deps(cuda_version: str):
install_torch_deps(cuda_version=cuda_version)
# Pin cmake version to stable
# See: https://github.com/pytorch/builder/pull/1269
torch_build_deps = ["cffi", "sympy", "typing_extensions", "future", "six", "dataclasses", "tabulate", "tqdm", "mkl", "mkl-include", \
f"cmake={PIN_CMAKE_VERSION}"]
cmd = ["conda", "install", "-y"] + torch_build_deps
subprocess.check_call(cmd)
def get_torch_nightly_version(pkg_name: str):
pkg = importlib.import_module(pkg_name)
version = pkg.__version__
regex = ".*dev([0-9]+).*"
date_str = re.match(regex, version).groups()[0]
pkg_ver = {"version": version, "date": date_str}
return (pkg_name, pkg_ver)
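# Illustrative example (the version string is an assumption about the nightly
# naming scheme): a package reporting __version__ == "2.1.0.dev20230601"
# yields ("torch", {"version": "2.1.0.dev20230601", "date": "20230601"}).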
def check_torch_nightly_version(force_date: Optional[str] = None):
pkg_versions = dict(map(get_torch_nightly_version, TORCHBENCH_TORCH_NIGHTLY_PACKAGES))
pkg_dates = list(map(lambda x: x[1]["date"], pkg_versions.items()))
if not len(set(pkg_dates)) == 1:
raise RuntimeError(f"Found more than 1 dates in the torch nightly packages: {pkg_versions}.")
if force_date and not pkg_dates[0] == force_date:
raise RuntimeError(f"Force date value {force_date}, but found torch packages {pkg_versions}.")
force_date_str = f"User force date {force_date}" if force_date else ""
print(f"Installed consistent torch nightly packages: {pkg_versions}. {force_date_str}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--cudaver", default=DEFAULT_CUDA_VERSION, help="Specify the default CUDA version")
parser.add_argument("--setup-cuda-softlink", action="store_true", help="Setup the softlink to /usr/local/cuda")
parser.add_argument("--install-torch-deps", action="store_true", help="Install pytorch runtime requirements")
parser.add_argument("--install-torch-build-deps", action="store_true", help="Install pytorch build requirements")
parser.add_argument("--install-torch-nightly", action="store_true", help="Install pytorch nightlies")
parser.add_argument("--check-torch-nightly-version", action="store_true", help="Validate pytorch nightly package consistency")
parser.add_argument("--force-date", type=str, default=None, help="Force Pytorch nightly release date version. Date string format: YYmmdd")
args = parser.parse_args()
if args.setup_cuda_softlink:
setup_cuda_softlink(cuda_version=args.cudaver)
if args.install_torch_deps:
install_torch_deps(cuda_version=args.cudaver)
if args.install_torch_build_deps:
install_torch_build_deps(cuda_version=args.cudaver)
if args.install_torch_nightly:
install_pytorch_nightly(cuda_version=args.cudaver, env=os.environ)
if args.check_torch_nightly_version:
assert not args.install_torch_nightly, "Error: Can't run install torch nightly and check version in the same command."
check_torch_nightly_version(args.force_date)
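# Example invocation (illustrative; the script file name is an assumption,
# since it is not shown in this snippet):
#   python cuda_utils.py --cudaver 11.8 --setup-cuda-softlink --install-torch-build-deps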
|
"""Add Task abstraction to reduce the friction of controlling a remote worker."""
import abc
import ast
import functools
import inspect
import marshal
import textwrap
import typing
from components._impl.workers import base
class TaskBase(abc.ABC):
"""Convenience layer to allow methods to be called in a worker.
Because workers are stateful, this implicitly assumes that a Task wraps
a single worker. However `run_in_worker` is largely agnostic; it simply
calls `self.worker` and dispatches work to whatever Worker is returned.
"""
@abc.abstractproperty
def worker(self) -> base.WorkerBase:
...
def parse_f(f: typing.Callable) -> typing.Tuple[inspect.Signature, str]:
"""Extract the source code from a callable."""
if not inspect.isfunction(f):
raise TypeError(f"Expected function, got {type(f)}. ({f})")
signature = inspect.signature(f)
    # It isn't strictly necessary for `f` to be completely type annotated,
    # however one of the key advantages of `run_in_worker` over manually
    # passing strings is that it gives the type checker an opportunity to catch
    # errors. (Which may become more difficult once the code has been shipped
    # to the worker.) And because this is a developer facing rather than user
    # facing API, it isn't a problem to enforce this strict criterion. This
    # also provides some weak protection against decorators. (See below.)
for arg, arg_parameter in signature.parameters.items():
if arg_parameter.kind == inspect.Parameter.VAR_POSITIONAL:
raise TypeError(
f"Variadic positional argument `*{arg}` not permitted "
"for `run_in_worker` function.")
if arg_parameter.kind == inspect.Parameter.VAR_KEYWORD:
raise TypeError(
f"Variadic keywork argument `**{arg}` not permitted "
"for `run_in_worker` function.")
if arg_parameter.annotation == inspect.Parameter.empty:
raise TypeError(f"Missing type annotation for parameter `{arg}`")
if signature.return_annotation == inspect.Parameter.empty:
raise TypeError("Missing return annotation.")
# We serialize the function by grabbing source from the body, so
# decorators pose a correctness problem. The structure of a decorator is:
# ```
# def my_decorator(wrapped_f: typing.Callable) -> typing.Callable:
#
# @functools.wraps(wrapped_f) # <- Optional
# def inner(*args, **kwargs): # Most decorators don't know the signature of `wrapped_f`
# # Generally a call to `wrapped_f(*args, **kwargs)` appears
# # somewhere, though not required.
# ...
#
    #       return inner
# ```
# The inclusion or omission of `functools.wraps` is rather important.
# If included, it will provide the breadcrumbs to map `inner` back to
# `wrapped_f`, and the `inspect` library (namely `signature` and
# `getsource`) will parse the "True" function: `wrapped_f`. (This allows,
# among other things, type checkers to analyze decorated code.) Otherwise
# `inspect` will stop at `inner`.
#
# In the case that a function is decorated but does not use
# `functools.wraps`, it is HIGHLY likely that it uses variadic arguments
    # so that it can forward them. (And thus will fail the signature checks
    # above.) If we are passed a function which is decorated with a "proper"
# decorator, we catch it here by checking the `__wrapped__` property.
#
# The final case of a decorator with a concrete signature but no
# `functools.wraps` is not detectable (by design) and is thus caveat
# emptor.
if getattr(f, "__wrapped__", None):
raise TypeError(textwrap.dedent("""
`f` cannot be decorated below `@run_in_worker` (except for
@staticmethod) because the extraction logic would not carry through
said decorator(s).
Ok:
@my_decorator
@run_in_worker()
def foo() -> None:
...
Not ok:
@run_in_worker()
@my_decorator
def foo() -> None:
...
""").strip())
# Dedent, as `f` may have been defined in a scoped context.
f_src = textwrap.dedent(inspect.getsource(f))
# We don't want to be in the business of writing a Python parser.
# Fortunately our needs are relatively modest: we simply need to run a few
# sanity checks, and get the position where the body starts. This means
# that we can rely on the `ast` library to do the heavy lifting, and grab
# a few key values at the end.
f_ast = ast.parse(f_src)
assert len(f_ast.body) == 1
assert isinstance(f_ast.body[0], ast.FunctionDef)
assert f_ast.body[0].body
    # Note that `ast` reports `lineno` as one-indexed.
src_lines = f_src.splitlines(keepends=False)
node: ast.AST
for node in f_ast.body[0].body:
# In Python 3.7, there is a bug in `ast` that causes it to incorrectly
# report the start line of bare multi-line strings:
# https://bugs.python.org/issue16806
# Given that the only use for such strings is a docstring (or multi
# line comment), we simply elect to skip over them and index on the
# first node that will give valid indices.
if node.col_offset == -1:
assert isinstance(node.value, ast.Str), f"Expected `ast.Str`, got {type(node)}. ({node}) {node.lineno}"
continue
raw_body_lines = src_lines[node.lineno - 1:]
col_offset = node.col_offset
break
else:
raise TypeError("Could not find valid start of body.")
body_lines: typing.List[str] = []
for i, l in enumerate(raw_body_lines):
prefix, suffix = l[:col_offset], l[col_offset:]
# The first line of the body may overlap with the signature.
# e.g. `def f(): pass`
# For all other lines, the prefix must only be indentation.
assert not i or not l.strip() or not prefix.strip(), f"{l}, {col_offset}"
body_lines.append(suffix)
return signature, "\n".join(body_lines)
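# Illustrative example (not part of the original module): for a fully
# annotated function such as
#
#   def _double(x: int) -> int:
#       return x * 2
#
# `parse_f(_double)` returns the function's `inspect.Signature` together with
# the dedented body text, which here is simply "return x * 2".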
def run_in_worker(scoped: bool = True) -> typing.Callable[..., typing.Any]:
"""Decorator to run Task method in worker rather than the caller.
The Worker + Task model dictates that the caller generates a string of
Python source code. However this is not particularly ergonomic; there is
significant tooling (syntax highlighting, type checking, etc.) which is
lost if code must be provided as a string literal.
Moreover, moving values from the caller to the worker can be tedious.
Simply templating them into a string literal is brittle (because __repr__
may not produce valid source) and may subtly alter the value (e.g. the
string representation of a float will not produce the same number as the
original value). `WorkerBase.store` will safely move values, but does not
alleviate the ergonomic issues.
Consider the following, where we want the worker to open a file, read up to
`n` lines, and then return them to the caller. One implementation would be:
```
def head(self, fpath: str, n: int) -> List[str]:
self.worker.store("fpath", fpath)
self.worker.store("n", n)
self.worker.run(textwrap.dedent('''
lines = []
with open(fpath, "rt") as f:
for i, l in enumerate(f):
if i == n:
break
lines.append(l)
'''))
return self.worker.load("lines")
```
It works, but it's not very easy to read and leaks lots of variables
(fpath, n, lines, f, etc.) into the worker's global namespace. This
decorator allows the following code to be written instead:
```
@run_in_worker(scoped=True)
def head(fpath: str, n: int) -> List[str]:
lines = []
with open(fpath, "rt") as f:
for i, l in enumerate(f):
if i == n:
break
lines.append(l)
return lines
```
Code in the main thread can call `head` just like any normal function, but
it is executed in the worker. And unlike the first example, we will not
    pollute the global namespace. (This is because `scoped=True`.) There are
three aspects to `run_in_worker`:
1) Serialize arguments and revive them in the worker.
2) Extract the function body.
3) Retrieve the result from the worker.
All three are entirely mechanical; `run_in_worker` uses Python AST rather
than raw string parsing, so it is quite robust. Because ambiguity would be
very difficult to diagnose in this context, `run_in_worker` requires that
a complete type annotated signature be provided and that there are no
    variadic arguments. (*args or **kwargs) Moreover, it has the same restriction
for inputs and outputs as `store` and `load`: the values must be
serializable by the `marshal` library. (i.e. basic Python types)
"""
def outer(f: typing.Callable[..., typing.Any]) -> typing.Callable[..., typing.Any]:
# This will unwrap the `@staticmethod` descriptor and recover the true f
# https://stackoverflow.com/questions/53694087/unwraping-and-wrapping-again-staticmethod-in-meta-class
#
# Note: The `@staticmethod` decorator must appear BELOW the
# `@run_in_worker` decorator.
try:
f = f.__get__(object, None) # type: ignore[attr-defined]
except AttributeError:
pass
signature, f_body = parse_f(f)
has_return_value = (signature.return_annotation is not None)
if has_return_value and not scoped:
raise TypeError(
"Unscoped (globally executed) call can not have a return value.")
@functools.wraps(f)
def inner(
self: TaskBase,
*args: typing.Any,
**kwargs: typing.Any
) -> typing.Any:
bound_signature = signature.bind(*args, **kwargs)
bound_signature.apply_defaults()
body: typing.List[str] = ["# Deserialize args", "import marshal"]
for arg_name, arg_value in bound_signature.arguments.items():
try:
arg_bytes = marshal.dumps(arg_value)
except ValueError:
raise ValueError(f"unmarshallable arg {arg_name}: {arg_value}")
body.append(f"{arg_name} = marshal.loads(bytes.fromhex({repr(arg_bytes.hex())})) # {arg_value}")
body.extend(["", "# Wrapped source"] + f_body.splitlines(keepends=False))
src = "\n".join([
"def _run_in_worker_f():",
textwrap.indent("\n".join(body), " " * 4),
textwrap.dedent("""
try:
# Clear prior value if it exists.
del _run_in_worker_result
except NameError:
pass
_run_in_worker_result = _run_in_worker_f()
""")
])
# `worker.load` is not free, so for void functions we skip it.
if has_return_value:
self.worker.run(src)
return self.worker.load("_run_in_worker_result")
else:
src = f"{src}\nassert _run_in_worker_result is None"
self.worker.run(src)
return inner
return outer
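# Minimal usage sketch (an assumption for illustration, not taken from this
# file) showing how `TaskBase` and `run_in_worker` are meant to compose. Any
# concrete `base.WorkerBase` implementation could be supplied as the worker:
#
#   class ArithmeticTask(TaskBase):
#       def __init__(self, worker: base.WorkerBase) -> None:
#           self._worker = worker
#
#       @property
#       def worker(self) -> base.WorkerBase:
#           return self._worker
#
#       @run_in_worker(scoped=True)
#       def add(x: int, y: int) -> int:
#           return x + y
#
# `ArithmeticTask(some_worker).add(1, 2)` serializes the arguments, executes
# `return x + y` inside the worker, and marshals the result (3) back.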
|
import marshal
import textwrap
import typing
from components._impl.workers import base
class InProcessWorker(base.WorkerBase):
"""Worker which reuses the current Python process.
The implementation of this worker borrows from the builtin `timeit.Timer`
class, and simply reuses the current interpreter. (Making it comparatively
simple.) Note that as a result, it offers no protection against the GIL.
"""
def __init__(self, globals: typing.Dict[str, typing.Any]):
super().__init__()
self._globals: typing.Dict[str, typing.Any] = globals
@property
def in_process(self) -> bool:
return True
def run(self, snippet: str) -> None:
code = compile(
textwrap.dedent(snippet),
"<in-process-worker>",
"exec",
)
exec(code, self._globals) # noqa: P204
# Serialize and deserialize during store and load to match the behavior of
# workers with `in_process=False`.
def store(self, name: str, value: typing.Any, in_memory: bool = False) -> None:
if not in_memory:
value = marshal.loads(marshal.dumps(value))
self._globals[name] = value
def load(self, name: str) -> typing.Any:
try:
result = self._globals[name]
except KeyError:
# Our use of a globals dict is an implementation detail, but
# NameError is the canonical error when a variable does not exist.
raise NameError(f"name '{name}' is not defined")
return marshal.loads(marshal.dumps(result))
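# Example (a minimal sketch, not part of the original module) exercising the
# store/run/load round trip on an InProcessWorker:
def _example_in_process_round_trip() -> int:
    worker = InProcessWorker(globals={})
    worker.store("x", 3)       # marshal round trip, then assign into globals
    worker.run("y = x * 2")    # executed in the current interpreter
    return worker.load("y")    # returns a copy: 6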
|
import contextlib
import datetime
import io
import os
import marshal
import pathlib
import shutil
import signal
import subprocess
import sys
import tempfile
import textwrap
import time
import typing
from pathlib import Path
import components
from components._impl.workers import base
from components._impl.workers import subprocess_rpc
class SubprocessWorker(base.WorkerBase):
"""Open a subprocess using `python -i`, and use it to execute code.
This class wraps a subprocess which runs a clean instance of Python.
This enables hermetic execution of stateful code, GIL free concurrent
benchmarking, and easy use of command line tools from Python.
When using SubprocessWorker, it is important to remember that while the
environment is (or at least tries to be) identical to the parent, it does
not share state or initialization with the parent process. Imports must be
re-run in the worker, and shared resources (such as file descriptors) will
not be available. For most applications this mirrors the semantics of
`timeit.Timer`.
    The principal extension point for SubprocessWorker is the `args`
property. By overriding it, subclasses can change the nature of the
underlying subprocess while reusing all of the generic communication and
fault handling facilities of the base class. For example, suppose we want
to use TaskSet to pin the worker to a single core. The code is simply:
```
class TasksetZeroWorker(SubprocessWorker):
@property
def args(self) -> typing.List[str]:
return ["taskset", "--cpu-list", "0"] + super().args
```
"""
_working_dir: str
_alive: bool = False
_bootstrap_timeout: int = 10 # seconds
def __init__(self, timeout: typing.Optional[float] = None, extra_env: typing.Optional[typing.Dict[str, str]]=None) -> None:
super().__init__()
# Log inputs and outputs for debugging.
self._command_log = os.path.join(self.working_dir, "commands.log")
pathlib.Path(self._command_log).touch()
self._stdout_f: io.FileIO = io.FileIO(
os.path.join(self.working_dir, "stdout.txt"), mode="w",
)
self._stderr_f: io.FileIO = io.FileIO(
os.path.join(self.working_dir, "stderr.txt"), mode="w",
)
# `self._run` has strong assumptions about how `_input_pipe` and
# `_output_pipe` are used. They should not be accessed in any other
# context. (The same is true for `self.load` and `_load_pipe`.)
self._input_pipe = subprocess_rpc.Pipe()
self._output_pipe = subprocess_rpc.Pipe(
timeout=timeout,
timeout_callback=self._kill_proc,
)
self._load_pipe = subprocess_rpc.Pipe(
timeout=timeout,
timeout_callback=self._kill_proc,
)
# Windows and Unix differ in how pipes are shared with children.
# In Unix they are inherited, while in Windows the child consults the
# OS to get access. Most of this complexity is handled by
# `subprocess_rpc.Pipe`, however we also have to make sure Popen
# exposes the pipes in a platform appropriate way.
child_fds = [
self._input_pipe.read_fd,
self._output_pipe.write_fd,
self._load_pipe.write_fd,
]
if subprocess_rpc.IS_WINDOWS:
for fd in child_fds:
os.set_inheritable(fd, True)
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList["handle_list"].extend(
[subprocess_rpc.to_handle(fd) for fd in child_fds])
popen_kwargs = {
"startupinfo": startupinfo,
}
else:
popen_kwargs = {
"close_fds": True,
"pass_fds": child_fds,
}
worker_env = os.environ.copy()
if extra_env:
worker_env.update(extra_env)
        # Append the parent process's sys.path to the child process environment.
        # Use os.pathsep so the join is correct on both Windows and Unix.
        parent_sys_path = os.pathsep.join([p for p in sys.path if p])
        if "PYTHONPATH" in worker_env:
            worker_env["PYTHONPATH"] = f'{worker_env["PYTHONPATH"]}{os.pathsep}{parent_sys_path}'
else:
worker_env["PYTHONPATH"] = parent_sys_path
self._proc = subprocess.Popen(
args=self.args,
stdin=subprocess.PIPE,
stdout=self._stdout_f,
stderr=self._stderr_f,
env=worker_env,
encoding=subprocess_rpc.ENCODING,
bufsize=1,
cwd=os.getcwd(),
**popen_kwargs,
)
# setup the pid of child process in the output pipe
self._output_pipe.set_writer_pid(self._proc.pid)
self._worker_bootstrap_finished: bool = False
self._bootstrap_worker()
self._alive = True
def proc_pid(self):
return self._proc.pid
@property
def working_dir(self) -> str:
# A subclass might need to access `self.working_dir` before calling
# `super().__init__` in order to properly construct `args`, so we need
# to lazily initialize it.
if getattr(self, "_working_dir", None) is None:
self._working_dir = tempfile.mkdtemp()
return self._working_dir
@property
def args(self) -> typing.List[str]:
return [sys.executable, "-i", "-u"]
def run(self, snippet: str) -> None:
self._run(snippet)
def store(self, name: str, value: typing.Any, in_memory: bool = False) -> None:
if in_memory:
raise NotImplementedError("SubprocessWorker does not support `in_memory`")
# NB: we convert the bytes to a hex string to avoid encoding issues.
self._run(f"""
{name} = {subprocess_rpc.WORKER_IMPL_NAMESPACE}["marshal"].loads(
bytes.fromhex({repr(marshal.dumps(value).hex())})
)
""")
def load(self, name: str) -> typing.Any:
self._run(f"""
{subprocess_rpc.WORKER_IMPL_NAMESPACE}["load_pipe"].write(
{subprocess_rpc.WORKER_IMPL_NAMESPACE}["marshal"].dumps({name})
)
""")
return marshal.loads(self._load_pipe.read())
@property
def in_process(self) -> bool:
return False
@property
def alive(self) -> bool:
return self._alive and self._proc.poll() is None
def _bootstrap_worker(self) -> None:
"""Import subprocess_rpc in the worker, and start the work loop.
Commands are executed by writing to `self._input_pipe`, and waiting for
a response on `self._output_pipe`. This presumes, however, that there
is a worker doing the opposite: listening to the input pipe and writing
to the output pipe. At startup `self._proc` is a simple interactive
Python process, so we have to bootstrap it to start the work loop or
else `self._run` will hang waiting for jobs to be processed.
"""
# NB: This gets sent directly to `self._proc`'s stdin, so it MUST be
# a single expression and may NOT contain any empty lines. (Due to
# how Python processes commands.)
bootstrap_command = textwrap.dedent(f"""
try:
import marshal
import sys
import traceback
sys_path_old = list(sys.path)
sys.path = marshal.loads(
bytes.fromhex({repr(marshal.dumps(sys.path).hex())})
)
# The parent gets priority, but a subclass could set PYTHONPATH
# so we have to respect extra paths.
sys.path.extend([i for i in sys_path_old if i and i not in sys.path])
from components._impl.workers import subprocess_rpc
output_pipe = subprocess_rpc.Pipe(
write_handle={self._output_pipe.write_handle})
output_pipe.write(subprocess_rpc.BOOTSTRAP_IMPORT_SUCCESS)
subprocess_rpc.run_loop(
input_handle={self._input_pipe.read_handle},
output_pipe=output_pipe,
load_handle={self._load_pipe.write_handle},
)
except Exception as e:
traceback.print_exc()
print(str(e))
sys.exit(1)
""").strip()
if self._proc.poll() is not None:
raise ValueError("Process has already exited.")
proc_stdin = self._proc.stdin
assert proc_stdin is not None
self._log_cmd(bootstrap_command)
# We need two newlines for Python to stop waiting for more input.
proc_stdin.write(f"{bootstrap_command}\n\n")
proc_stdin.flush()
with self.watch_stdout_stderr() as get_output:
try:
# Bootstrapping is very fast. (Unlike user code where we have
# no a priori expected upper bound.) If we don't get a response
# prior to the timeout, it is overwhelmingly likely that the
# worker died or the bootstrap failed. (E.g. failed to resolve
# import path.) This simply allows us to raise a good error.
bootstrap_pipe = subprocess_rpc.Pipe(
writer_pid=self._proc.pid,
read_handle=self._output_pipe.read_handle,
write_handle=self._output_pipe.write_handle,
timeout=self._bootstrap_timeout,
)
result = bootstrap_pipe.read()
assert result == subprocess_rpc.BOOTSTRAP_IMPORT_SUCCESS, result
result = bootstrap_pipe.read()
assert result == subprocess_rpc.BOOTSTRAP_INPUT_LOOP_SUCCESS, result
self._worker_bootstrap_finished = True
assert self._proc.poll() is None
except (Exception, KeyboardInterrupt) as e:
stdout, stderr = get_output()
cause = "import failed" if self._proc.poll() else "timeout"
raise e from RuntimeError(
f"Failed to bootstrap worker ({cause}):\n"
f" working_dir: {self.working_dir}\n"
f" stdout:\n{textwrap.indent(stdout, ' ' * 8)}\n\n"
f" stderr:\n{textwrap.indent(stderr, ' ' * 8)}"
)
def _log_cmd(self, snippet: str) -> None:
with open(self._command_log, "at", encoding="utf-8") as f:
now = datetime.datetime.now().strftime("[%Y-%m-%d] %H:%M:%S.%f")
f.write(f"# {now}\n{snippet}\n\n")
@contextlib.contextmanager
def watch_stdout_stderr(self):
# Get initial state for stdout and stderr, since we only want to
# capture output since the contextmanager started.
stdout_stat = os.stat(self._stdout_f.name)
stderr_stat = os.stat(self._stderr_f.name)
def get() -> typing.Tuple[str, str]:
with open(self._stdout_f.name, "rb") as f:
_ = f.seek(stdout_stat.st_size)
stdout = f.read().decode("utf-8").strip()
with open(self._stderr_f.name, "rb") as f:
_ = f.seek(stderr_stat.st_size)
stderr = f.read().decode("utf-8").strip()
return stdout, stderr
yield get
def _run(self, snippet: str) -> None:
"""Helper method for running code in a subprocess."""
assert self._worker_bootstrap_finished
assert self.alive, "Process has exited"
snippet = textwrap.dedent(snippet)
with self.watch_stdout_stderr() as get_output:
self._input_pipe.write(snippet.encode(subprocess_rpc.ENCODING))
self._log_cmd(snippet)
result = marshal.loads(self._output_pipe.read())
if isinstance(result, str):
assert result == subprocess_rpc.SUCCESS
return
assert isinstance(result, dict)
if not result:
stdout, stderr = get_output()
raise subprocess.SubprocessError(
"Uncaught Exception in worker:"
f" working_dir: {self.working_dir}\n"
f" stdout:\n{textwrap.indent(stdout, ' ' * 8)}\n\n"
f" stderr:\n{textwrap.indent(stderr, ' ' * 8)}")
serialized_e = subprocess_rpc.SerializedException(**result)
stdout, stderr = get_output()
subprocess_rpc.SerializedException.raise_from(
serialized_e=serialized_e,
extra_context=(
f" working_dir: {self.working_dir}\n"
f" stdout:\n{textwrap.indent(stdout, ' ' * 8)}\n\n"
f" stderr:\n{textwrap.indent(stderr, ' ' * 8)}"
)
)
def _kill_proc(self) -> None:
"""Best effort to kill subprocess."""
if getattr(self, "_proc", None) is None:
# We failed in the constructor, so there's nothing to clean up.
return
self._input_pipe.write(subprocess_rpc.HARD_EXIT)
try:
self._proc.wait(timeout=1)
except subprocess.TimeoutExpired:
if not subprocess_rpc.IS_WINDOWS:
self._proc.send_signal(signal.SIGINT)
try:
self._proc.terminate()
except PermissionError:
# NoisePoliceWorker runs under sudo, and thus will not allow
# SIGTERM to be sent.
print(f"Failed to clean up process {self._proc.pid}")
# Unfortunately Popen does not clean up stdin when using PIPE. However
# we also can't unconditionally close the fd as it could interfere with
# the orderly teardown of the process. We try our best to kill
# `self._proc` in the previous block; if `self._proc` is terminated we
# make sure its stdin TextIOWrapper is closed as well.
try:
self._proc.wait(timeout=1)
proc_stdin = self._proc.stdin
if proc_stdin is not None:
proc_stdin.close()
except subprocess.TimeoutExpired:
pass
self._alive = False
def __del__(self) -> None:
self._kill_proc()
# We own these fd's, and it seems that we can unconditionally close
# them without impacting the shutdown of `self._proc`.
self._stdout_f.close()
self._stderr_f.close()
# Finally, make sure we don't leak any files.
shutil.rmtree(self._working_dir, ignore_errors=True)
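# Usage sketch (illustrative, not part of the original file). It assumes the
# repository layout described above so that the child process can import
# `components._impl.workers.subprocess_rpc` during bootstrap:
def _example_subprocess_worker_round_trip() -> None:
    worker = SubprocessWorker(timeout=60)
    worker.store("n", 4)
    worker.run("squares = [i ** 2 for i in range(n)]")
    print(worker.load("squares"))  # [0, 1, 4, 9]
    del worker  # __del__ shuts down the child and removes the temp working dir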
|
"""Utilities to handle communication between parent worker.
This module implements three principle facilities:
1) Raw IPC (via the Pipe class)
2) Exception propagation (via the SerializedException class)
3) A run loop for the worker (via the run_loop function)
"""
import contextlib
import dataclasses
import datetime
import io
import marshal
import os
import pickle
import psutil
import struct
import sys
import textwrap
import threading
import time
import traceback
import types
import typing
# Shared static values / namespace between worker and parent
BOOTSTRAP_IMPORT_SUCCESS = b"BOOTSTRAP_IMPORT_SUCCESS"
BOOTSTRAP_INPUT_LOOP_SUCCESS = b"BOOTSTRAP_INPUT_LOOP_SUCCESS"
WORKER_IMPL_NAMESPACE = "__worker_impl_namespace"
# Constants for passing to and from pipes
_CHECK = b"\x00\x00"
_TIMEOUT = b"\x01\x01"
_DEAD = b"\x02\x02"
assert len(_CHECK) == len(_TIMEOUT) == len(_DEAD)
_ULL = "Q" # Unsigned long long
_ULL_SIZE = len(struct.pack(_ULL, 0))
assert _ULL_SIZE == 8
# Text encoding for input commands.
ENCODING = "utf-8"
SUCCESS = "SUCCESS"
# In Python, `sys.exit()` is a soft exit. It throws a SystemExit, and only
# exits if that is not caught. `os._exit()` is not catchable, and is suitable
# for cases where we really, really need to exit. This is of particular
# importance because the worker run loop does its very best to swallow
# exceptions.
HARD_EXIT = "import os\nos._exit(0)".encode(ENCODING)
# Precompute serialized normal return values
EMPTY_RESULT = marshal.dumps({})
SUCCESS_BYTES = marshal.dumps(SUCCESS)
# =============================================================================
# == Raw Communication ========================================================
# =============================================================================
# Windows does not allow subprocesses to inherit file descriptors, so instead
# we have to go to the OS and get the handle for the backing resource.
IS_WINDOWS = sys.platform == "win32"
if IS_WINDOWS:
import msvcrt
def to_handle(fd: typing.Optional[int]) -> typing.Optional[int]:
return None if fd is None else msvcrt.get_osfhandle(fd)
def from_handle(handle: typing.Optional[int], mode: int) -> typing.Optional[int]:
return None if handle is None else msvcrt.open_osfhandle(handle, mode)
else:
to_handle = lambda fd: fd
from_handle = lambda fd, _: fd
class _TimeoutPIPE:
"""Allow Pipe to interrupt its read.
`os.read` is a syscall, which means it is not interruptable. This means
that normal timeout mechanisms such as `asyncio.wait_for(..., timeout=...)`
will not work because they rely on the awaited function returning control
to the event loop. An alternate formulation uses `run_in_executor` and
`asyncio.wait`, which places the read on a side thread under the hood.
However this is also not suitable, because:
1) This additional machinery increases the cost when data is already
present in the Pipe (most common case) ~1000x, from O(us) to O(ms)
    2) We have to poll the future, which wastes the awaitable nature of `read`
Instead of trying to interrupt the pipe read, we cause it to terminate by
writing to the pipe; because we control the read (via `Pipe.read`) we can
catch the sentinel timeout value and raise appropriately.
This class is designed to be extremely lightweight. Timeouts should be on
the order of seconds (or minutes), and are only to prevent deadlocks in the
case of catastrophic worker failure. As a result, we prioritize low
resource usage over the ability to support small timeouts.
"""
_singleton_lock = threading.Lock()
_singleton: typing.Optional["_TimeoutPIPE"] = None
_loop_lock = threading.Lock()
_active_reads: typing.Dict[int, typing.Tuple[float, float, int]]
_loop_cadence = 1 # second
@classmethod
def singleton(cls) -> "_TimeoutPIPE":
# This class will spawn a thread, so we only want one active at a time.
with cls._singleton_lock:
if cls._singleton is None:
cls._singleton = cls()
return cls._singleton
def __init__(self) -> None:
self._active_reads = {}
self._thread = threading.Thread(target=self._loop)
self._thread.daemon = True
self._thread.start()
def _loop(self):
# This loop is scoped to the life of the process, so we rely on process
# teardown to pull the rug out from under the daemonic thread running
# this function.
while True:
time.sleep(self._loop_cadence)
now = time.time()
with self._loop_lock:
for w_fd, (timeout, start_time, writer_pid) in tuple(self._active_reads.items()):
# if child process is in zombie status, check its exit code
if psutil.pid_exists(writer_pid):
p = psutil.Process(writer_pid)
if p.status() == psutil.STATUS_ZOMBIE:
# wait 1 second for the exit code
exit_code = p.wait(timeout=self._loop_cadence)
if exit_code:
os.write(w_fd, _DEAD + struct.pack(_ULL, abs(int(exit_code))))
self.pop(w_fd)
# check if process timeout
if timeout:
if now - start_time >= timeout and w_fd in self._active_reads:
os.write(w_fd, _TIMEOUT)
self.pop(w_fd)
def pop(self, w_fd: int) -> None:
self._active_reads.pop(w_fd, None)
@classmethod
@contextlib.contextmanager
def maybe_timeout_read(cls, pipe: "Pipe") -> None:
timeout = pipe.timeout
# Spawn a loop thread to periodically check the liveness of subprocess
w_fd = pipe.write_fd
assert w_fd is not None, "Cannot timeout without write file descriptor."
assert pipe.get_writer_pid() is not None, "Cannot check process liveness without pid."
singleton = cls.singleton()
with singleton._loop_lock:
# This will only occur in the case of concurrent reads on different
# threads (not supported) or a leaked case.
assert w_fd not in singleton._active_reads, f"{w_fd} is already being watched."
singleton._active_reads[w_fd] = (timeout, time.time(), pipe.get_writer_pid())
try:
yield
finally:
singleton.pop(w_fd)
class Pipe:
"""Helper class to move data in a robust fashion.
This class handles:
1) Child process liveness checks if pipe is read by parent
2) File descriptor lifetimes
3) File descriptor inheritance
4) Message packing and unpacking
5) (Optional) timeouts for reads
NOTE: we don't check liveness of parent since the parent process
shouldn't regularly fail without proper clean up.
"""
def __init__(
self,
        # `writer_pid` is only set when `self` is a pipe read by the parent,
        # in which case it is the pid of the child (writer) process.
writer_pid: typing.Optional[int] = None,
read_handle: typing.Optional[int] = None,
write_handle: typing.Optional[int] = None,
timeout: typing.Optional[float] = None,
timeout_callback: typing.Callable[[], typing.NoReturn] = (lambda: None),
) -> None:
self._writer_pid = writer_pid
self._owns_pipe = read_handle is None and write_handle is None
if self._owns_pipe:
self.read_fd, self.write_fd = os.pipe()
else:
self.read_fd = from_handle(read_handle, os.O_RDONLY)
self.write_fd = from_handle(write_handle, os.O_WRONLY)
self.read_handle = read_handle or to_handle(self.read_fd)
self.write_handle = write_handle or to_handle(self.write_fd)
self.timeout = timeout
self.timeout_callback = timeout_callback
def _read(self, size: int) -> bytes:
"""Handle the low level details of reading from the PIPE."""
if self.read_fd is None:
raise IOError("Cannot read from PIPE, we do not have the read handle")
        # `self._writer_pid` is not None iff `self` is the pipe read by the parent process.
        # Timeouts and child process liveness checks are only supported in that case.
if self._writer_pid:
with _TimeoutPIPE.maybe_timeout_read(self):
raw_msg = os.read(self.read_fd, len(_CHECK) + size)
else:
raw_msg = os.read(self.read_fd, len(_CHECK) + size)
check_bytes, msg = raw_msg[:len(_CHECK)], raw_msg[len(_CHECK):]
if check_bytes == _TIMEOUT:
self.timeout_callback() # Give caller the chance to cleanup.
raise IOError(f"Exceeded timeout: {self.timeout}")
if check_bytes == _DEAD:
raise IOError(f"Subprocess terminates with code {int.from_bytes(msg, sys.byteorder)}")
if check_bytes != _CHECK:
raise IOError(f"{check_bytes} != {_CHECK}, {msg}")
if len(msg) != size:
raise IOError(f"len(msg) != size: {len(msg)} vs. {size}")
return msg
def read(self) -> bytes:
msg_size = struct.unpack(_ULL, self._read(_ULL_SIZE))[0]
return self._read(msg_size)
def write(self, msg: bytes) -> None:
if self.write_fd is None:
raise IOError("Cannot write from PIPE, we do not have the write handle")
assert isinstance(msg, bytes), msg
packed_msg = (
# First read: message length
_CHECK + struct.pack(_ULL, len(msg)) +
# Second read: message contents
_CHECK + msg
)
os.write(self.write_fd, packed_msg)
def get_writer_pid(self) -> int:
        assert self._writer_pid is not None, (
            "Writer pid is not specified. Maybe calling from the child process "
            "or an input pipe. Please report a bug.")
return self._writer_pid
def set_writer_pid(self, writer_pid: int) -> None:
self._writer_pid = writer_pid
def _close_fds(self):
"""Factor cleanup to a helper so we can test when it runs."""
os.close(self.read_fd)
os.close(self.write_fd)
def __del__(self) -> None:
if self._owns_pipe:
self._close_fds()
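# Illustration (not part of the original module): `Pipe.write` frames each
# message as a checked 8-byte length header followed by the checked payload,
# and `Pipe.read` performs the two matching reads:
def _example_pipe_round_trip() -> bytes:
    pipe = Pipe()
    pipe.write(b"hello")   # writes _CHECK + length, then _CHECK + b"hello"
    return pipe.read()     # returns b"hello"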
# =============================================================================
# == Exception Propagation ===================================================
# =============================================================================
class ExceptionUnpickler(pickle.Unpickler):
"""Unpickler which is specialized for Exception types.
When we catch an exception that we want to raise in another process, we
need to include the type of Exception. For custom exceptions this is a
problem, because pickle dynamically resolves imports which means we might
not be able to unpickle in the parent. (And reviving them by replaying
the constructor args might not work.) So in the interest of robustness, we
confine ourselves to builtin Exceptions. (With UnserializableException as
a fallback.)
However it is not possible to marshal even builtin Exception types, so
instead we use pickle and check that the type is builtin in `find_class`.
"""
@classmethod
def load_bytes(cls, data: bytes) -> typing.Type[Exception]:
result = cls(io.BytesIO(data)).load()
# Make sure we have an Exception class, but not an instantiated
# Exception.
        if isinstance(result, Exception):
            raise pickle.UnpicklingError(
                f"{result} is an Exception instance, not a class.")
        if not (isinstance(result, type) and issubclass(result, Exception)):
            raise pickle.UnpicklingError(f"{result} is not an Exception")
return result # type: ignore[no-any-return]
def find_class(self, module: str, name: str) -> typing.Any:
if module != "builtins":
raise pickle.UnpicklingError(f"Invalid object: {module}.{name}")
return super().find_class(module, name)
class UnserializableException(Exception):
"""Fallback class for if a non-builtin Exception is raised."""
def __init__(self, type_repr: str, args_repr: str) -> None:
self.type_repr = type_repr
self.args_repr = args_repr
super().__init__(type_repr, args_repr)
class ChildTraceException(Exception):
"""Used to display a raising child's stack trace in the parent's stderr."""
pass
@dataclasses.dataclass(init=True, frozen=True)
class SerializedException:
_is_serializable: bool
_type_bytes: bytes
_args_bytes: bytes
# Fallbacks for UnserializableException
_type_repr: str
_args_repr: str
_traceback_print: str
@staticmethod
def from_exception(e: Exception, tb: types.TracebackType) -> "SerializedException":
"""Best effort attempt to serialize Exception.
Because this will be used to communicate from a subprocess to its
parent, we want to surface as much information as possible. It is
not possible to serialize a traceback because it is too intertwined
with the runtime; however what we really want is the traceback so we
can print it. We can grab that string and send it without issue. (And
providing a stack trace is very important for debuggability.)
ExceptionUnpickler explicitly refuses to load any non-builtin exception
(for the same reason we prefer `marshal` to `pickle`), so we won't be
able to serialize all cases. However we don't want to simply give up
as this will make it difficult for a user to diagnose what's going on.
So instead we extract what information we can, and raise an
UnserializableException in the main process with whatever we were able
to scrape up from the child process.
"""
try:
print_file = io.StringIO()
python_vinfo = sys.version_info
if python_vinfo.major == 3 and python_vinfo.minor < 10:
                # Starting from Python 3.10, traceback renames the `etype` parameter to `exc`
                # and makes it positional-only.
# doc: https://docs.python.org/3/library/traceback.html#traceback.print_exception
traceback.print_exception(
etype=type(e),
value=e,
tb=tb,
file=print_file,
)
else:
traceback.print_exception(
type(e),
value=e,
tb=tb,
file=print_file,
)
print_file.seek(0)
traceback_print: str = print_file.read()
        except Exception:
            # NB: deliberately not bound to a name here; `except ... as e`
            # would shadow (and then delete) the `e` we are serializing.
traceback_print = textwrap.dedent("""
Traceback
Failed to extract traceback from worker. This is not expected.
""").strip()
try:
args_bytes: bytes = marshal.dumps(e.args)
type_bytes = pickle.dumps(e.__class__)
# Make sure we'll be able to get something out on the other side.
revived_type = ExceptionUnpickler.load_bytes(data=type_bytes)
revived_e = revived_type(*marshal.loads(args_bytes))
is_serializable: bool = True
except Exception:
is_serializable = False
args_bytes = b""
type_bytes = b""
# __repr__ can contain arbitrary code, so we can't trust it to noexcept.
def hardened_repr(o: typing.Any) -> str:
try:
return repr(o)
except Exception:
return "< Unknown >"
return SerializedException(
_is_serializable=is_serializable,
_type_bytes=type_bytes,
_args_bytes=args_bytes,
_type_repr=hardened_repr(e.__class__),
_args_repr=hardened_repr(getattr(e, "args", None)),
_traceback_print=traceback_print,
)
@staticmethod
def raise_from(
serialized_e: "SerializedException",
extra_context: typing.Optional[str] = None,
) -> None:
"""Revive `serialized_e`, and raise.
We raise the revived exception type (if possible) so that any higher
try catch logic will see the original exception type. In other words:
```
try:
worker.run("assert False")
except AssertionError:
...
```
will flow identically to:
```
try:
assert False
except AssertionError:
...
```
If for some reason we can't move the true exception type to the main
process (e.g. a custom Exception) we raise UnserializableException as
a fallback.
"""
if serialized_e._is_serializable:
revived_type = ExceptionUnpickler.load_bytes(data=serialized_e._type_bytes)
e = revived_type(*marshal.loads(serialized_e._args_bytes))
else:
e = UnserializableException(serialized_e._type_repr, serialized_e._args_repr)
traceback_str = serialized_e._traceback_print
if extra_context:
traceback_str = f"{traceback_str}\n{extra_context}"
raise e from ChildTraceException(traceback_str)
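# Sketch (illustrative, not from the original source): round-tripping a builtin
# exception the same way the worker run loop does before shipping it back to
# the parent process:
def _example_exception_round_trip() -> None:
    try:
        raise ValueError("boom")
    except ValueError as err:
        serialized = SerializedException.from_exception(err, sys.exc_info()[2])
    try:
        SerializedException.raise_from(serialized)
    except ValueError as revived:
        assert revived.args == ("boom",)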
# =============================================================================
# == Snippet Execution =======================================================
# =============================================================================
def _log_progress(suffix: str) -> None:
now = datetime.datetime.now().strftime("[%Y-%m-%d] %H:%M:%S.%f")
print(f"{now}: TIMER_SUBPROCESS_{suffix}")
def _run_block(
*,
input_pipe: Pipe,
output_pipe: Pipe,
globals_dict: typing.Dict[str, typing.Any],
):
result = EMPTY_RESULT
try:
_log_progress("BEGIN_READ")
cmd = input_pipe.read().decode(ENCODING)
_log_progress("BEGIN_EXEC")
exec( # noqa: P204
compile(cmd, "<subprocess-worker>", "exec"),
globals_dict
)
_log_progress("SUCCESS")
result = SUCCESS_BYTES
except (Exception, KeyboardInterrupt, SystemExit) as e:
tb = sys.exc_info()[2]
assert tb is not None
serialized_e = SerializedException.from_exception(e, tb)
result = marshal.dumps(dataclasses.asdict(serialized_e))
_log_progress("FAILED")
finally:
output_pipe.write(result)
_log_progress("FINISHED")
sys.stdout.flush()
sys.stderr.flush()
def run_loop(
*,
input_handle: int,
output_pipe: Pipe,
load_handle: int,
) -> None:
input_pipe = Pipe(read_handle=input_handle)
# In general, we want a clean separation between user code and framework
# code. However, certain methods in SubprocessWorker (store and load)
# want to access implementation details in this module. As a result, we
# run tasks through a context where globals start out clean EXCEPT for
# a namespace where we can stash implementation details.
globals_dict = {
WORKER_IMPL_NAMESPACE: {
"subprocess_rpc": sys.modules[__name__],
"marshal": marshal,
"load_pipe": Pipe(write_handle=load_handle)
}
}
output_pipe.write(BOOTSTRAP_INPUT_LOOP_SUCCESS)
while True:
_run_block(
input_pipe=input_pipe,
output_pipe=output_pipe,
globals_dict=globals_dict,
)
|
import abc
import ast
import typing
class WorkerBase(abc.ABC):
"""Interface for the core worker abstraction.
Conceptually, a worker is modeled as a remote interactive Python terminal.
One can send code to be executed (analogous to writing to stdin), and
perform basic stores and loads (analogous to RPC).
It is the responsibility of higher layers of the stack (e.g. those that
call the worker) to generate any code that the worker needs; the worker
itself is deliberately dumb. As a result, there are several restrictions
on the semantics of a worker:
1) Workers are individually scoped, and one should not assume any state
is shared between the caller and worker unless explicitly set with
`store` and `load` calls. However, worker state does persist across
calls.
2) Stores and loads go through a serialization step. This means they
should only be basic types (specifically those supported by the
marshal library), and the results will be copies rather than references.
3) Framework code will often live side-by-side with user code. Framework
code should take care to scope implementation details to avoid leaking
variables, and choose names defensively.
Good:
```
def _timer_impl_call():
import my_lib
my_value = 1
my_lib.foo(my_value)
_timer_impl_call()
del _timer_impl_call
```
Bad: (Leaks `my_lib` and `my_value` into user variable space.)
```
import my_lib
my_value = 1
my_lib.foo(my_value)
```
4) One must take care when importing code in the worker, because changes
to `sys.path` in the parent may not be reflected in the child.
See `in_process_worker.InProcessWorker` for a concise example of the
concrete semantics of a worker.
"""
@abc.abstractmethod
def run(self, snippet: str) -> None:
"""Execute snippet (Python code), and return when complete."""
...
@abc.abstractmethod
def store(self, name: str, value: typing.Any, *, in_memory: bool = False) -> None:
"""Assign `value` to `name` in the worker.
(This will be a copy if `in_memory=False`)
"""
...
@abc.abstractmethod
def load(self, name: str) -> typing.Any:
"""Fetch a copy of `name` from worker, and return it to the caller."""
...
@abc.abstractproperty
def in_process(self) -> bool:
"""Is this worker in the same process as the caller.
This property can be used to gate certain features. (Such as sharing
        in-memory objects.) However it should be used sparingly, as it violates
the abstraction of a purely remote worker.
"""
...
def load_stmt(self, stmt: str) -> typing.Any:
"""Convenience wrapper to extract simple bits of computation.
Suppose we have an object `my_object` which implements a property `foo`.
It is much more convenient and more readable to write:
`foo = worker.load_stmt("my_object.foo")`
than to manually save the property and extract it in user code.
"""
# Check the AST so we fail early and with a clear message when stmt
# is invalid.
try:
ast.parse(stmt, mode="single")
except SyntaxError:
raise SyntaxError(f"Invalid stmt: {stmt}")
self.run(f"__worker_lambda_result = ({stmt})")
result = self.load("__worker_lambda_result")
self.run("del __worker_lambda_result")
return result
|
"""Unit tests specifically for the components of SubprocessWorker.
End-to-end tests (e.g. does SubprocessWorker properly implement the
WorkerBase API) still live in `test_worker`.
"""
import functools
import os
import sys
import textwrap
import threading
import typing
from torch.testing._internal.common_utils import TestCase, run_tests
try:
from components._impl.tasks import base as task_base
from components._impl.workers import subprocess_rpc
except (ImportError, ModuleNotFoundError):
print(f"""
This test must be run from the repo root directory as
`python -m components.test.{os.path.splitext(os.path.basename(__file__))[0]}`
""")
raise
class TestParseFunction(TestCase):
@staticmethod
def _indent(s: str) -> str:
return textwrap.indent(s, " " * 12)
def test_parse_trivial(self) -> None:
def f(x: int) -> None:
pass
_, body = task_base.parse_f(f)
self.assertExpectedInline(
self._indent(body), """\
pass""",
)
def test_parse_simple(self) -> None:
def f(
x: int,
) -> None:
for _ in range(10):
pass
_, body = task_base.parse_f(f)
self.assertExpectedInline(
self._indent(body), """\
for _ in range(10):
pass""",
)
def test_parse_inline(self) -> None:
def f(x: typing.Any, y: int = 1) -> None: print([x for _ in range(y)])
_, body = task_base.parse_f(f)
self.assertExpectedInline(
self._indent(body), """\
print([x for _ in range(y)])""",
)
def test_parse_with_comments(self) -> None:
def f(
x: int, # This is a comment
y: bool, # also a comment
# More comments.
) -> typing.Any: # Comment on return line.
"""Docstring
Note: This will be dropped in Python 3.7. See `parse_f` for details.
"""
x += 1
y = """
This is preserved.
"""
# Comment in src.
return y
_, body = task_base.parse_f(f)
# Python 3.7 removes docstring but 3.8+ doesn't. See `parse_f` for details.
docstring = """\
\"\"\"Docstring
Note: This will be dropped in Python 3.7. See `parse_f` for details.
\"\"\"\n\n""" if sys.version_info[:2] > (3,7) else ""
self.assertExpectedInline(
self._indent(body), f"""{docstring}\
x += 1
y = \"\"\"
This is preserved.
\"\"\"
# Comment in src.
return y""",
)
def test_parse_method(self) -> None:
class MyClass:
@staticmethod
def f(x: int) -> int:
"""Identity, but with more steps"""
return x
@staticmethod
def g(x: int) -> int:
"""Identity, but with more steps
Culled, as this is a multi-line docstring
"""
return x
_, body = task_base.parse_f(MyClass.f)
self.assertExpectedInline(
self._indent(body), """\
\"\"\"Identity, but with more steps\"\"\"
return x""",
)
_, body = task_base.parse_f(MyClass.g)
# Python 3.7 removes docstring but 3.8+ doesn't. See `parse_f` for details.
docstring = """\
\"\"\"Identity, but with more steps
Culled, as this is a multi-line docstring
\"\"\"\n""" if sys.version_info[:2] > (3,7) else ""
self.assertExpectedInline(
self._indent(body), f"""{docstring}\
return x""",
)
def test_parse_pathological(self) -> None:
def f(
x: \
int,
y: typing.Dict[str, int],
*,
z: str,
# Isn't that a charming (but legal) indentation?
) \
-> typing.Optional[typing.Union[
float, int]
]: # Just for good measure.
"""Begin the actual body.
(For better or worse...)
"""
del x
q = y.get(
z,
None,
)
# Intermediate comment
if False:
return 1
elif q:
raise ValueError
q = 1
_, body = task_base.parse_f(f)
# Python 3.7 removes docstring but 3.8+ doesn't. See `parse_f` for details.
docstring = """\
\"\"\"Begin the actual body.
(For better or worse...)
\"\"\"\n""" if sys.version_info[:2] > (3,7) else ""
self.assertExpectedInline(
self._indent(body), f"""{docstring}\
del x
q = y.get(
z,
None,
)
# Intermediate comment
if False:
return 1
elif q:
raise ValueError
q = 1""",
)
def test_fully_typed(self) -> None:
def f(x):
pass
with self.assertRaisesRegex(
TypeError,
"Missing type annotation for parameter `x`"
):
task_base.parse_f(f)
def g(x: int):
pass
with self.assertRaisesRegex(
TypeError,
"Missing return annotation."
):
task_base.parse_f(g)
def test_no_functor(self) -> None:
class F:
def __call__(self) -> None:
pass
with self.assertRaisesRegex(TypeError, "Expected function, got"):
task_base.parse_f(F())
def test_no_variadic(self) -> None:
def f(*args) -> None:
pass
with self.assertRaisesRegex(
TypeError,
r"Variadic positional argument `\*args` not permitted for `run_in_worker` function."
):
task_base.parse_f(f)
def g(**kwargs) -> None:
pass
with self.assertRaisesRegex(
TypeError,
r"Variadic keywork argument `\*\*kwargs` not permitted for `run_in_worker` function."
):
task_base.parse_f(g)
def test_no_decorator(self) -> None:
def my_decorator(f: typing.Callable) -> typing.Callable:
@functools.wraps(f)
def g(*args, **kwargs) -> typing.Any:
return f(*args, **kwargs)
return g
@my_decorator
def f() -> None:
pass
with self.assertRaisesRegex(
TypeError,
"`f` cannot be decorated below `@run_in_worker`"
):
task_base.parse_f(f)
class TestSubprocessRPC(TestCase):
def test_pipe_basic_read_write(self) -> None:
pipe = subprocess_rpc.Pipe()
# Test small read.
msg = b"abc"
pipe.write(msg)
self.assertEqual(msg, pipe.read())
# Test large read.
msg = b"asdjkf" * 1024
pipe.write(msg)
self.assertEqual(msg, pipe.read())
def test_pipe_stacked_read_write(self) -> None:
pipe = subprocess_rpc.Pipe()
pipe.write(b"abc")
pipe.write(b"def")
pipe.write(b"ghi")
self.assertEqual(b"abc", pipe.read())
self.assertEqual(b"def", pipe.read())
self.assertEqual(b"ghi", pipe.read())
def test_pipe_clone(self) -> None:
msg = b"msg"
pipe = subprocess_rpc.Pipe()
alt_pipe_0 = subprocess_rpc.Pipe(write_handle=pipe.write_handle)
alt_pipe_0.write(msg)
self.assertEqual(msg, pipe.read())
with self.assertRaises(IOError):
alt_pipe_0.read()
alt_pipe_1 = subprocess_rpc.Pipe(read_handle=pipe.read_handle)
pipe.write(msg)
self.assertEqual(msg, alt_pipe_1.read())
with self.assertRaises(IOError):
alt_pipe_1.write(msg)
def test_pipe_timeout(self) -> None:
result = {}
def callback():
result["callback_run"] = True
# We have to run this in a thread, because if the timeout mechanism
# fails we don't want the entire unit test suite to hang.
pipe = subprocess_rpc.Pipe(writer_pid=os.getpid(), timeout=0.5, timeout_callback=callback)
def target():
try:
pipe.read()
except Exception as e:
result["e"] = e
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
thread.join(timeout=10)
e: typing.Optional[Exception] = result.get("e", None)
self.assertIsNotNone(e)
with self.assertRaisesRegex(OSError, "Exceeded timeout: 0.5"):
raise e
self.assertTrue(result.get("callback_run", None), True)
def test_pipe_concurrent_timeout(self) -> None:
result = {"callback_count": 0, "exceptions": []}
def callback():
result["callback_count"] += 1
timeouts = [0.5, 1.0, 1.5]
pipes = [
subprocess_rpc.Pipe(writer_pid=os.getpid(), timeout=timeout, timeout_callback=callback)
for timeout in timeouts
]
def target(pipe):
try:
pipe.read()
except Exception as e:
result["exceptions"].append(e)
threads = [threading.Thread(target=target, args=(pipe,)) for pipe in pipes]
[t.start() for t in threads]
[t.join(timeout=5) for t in threads]
self.assertEqual(result["callback_count"], 3)
self.assertEqual(len(result["exceptions"]), 3)
for e in result["exceptions"]:
with self.assertRaisesRegex(OSError, "Exceeded timeout:"):
raise e
def test_pipe_cleanup(self) -> None:
assertTrue = self.assertTrue
assertFalse = self.assertFalse
del_audit = {"count": 0}
class OwnCheckingPipe(subprocess_rpc.Pipe):
def __init__(self):
super().__init__()
self._cleanup_was_run = False
assertTrue(self._owns_pipe)
def _close_fds(self) -> None:
super()._close_fds()
self._cleanup_was_run = True
def __del__(self) -> None:
super().__del__()
assert self._cleanup_was_run
del_audit["count"] += 1
class NonOwnCheckingPipe(subprocess_rpc.Pipe):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assertFalse(self._owns_pipe)
def _close_fds(self) -> None:
raise IOError("This would damage the owning pipe")
def __del__(self) -> None:
super().__del__()
del_audit["count"] += 1
pipe = OwnCheckingPipe()
alt_pipe_0 = NonOwnCheckingPipe(read_handle=pipe.read_handle)
alt_pipe_1 = NonOwnCheckingPipe(write_handle=pipe.write_handle)
alt_pipe_2 = NonOwnCheckingPipe(
read_handle=pipe.read_handle,
write_handle=pipe.write_handle,
)
del pipe
del alt_pipe_0
del alt_pipe_1
del alt_pipe_2
# Make sure the tests we expect in __del__ actually ran.
self.assertEqual(del_audit["count"], 4)
class TestSubprocessExceptions(TestCase):
def _test_raise(
self,
raise_type: typing.Type[Exception],
reraise_type: typing.Type[Exception],
) -> None:
try:
raise raise_type("Fail")
except Exception as e:
e_raised = e # `e` is scoped to the `except` block
tb = sys.exc_info()[2]
serialized_e = subprocess_rpc.SerializedException.from_exception(e=e, tb=tb)
with self.assertRaises(reraise_type):
subprocess_rpc.SerializedException.raise_from(serialized_e)
if raise_type is reraise_type:
try:
subprocess_rpc.SerializedException.raise_from(serialized_e)
self.fail("`raise_from` failed to raise.")
except Exception as e:
self.assertEqual(e_raised.args, e.args)
def _test_raise_builtin(self, raise_type: typing.Type[Exception]) -> None:
self._test_raise(raise_type=raise_type, reraise_type=raise_type)
def test_unserializable(self) -> None:
# Make sure we can always get an exception out, even if we can't
# extract any debug info.
serialized_e = subprocess_rpc.SerializedException.from_exception(e=None, tb=None)
with self.assertRaises(subprocess_rpc.UnserializableException):
subprocess_rpc.SerializedException.raise_from(serialized_e)
class MyException(Exception):
pass
class MyIOError(IOError):
pass
self._test_raise(MyException, subprocess_rpc.UnserializableException)
self._test_raise(MyIOError, subprocess_rpc.UnserializableException)
def test_serializable(self) -> None:
self._test_raise_builtin(Exception)
self._test_raise_builtin(AssertionError)
self._test_raise_builtin(IOError)
self._test_raise_builtin(NameError)
self._test_raise_builtin(ValueError)
if __name__ == '__main__':
run_tests()
|
import os
import subprocess
import sys
import signal
import textwrap
import typing
import torch
from torch.testing._internal.common_utils import TestCase, run_tests
try:
from components._impl.workers import base as base_worker
from components._impl.workers import in_process_worker
from components._impl.workers import subprocess_worker
from components._impl.workers import subprocess_rpc
except (ImportError, ModuleNotFoundError):
print(f"""
This test must be run from the repo root directory as
`python -m components.test.{os.path.splitext(os.path.basename(__file__))[0]}`
""")
raise
class CustomClass:
"""Used to test handline of non-builtin objects."""
pass
class TestBenchmarkWorker(TestCase):
def _test_namespace_isolation(self, worker: base_worker.WorkerBase):
worker_global_vars: typing.Dict[str, str] = worker.load_stmt(
r"{k: repr(type(v)) for k, v in globals().items()}")
allowed_keys = {
"__builtins__",
subprocess_rpc.WORKER_IMPL_NAMESPACE,
}
extra_vars = {
k: v for k, v in worker_global_vars.items()
if k not in allowed_keys
}
self.assertDictEqual(extra_vars, {})
def _subtest_cleanup(
self,
worker: base_worker.WorkerBase,
test_vars: typing.Tuple[str, ...]
) -> None:
worker.run("\n".join([f"del {v}" for v in test_vars]))
self._test_namespace_isolation(worker)
def _check_basic_store_and_load(self, worker: base_worker.WorkerBase) -> None:
worker.store("y", 2)
self.assertEqual(worker.load("y"), 2)
worker.run("del y")
with self.assertRaisesRegex(NameError, "name 'y' is not defined"):
worker.load("y")
def _check_custom_store_and_load(self, worker: base_worker.WorkerBase) -> None:
with self.assertRaisesRegex(ValueError, "unmarshallable object"):
worker.store("my_class", CustomClass())
worker.run("""
class CustomClass:
pass
my_class = CustomClass()
""")
with self.assertRaisesRegex(ValueError, "unmarshallable object"):
worker.load("my_class")
self._subtest_cleanup(worker, ("my_class", "CustomClass"))
def _check_load_stmt(self, worker: base_worker.WorkerBase) -> None:
self.assertDictEqual(
{"a": 1 + 3, 2: "b"},
worker.load_stmt('{"a": 1 + 3, 2: "b"}'),
)
self._subtest_cleanup(worker, ())
def _check_complex_stmts(self, worker: base_worker.WorkerBase) -> None:
worker.run("""
def test_fn():
x = 10
y = 2
# Make sure we can handle blank lines.
return x + y
z = test_fn()
""")
self.assertEqual(worker.load("z"), 12)
# Ensure variables persist across invocations. (In this case, `f`)
worker.run("z = test_fn() + 1")
self.assertEqual(worker.load("z"), 13)
# Ensure invocations have access to global variables.
worker.store("captured_var", 5)
worker.run("""
def test_fn():
# Make sure closures work properly
return captured_var + 1
z = test_fn()
""")
self.assertEqual(worker.load("z"), 6)
self._subtest_cleanup(worker, ("captured_var", "z", "test_fn"))
def _check_environment_consistency(self, worker: base_worker.WorkerBase) -> None:
# It is important that the worker mirrors the caller. Otherwise imports
# may not resolve, or may resolve to incorrect paths. As a result, the
# worker must ensure that it faithfully reproduces the caller's
# environment.
worker.run("""
import os
import sys
cwd = os.getcwd()
sys_executable = sys.executable
sys_path = sys.path
""")
self.assertEqual(worker.load("cwd"), os.getcwd())
self.assertEqual(worker.load("sys_executable"), sys.executable)
self.assertEqual(worker.load("sys_path"), sys.path)
# Environment parity is especially important for `torch`, since
# importing an incorrect version will result in silently garbage
# results.
worker.run("""
import torch
torch_file = torch.__file__
""")
self.assertEqual(worker.load("torch_file"), torch.__file__)
self._subtest_cleanup(
worker,
("os", "sys", "cwd", "sys_executable", "sys_path", "torch", "torch_file"),
)
def _test_exceptions(self, worker: base_worker.WorkerBase):
with self.assertRaisesRegex(AssertionError, "False is not True"):
worker.run("assert False, 'False is not True'")
with self.assertRaisesRegex(ValueError, "Test msg"):
worker.run("raise ValueError('Test msg')")
def _test_child_trace_exception(
self,
worker: subprocess_worker.SubprocessWorker,
) -> None:
try:
worker.run("print('This should not appear.')")
worker.run("""
print("This is not going to work")
with open("this_file_does_not_exist") as f:
pass
""")
self.fail("Worker should have raised.")
except FileNotFoundError as e:
e_cause = e.__cause__
self.assertIsInstance(e_cause, subprocess_rpc.ChildTraceException)
extra_debug_info: str = e_cause.args[0]
assert isinstance(extra_debug_info, str)
# stdout / stderr plumbing is only for the failing snippet. Print
# stmts from earlier expressions should not be included.
self.assertNotRegex(extra_debug_info, "This should not appear")
# Make sure the worker provided a stack trace.
self.assertRegex(
extra_debug_info,
textwrap.dedent(r"""
Traceback \(most recent call last\):
\s+ File.*subprocess_rpc.*
""").strip()
)
self.assertRegex(
extra_debug_info,
r"No such file or directory: .this_file_does_not_exist."
)
# Make sure stdout / stderr were plumbed from the worker.
self.assertRegex(extra_debug_info, "This is not going to work")
def _generic_worker_tests(self, worker: base_worker.WorkerBase) -> None:
# Make sure we have a clean start.
self._test_namespace_isolation(worker)
self._check_basic_store_and_load(worker)
self._check_load_stmt(worker)
self._check_custom_store_and_load(worker)
self._check_complex_stmts(worker)
self._check_environment_consistency(worker)
self._test_exceptions(worker)
self._test_namespace_isolation(worker)
def test_in_process_worker(self) -> None:
worker = in_process_worker.InProcessWorker(globals={})
self._generic_worker_tests(worker)
# InProcessWorker specific tests include passing non-empty globals.
worker = in_process_worker.InProcessWorker(globals={"x": 1})
# Make sure worker is actually using globals passed.
self.assertEqual(worker.load("x"), 1)
# Test `in_memory` exception for InProcessWorker.
worker.store("my_class", CustomClass(), in_memory=True)
self.assertIsInstance(worker._globals["my_class"], CustomClass)
def test_subprocess_worker(self) -> None:
worker = subprocess_worker.SubprocessWorker()
self._generic_worker_tests(worker)
self._test_child_trace_exception(worker)
def test_subprocess_worker_segv_handling(self):
worker = subprocess_worker.SubprocessWorker(timeout=1)
with self.assertRaisesRegex(OSError, f"Subprocess terminates with code {int(signal.SIGSEGV)}"):
worker.run("""
import os
import signal
os.kill(os.getpid(), signal.SIGSEGV)
""")
def test_subprocess_worker_fault_handling(self):
worker = subprocess_worker.SubprocessWorker(timeout=1)
with self.assertRaisesRegex(OSError, "Exceeded timeout"):
worker.run("""
import os
os._exit(0)
""")
self.assertFalse(worker.alive)
with self.assertRaisesRegex(AssertionError, "Process has exited"):
worker.run("pass")
worker = subprocess_worker.SubprocessWorker(timeout=1)
with self.assertRaisesRegex(OSError, "Exceeded timeout"):
worker.run("""
import time
time.sleep(2)
""")
# Once a command times out, the integrity of the underlying subprocess
# cannot be guaranteed and the worker needs to refuse future work.
# This is different from an Exception being thrown in the subprocess;
# in that case communication is still in an orderly state.
self.assertFalse(worker.alive)
with self.assertRaisesRegex(AssertionError, "Process has exited"):
worker.run("pass")
# Make sure `_kill_proc` is idempotent.
worker._kill_proc()
worker._kill_proc()
def test_subprocess_worker_sys_exit(self):
worker = subprocess_worker.SubprocessWorker(timeout=1)
with self.assertRaisesRegex(subprocess_rpc.UnserializableException, "SystemExit"):
worker.run("import sys")
worker.run("sys.exit()")
if __name__ == '__main__':
run_tests()
|
"""
This is a test file for TorchBenchAnalyzer
"""
from model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
def work():
# A simple mm test
import torch
n=4096
x = torch.ones((n, n), dtype=torch.float32, device="cuda")
y = torch.ones((n, n),dtype=torch.float32, device="cuda")
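# Rough arithmetic for interpreting the reported number (added sketch, not part
# of the original test): one n x n by n x n matmul performs ~2 * n**3 FLOPs, so
# with n = 4096 each torch.mm call is ~1.37e11 FLOPs (~0.137 TFLOPs), and the
# 200-iteration loop below issues roughly 27.5 TFLOPs of work in total.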
# configure model analyzer
model_analyzer = ModelAnalyzer()
model_analyzer.start_monitor()
# run the computation part
for i in range(200):
if i % 100 == 0:
print(i)
torch.mm(x, y)
# start test app here
# run_app(4096)
# stop and aggregate the profiling results
model_analyzer.stop_monitor()
model_analyzer.aggregate()
device_id, tflops = model_analyzer.calculate_flops()
print('{:<20} {:>20}'.format("FLOPS:", "%.4f TFLOPs per second" % tflops))
if __name__ == "__main__":
work()
|
from typing import Optional, OrderedDict, Tuple
from .dcgm.cpu_monitor import CPUMonitor
from .dcgm.dcgm_monitor import DCGMMonitor
from .dcgm.nvml_monitor import NVMLMonitor
from .tb_dcgm_types.da_exceptions import TorchBenchAnalyzerException
from .tb_dcgm_types.gpu_device_factory import GPUDeviceFactory
from .dcgm import dcgm_fields
from .dcgm.dcgm_structs import DCGMError
from .tb_dcgm_types.gpu_tensoractive import GPUTensorActive
from .tb_dcgm_types.gpu_utilization import GPUUtilization
from .tb_dcgm_types.gpu_power_usage import GPUPowerUsage
from .tb_dcgm_types.gpu_free_memory import GPUFreeMemory
from .tb_dcgm_types.gpu_peak_memory import GPUPeakMemory
from .tb_dcgm_types.gpu_fp32active import GPUFP32Active
from .tb_dcgm_types.gpu_dram_active import GPUDRAMActive
from .tb_dcgm_types.gpu_pcie_rx import GPUPCIERX
from .tb_dcgm_types.gpu_pcie_tx import GPUPCIETX
from .tb_dcgm_types.cpu_peak_memory import CPUPeakMemory
from .tb_dcgm_types.record import RecordType
from .tb_dcgm_types.record_aggregator import RecordAggregator
from .tb_dcgm_types.tb_logger import set_logger, LOGGER_NAME
from .tb_dcgm_types.config import *
import logging
logger = logging.getLogger(LOGGER_NAME)
import json
from time import time_ns
class ModelAnalyzer:
def __init__(self, export_metrics_file=None, metrics_needed=[], metrics_gpu_backend='nvml', cpu_monitored_pid=None):
# For debug
# set_logger(logging.DEBUG)
set_logger()
# delay the initialization to start_monitor
self.gpu_factory = None
self.gpus = None
# the cpu metrics to be collected
# self.gpu_metrics = [GPUUtilization, GPUPowerUsage,
# GPUFreeMemory, GPUPeakMemory, GPUFP32Active, GPUTensorActive, GPUDRAMActive, GPUPCIERX, GPUPCIETX]
self.gpu_metrics = []
# the final metric results. Its format is {GPU_UUID: {GPUUtilization: }}
# Example:
# {'GPU-4177e846-1274-84e3-dcde':
# {<class '.tb_dcgm_types.gpu_fp32active.GPUFP32Active'>:
# <.tb_dcgm_types.gpu_fp32active.GPUFP32Active object at 0x7f14bbae2280>
# }
# }
self.gpu_metric_value = {}
# There are two kinds of GPU monitor: DCGMMonitor and NVMLMonitor
self.gpu_monitor = None
self.gpu_monitor_started = False
self.gpu_records = None
self.config = AnalayzerConfig()
self.gpu_record_aggregator = RecordAggregator()
self.export_csv_name = None
self.set_export_csv_name(export_metrics_file)
# the cpu metrics to be collected. available metrics are [CPUPeakMemory, ]
self.cpu_metrics = []
self.cpu_monitor = None
self.cpu_monitor_started = False
self.cpu_records = None
self.cpu_record_aggregator = RecordAggregator()
self.cpu_metric_value = {}
self.cpu_monitored_pid = cpu_monitored_pid
# GPU Monitor Backend
self.gpu_monitor_backend = metrics_gpu_backend
self.start_monitor_timestamp = None
self.stop_monitor_timestamp = None
self.metrics_backend_mapping = {}
self.process_metrics(metrics_needed, metrics_gpu_backend)
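# Illustrative construction (argument values here are hypothetical):
#   analyzer = ModelAnalyzer(export_metrics_file="metrics.csv",
#                            metrics_needed=["flops", "gpu_peak_mem", "cpu_peak_mem"],
#                            metrics_gpu_backend="dcgm",
#                            cpu_monitored_pid=1234)  # pid of the workload process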
def process_metrics(self, metrics_needed, metrics_gpu_backend):
if 'gpu_peak_mem' in metrics_needed:
self.add_metric_gpu_peak_mem()
self.metrics_backend_mapping['gpu_peak_mem'] = 'dcgm' if metrics_gpu_backend == 'dcgm' else 'nvml'
if 'flops' in metrics_needed:
if metrics_gpu_backend == 'dcgm':
self.add_metric_gpu_flops()
self.metrics_backend_mapping['flops'] = 'dcgm'
else:
self.metrics_backend_mapping['flops'] = 'fvcore'
if 'cpu_peak_mem' in metrics_needed:
self.add_metric_cpu_peak_mem()
if metrics_gpu_backend == "default":
self.set_gpu_monitor_backend_nvml()
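# For example (sketch of the resulting mapping, given the branches above):
#   metrics_needed=["flops", "gpu_peak_mem"] with metrics_gpu_backend="nvml"
#   yields self.metrics_backend_mapping == {"gpu_peak_mem": "nvml", "flops": "fvcore"},
#   while metrics_gpu_backend="dcgm" maps both metrics to "dcgm".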
def add_metric_gpu_peak_mem(self):
self.gpu_metrics.append(GPUPeakMemory)
def add_metric_gpu_flops(self):
self.gpu_metrics.append(GPUFP32Active)
def add_metric_cpu_peak_mem(self):
self.cpu_metrics.append(CPUPeakMemory)
def set_gpu_monitor_backend_nvml(self):
self.gpu_monitor_backend = 'nvml'
def set_export_csv_name(self, export_csv_name=None):
if not export_csv_name:
return
self.export_csv_name = export_csv_name
# test for correct permission
with open(export_csv_name, 'w') as fout:
fout.write('')
def update_export_name(self, insert_str=''):
index = self.export_csv_name.find('.csv')
if not index == -1:
self.export_csv_name = self.export_csv_name[:index] + insert_str + self.export_csv_name[index:]
def start_monitor(self):
try:
self.start_monitor_timestamp = time_ns()
if self.gpu_metrics:
self.gpu_factory = GPUDeviceFactory(self.gpu_monitor_backend)
self.gpus = self.gpu_factory.verify_requested_gpus(['all', ])
if not self.gpus:
raise TorchBenchAnalyzerException('No GPU found')
if self.gpu_monitor_backend == 'dcgm':
self.gpu_monitor = DCGMMonitor(
self.gpus, self.config.monitoring_interval, self.gpu_metrics)
elif self.gpu_monitor_backend == 'nvml':
self.gpu_monitor = NVMLMonitor(
self.gpus, self.config.monitoring_interval, self.gpu_metrics)
if self.cpu_metrics:
self.cpu_monitor = CPUMonitor(self.config.monitoring_interval, self.cpu_metrics, self.cpu_monitored_pid)
if self.gpu_metrics:
self.gpu_monitor.start_recording_metrics()
self.gpu_monitor_started = True
if self.cpu_metrics:
self.cpu_monitor.start_recording_metrics()
self.cpu_monitor_started = True
except TorchBenchAnalyzerException:
self._destory_monitor()
raise
def _destory_monitor(self):
if self.gpu_monitor:
self.gpu_monitor.destroy()
self.gpu_monitor = None
self.gpu_monitor_started = False
if self.cpu_monitor:
self.cpu_monitor.destroy()
self.cpu_monitor = None
self.cpu_monitor_started = False
def stop_monitor(self):
self.stop_monitor_timestamp = time_ns()
if self.gpu_monitor:
self.gpu_records = self.gpu_monitor.stop_recording_metrics()
if self.cpu_monitor:
self.cpu_records = self.cpu_monitor.stop_recording_metrics()
# This must be called after stop_recording_metrics
self._destory_monitor()
def aggregate(self):
"""
Aggregate must be called after stop_monitor.
"""
if self.gpu_records:
new_gpu_records = [record for record in self.gpu_records if record.timestamp() <= self.stop_monitor_timestamp]
if len(new_gpu_records) == 0:
self.gpu_records = self.gpu_records[:1]
else:
self.gpu_records = new_gpu_records
self.gpu_record_aggregator.insert_all(self.gpu_records)
records_groupby_gpu = self.gpu_record_aggregator.groupby(
self.gpu_metrics, lambda record: record.device_uuid())
for gpu in self.gpus:
self.gpu_metric_value[gpu.device_uuid()] = {}
for metric_type, metric in records_groupby_gpu.items():
for gpu_uuid, metric_value in metric.items():
self.gpu_metric_value[gpu_uuid][metric_type] = metric_value
if self.cpu_records:
new_cpu_records = [record for record in self.cpu_records if record.timestamp() <= self.stop_monitor_timestamp]
if len(new_cpu_records) == 0:
self.cpu_records = self.cpu_records[:1]
else:
self.cpu_records = new_cpu_records
self.cpu_record_aggregator.insert_all(self.cpu_records)
records_groupby_cpu = self.cpu_record_aggregator.groupby(
self.cpu_metrics, lambda record: record.device_uuid())
# default cpu id is 0x1
self.cpu_metric_value[0x1] = {}
for metric_type, metric in records_groupby_cpu.items():
for cpu_uuid, metric_value in metric.items():
self.cpu_metric_value[cpu_uuid][metric_type] = metric_value
def set_monitoring_interval(self, attempted_interval):
"""
The default monitoring interval is DEFAULT_MONITORING_INTERVAL seconds (i.e., DEFAULT_MONITORING_INTERVAL * 1000 ms); attempted_interval is likewise given in seconds.
"""
# if attempted_interval < 0.1:
# logger.warning("The attempted interval is too short, would cause untrusted profiling results.")
self.config.monitoring_interval = attempted_interval
def print_flops(self):
print("==========Summary==========")
for gpu_uuid in self.gpu_metric_value:
gpu = self.gpu_factory.get_device_by_uuid(gpu_uuid)
print(self.gpu_metric_value[gpu_uuid][GPUFP32Active].value())
# TFLOPs/second = Device_SM_Count x Device_FMAs_Per_Cycle_Per_SM x 2 x Running_Frequency_KHz x DCGM_Activity / 1e+9
print("GPU : TFLOPs/Second %.4f" % (gpu._sm_count * gpu._fma_count * 2 *
gpu._frequency * self.gpu_metric_value[gpu_uuid][GPUFP32Active].value() / 1e+9))
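# Worked example of the formula above (illustrative numbers, assuming an
# A100-class GPU with 108 SMs, 64 FP32 FMAs per cycle per SM, and a
# 1,410,000 KHz clock): 108 * 64 * 2 * 1_410_000 * 0.5 / 1e9 ≈ 9.7
# TFLOPs/second at 50% FP32 activity, i.e. about half of the ~19.5
# TFLOPs/second FP32 peak.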
# @Yueming Hao: print all collected gpu records, for debug only
logger.debug(json.dumps([_.to_dict() for _ in self.gpu_records], indent=4))
def export_all_records_to_csv(self):
records_groupby_gpu = self.gpu_record_aggregator.groupby_wo_aggregate(
self.gpu_metrics, lambda record: record.device_uuid())
# {GPUUUID: {record_type: {timestamp: a_record, } }}
csv_records = {}
for gpu in self.gpus:
csv_records[gpu.device_uuid()] = OrderedDict()
for record_type in records_groupby_gpu:
csv_records[gpu.device_uuid()][record_type] = OrderedDict()
for gpu_uuid in records_groupby_gpu[record_type]:
cluster_records = records_groupby_gpu[record_type][gpu_uuid][record_type]
cluster_records.sort(key=lambda x: x.timestamp())
for record in cluster_records:
csv_records[gpu_uuid][record_type][record.timestamp()] = record.value()
with open(self.export_csv_name, 'w') as fout:
for gpu_uuid in csv_records:
# timestamp record in DCGM is microsecond
timestamps = set()
fout.write("timestamp(ms), ")
for record_type in csv_records[gpu_uuid]:
timestamps |= set(csv_records[gpu_uuid][record_type])
if record_type.tag == "gpu_fp32active":
tmp_line = "%s, " % (record_type.tag + '(%)')
elif record_type.tag.startswith('gpu_pice'):
tmp_line = "%s, " % (record_type.tag + '(bytes)')
elif record_type.tag == 'gpu_peak_memory':
tmp_line = "%s, " % (record_type.tag + '(MB)')
else:
tmp_line = "%s, " % record_type.tag
fout.write(tmp_line)
fout.write("duration(ms), ")
if GPUPCIERX in self.gpu_metrics:
fout.write("HtoD_throughput(GB/s), ")
if GPUPCIETX in self.gpu_metrics:
fout.write("DtoH_throughput(GB/s), ")
timestamps = list(timestamps)
timestamps.sort()
timestamp_start = timestamps[0]
fout.write('\n')
last_timestamp = timestamp_start
for a_timestamp in timestamps:
duration = (a_timestamp - last_timestamp) / 1e3
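# Unit note (added for clarity): DCGM timestamps are in microseconds, so
# dividing by 1e3 gives a duration in milliseconds; the PCIe throughput
# columns below convert bytes-per-interval to GB/s via
# bytes / duration_ms * 1000 / 1024**3.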
last_timestamp = a_timestamp
line = "%.2f, " % ((a_timestamp - timestamp_start) / 1000)
for record_type in csv_records[gpu_uuid]:
value = csv_records[gpu_uuid][record_type].get(a_timestamp, -1)
line += "%.2f, " % value
line += "%.2f, " % duration
if duration != 0 :
if GPUPCIERX in self.gpu_metrics:
pcierx_record = csv_records[gpu_uuid][GPUPCIERX].get(a_timestamp, -1)
if pcierx_record != -1:
line += "%.2f, " % (pcierx_record / duration * 1000 / 1024 / 1024 / 1024)
if GPUPCIETX in self.gpu_metrics:
pcietx_record = csv_records[gpu_uuid][GPUPCIETX].get(a_timestamp, -1)
line += "%.2f, " % (pcietx_record / duration * 1000 / 1024 / 1024 / 1024)
fout.write(line + "\n")
def calculate_flops(self, gpu_uuid=None) -> float:
"""
Calculate TFLOPs/second for the GPU with the given uuid, or for the first available GPU.
@return : TFLOPs/second as a float when gpu_uuid is given; otherwise a (device_id, TFLOPs/second) tuple for the first available GPU.
"""
if gpu_uuid:
if gpu_uuid in self.gpu_metric_value:
gpu = self.gpu_factory.get_device_by_uuid(gpu_uuid)
return gpu._sm_count * gpu._fma_count * 2 * gpu._frequency * self.gpu_metric_value[gpu_uuid][GPUFP32Active].value() / 1e+9
else:
raise TorchBenchAnalyzerException("No available GPU with uuid ", gpu_uuid, " found!")
else:
# Will only return the first GPU's TFLOPs/second. So please use CUDA_VISIBLE_DEVICES to specify the GPU.
gpu_uuid = next(iter(self.gpu_metric_value))
gpu = self.gpu_factory.get_device_by_uuid(gpu_uuid)
device_id = self.gpu_factory.get_device_by_uuid(gpu_uuid).device_id()
return device_id, gpu._sm_count * gpu._fma_count * 2 * gpu._frequency * self.gpu_metric_value[gpu_uuid][GPUFP32Active].value() / 1e+9
def calculate_gpu_peak_mem(self, gpu_uuid=None) -> Tuple[Optional[str], float]:
"""
Calculate GPU peak memory usage for the GPU with the given uuid, or for the first available GPU.
@return : peak memory in GB as a float when gpu_uuid is given; otherwise a (device_id, peak memory in GB) tuple for the first available GPU.
"""
if gpu_uuid:
if gpu_uuid in self.gpu_metric_value:
return self.gpu_metric_value[gpu_uuid][GPUPeakMemory].value() / 1024
else:
raise TorchBenchAnalyzerException("No available GPU with uuid ", gpu_uuid, " found!")
if len(self.gpu_metric_value) == 0:
raise TorchBenchAnalyzerException("No metrics collected!")
# Will only return the first GPU's peak memory. So please use CUDA_VISIBLE_DEVICES to specify the GPU.
gpu_uuid = next(iter(self.gpu_metric_value))
device_id = self.gpu_factory.get_device_by_uuid(gpu_uuid).device_id()
return device_id, self.gpu_metric_value[gpu_uuid][GPUPeakMemory].value() / 1024
def calculate_cpu_peak_mem(self, cpu_uuid=None) -> float:
"""
The function to calculate CPU peak memory usage.
@return : a floating number representing GB.
"""
if len(self.cpu_metric_value) > 1:
logger.debug("There are multiple available CPUs and will only return the first one's peak memory bandwidth.")
cpu_uuid = next(iter(self.cpu_metric_value))
return self.cpu_metric_value[cpu_uuid][CPUPeakMemory].value() / 1024
def check_dcgm():
try:
temp_model_analyzer = ModelAnalyzer()
temp_model_analyzer.add_metric_gpu_flops()
temp_model_analyzer.start_monitor()
temp_model_analyzer.stop_monitor()
except DCGMError as e:
logger.error("ERROR: DCGM init failed. ", e)
exit(-1)
return True
def check_nvml():
try:
import pynvml
pynvml.nvmlInit()
pynvml.nvmlShutdown()
except Exception as e:
logger.error("ERROR: NVML init failed. Please check the installation of pynvml.", e)
exit(-1)
return True
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from multiprocessing.pool import ThreadPool
import time
from ..tb_dcgm_types.da_exceptions import TorchBenchAnalyzerException
class Monitor(ABC):
"""
Monitor abstract class is a parent class used for monitoring devices.
"""
def __init__(self, frequency, metrics):
"""
Parameters
----------
frequency : float
How often the metrics should be monitored. It is in seconds.
metrics : list
A list of Record objects that will be monitored.
Raises
------
TorchBenchAnalyzerException
"""
self._frequency = frequency
# Is the background thread active
self._thread_active = False
# Background thread collecting results
self._thread = None
# Thread pool
self._thread_pool = ThreadPool(processes=1)
self._metrics = metrics
def _monitoring_loop(self):
frequency = self._frequency
while self._thread_active:
begin = time.time()
# Monitoring iteration implemented by each of the subclasses
self._monitoring_iteration()
# print("======working======")
duration = time.time() - begin
if duration < frequency:
time.sleep(frequency - duration)
self._monitoring_iteration()
@abstractmethod
def _monitoring_iteration(self):
"""
Each of the subclasses must implement this.
This is called to execute a single round of monitoring.
"""
pass
@abstractmethod
def _collect_records(self):
"""
This method is called to collect all the monitoring records.
It is called in the stop_recording_metrics function after
the background thread has stopped.
Returns
-------
List of Records
The list of records collected by the monitor
"""
pass
def start_recording_metrics(self):
"""
Start recording the metrics.
"""
self._thread_active = True
self._thread = self._thread_pool.apply_async(self._monitoring_loop)
def stop_recording_metrics(self):
"""
Stop recording metrics. This will stop monitoring all the metrics.
Returns
------
List of Records
Raises
------
TorchBenchAnalyzerException
"""
if not self._thread_active:
raise TorchBenchAnalyzerException(
"start_recording_metrics should be "
"called before stop_recording_metrics")
self._thread_active = False
self._thread.wait()
self._thread = None
return self._collect_records()
def destroy(self):
"""
Cleanup threadpool resources
"""
self._thread_pool.terminate()
self._thread_pool.close()
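# Minimal sketch (added for illustration, not part of this module) of how a
# concrete subclass plugs into the interface above. `MyRecord` and
# `read_sensor` are hypothetical stand-ins for a real Record type and a real
# sampling call:
#
#   class PollingMonitor(Monitor):
#       def __init__(self, frequency, metrics):
#           super().__init__(frequency, metrics)
#           self._records = []
#
#       def _monitoring_iteration(self):
#           # one sample per loop pass
#           self._records.append(MyRecord(value=read_sensor(), timestamp=time.time()))
#
#       def _collect_records(self):
#           return self._records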
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for the internal API of DCGM library (dcgm_fields_internal.hpp)
##
from ctypes import *
from ctypes.util import find_library
from . import dcgm_structs
# Provides access to functions
dcgmFP = dcgm_structs._dcgmGetFunctionPointer
#internal-only fields
DCGM_FI_DEV_MEM_COPY_UTIL_SAMPLES = 210 #Memory utilization samples
DCGM_FI_DEV_GPU_UTIL_SAMPLES = 211 #SM utilization samples
DCGM_FI_DEV_GRAPHICS_PIDS = 220 #Graphics processes running on the GPU.
DCGM_FI_DEV_COMPUTE_PIDS = 221 #Compute processes running on the GPU.
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for the internal API of DCGM library (dcgm_fields.h)
##
from ctypes import *
from ctypes.util import find_library
from . import dcgm_structs
# Provides access to functions
dcgmFP = dcgm_structs._dcgmGetFunctionPointer
# Field Types are a single byte. List these in ASCII order
DCGM_FT_BINARY = 'b' # Blob of binary data representing a structure
DCGM_FT_DOUBLE = 'd' # 8-byte double precision
DCGM_FT_INT64 = 'i' # 8-byte signed integer
DCGM_FT_STRING = 's' # Null-terminated ASCII Character string
DCGM_FT_TIMESTAMP = 't' # 8-byte signed integer usec since 1970
# Field scope. What are these fields associated with
DCGM_FS_GLOBAL = 0 # Field is global (ex: driver version)
DCGM_FS_ENTITY = 1 # Field is associated with an entity (GPU, VGPU, ..etc)
DCGM_FS_DEVICE = DCGM_FS_ENTITY # Field is associated with a device. Deprecated. Use DCGM_FS_ENTITY
# DCGM_FI_DEV_CLOCK_THROTTLE_REASONS is a bitmap of why the clock is throttled.
# These macros are masks for relevant throttling, and are a 1:1 map to the NVML
# reasons documented in nvml.h. The notes for the header are copied below:
# Nothing is running on the GPU and the clocks are dropping to Idle state
DCGM_CLOCKS_THROTTLE_REASON_GPU_IDLE = 0x0000000000000001
# GPU clocks are limited by current setting of applications clocks
DCGM_CLOCKS_THROTTLE_REASON_CLOCKS_SETTING = 0x0000000000000002
# SW Power Scaling algorithm is reducing the clocks below requested clocks
DCGM_CLOCKS_THROTTLE_REASON_SW_POWER_CAP = 0x0000000000000004
# HW Slowdown (reducing the core clocks by a factor of 2 or more) is engaged
#
# This is an indicator of:
# - temperature being too high
# - External Power Brake Assertion is triggered (e.g. by the system power supply)
# - Power draw is too high and Fast Trigger protection is reducing the clocks
# - May be also reported during PState or clock change
# - This behavior may be removed in a later release.
DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN = 0x0000000000000008
# Sync Boost
#
# This GPU has been added to a Sync boost group with nvidia-smi or DCGM in
# order to maximize performance per watt. All GPUs in the sync boost group
# will boost to the minimum possible clocks across the entire group. Look at
# the throttle reasons for other GPUs in the system to see why those GPUs are
# holding this one at lower clocks.
DCGM_CLOCKS_THROTTLE_REASON_SYNC_BOOST = 0x0000000000000010
# SW Thermal Slowdown
#
# This is an indicator of one or more of the following:
# - Current GPU temperature above the GPU Max Operating Temperature
# - Current memory temperature above the Memory Max Operating Temperature
DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL = 0x0000000000000020
# HW Thermal Slowdown (reducing the core clocks by a factor of 2 or more) is engaged
#
# This is an indicator of:
# - temperature being too high
DCGM_CLOCKS_THROTTLE_REASON_HW_THERMAL = 0x0000000000000040
# HW Power Brake Slowdown (reducing the core clocks by a factor of 2 or more) is engaged
#
# This is an indicator of:
# - External Power Brake Assertion being triggered (e.g. by the system power supply)
DCGM_CLOCKS_THROTTLE_REASON_HW_POWER_BRAKE = 0x0000000000000080
# GPU clocks are limited by current setting of Display clocks
DCGM_CLOCKS_THROTTLE_REASON_DISPLAY_CLOCKS = 0x0000000000000100
#Field entity groups. Which type of entity is this field or field value associated with
DCGM_FE_NONE = 0 # Field is not associated with an entity. Field scope should be DCGM_FS_GLOBAL
DCGM_FE_GPU = 1 # Field is associated with a GPU entity
DCGM_FE_VGPU = 2 # Field is associated with a VGPU entity
DCGM_FE_SWITCH = 3 # Field is associated with a Switch entity
DCGM_FE_GPU_I = 4 # Field is associated with a GPU Instance entity
DCGM_FE_GPU_CI = 5 # Field is associated with a GPU Compute Instance entity
DCGM_FE_LINK = 6 # Field is associated with an NVLINK
c_dcgm_field_eid_t = c_uint32 #Represents an identifier for an entity within a field entity. For instance, this is the gpuId for DCGM_FE_GPU.
#System attributes
DCGM_FI_UNKNOWN = 0
DCGM_FI_DRIVER_VERSION = 1 #Driver Version
DCGM_FI_NVML_VERSION = 2 #Underlying NVML version
DCGM_FI_PROCESS_NAME = 3 #Process Name. Will be nv-hostengine or your process's name in embedded mode
DCGM_FI_DEV_COUNT = 4 #Number of Devices on the node
DCGM_FI_CUDA_DRIVER_VERSION = 5 #Cuda Driver Version as an integer. CUDA 11.1 = 11100
#Device attributes
DCGM_FI_DEV_NAME = 50 #Name of the GPU device
DCGM_FI_DEV_BRAND = 51 #Device Brand
DCGM_FI_DEV_NVML_INDEX = 52 #NVML index of this GPU
DCGM_FI_DEV_SERIAL = 53 #Device Serial Number
DCGM_FI_DEV_UUID = 54 #UUID corresponding to the device
DCGM_FI_DEV_MINOR_NUMBER = 55 #Device node minor number /dev/nvidia#
DCGM_FI_DEV_OEM_INFOROM_VER = 56 #OEM inforom version
DCGM_FI_DEV_PCI_BUSID = 57 #PCI attributes for the device
DCGM_FI_DEV_PCI_COMBINED_ID = 58 #The combined 16-bit device id and 16-bit vendor id
DCGM_FI_DEV_PCI_SUBSYS_ID = 59 #The 32-bit Sub System Device ID
DCGM_FI_GPU_TOPOLOGY_PCI = 60 #Topology of all GPUs on the system via PCI (static)
DCGM_FI_GPU_TOPOLOGY_NVLINK = 61 #Topology of all GPUs on the system via NVLINK (static)
DCGM_FI_GPU_TOPOLOGY_AFFINITY = 62 #Affinity of all GPUs on the system (static)
DCGM_FI_DEV_CUDA_COMPUTE_CAPABILITY = 63 #Cuda compute capability for the device
DCGM_FI_DEV_COMPUTE_MODE = 65 #Compute mode for the device
DCGM_FI_DEV_PERSISTENCE_MODE = 66 #Persistence mode for the device
DCGM_FI_DEV_MIG_MODE = 67 #MIG mode for the device
DCGM_FI_DEV_CUDA_VISIBLE_DEVICES_STR = 68 #String value for CUDA_VISIBLE_DEVICES for the device
DCGM_FI_DEV_MIG_MAX_SLICES = 69 #The maximum number of slices this GPU supports
DCGM_FI_DEV_CPU_AFFINITY_0 = 70 #Device CPU affinity. part 1/8 = cpus 0 - 63
DCGM_FI_DEV_CPU_AFFINITY_1 = 71 #Device CPU affinity. part 2/8 = cpus 64 - 127
DCGM_FI_DEV_CPU_AFFINITY_2 = 72 #Device CPU affinity. part 3/8 = cpus 128 - 191
DCGM_FI_DEV_CPU_AFFINITY_3 = 73 #Device CPU affinity. part 4/8 = cpus 192 - 255
DCGM_FI_DEV_CC_MODE = 74 #Device CC/APM mode
DCGM_FI_DEV_MIG_ATTRIBUTES = 75 #MIG device attributes
DCGM_FI_DEV_MIG_GI_INFO = 76 #GPU instance profile information
DCGM_FI_DEV_MIG_CI_INFO = 77 #Compute instance profile information
DCGM_FI_DEV_ECC_INFOROM_VER = 80 #ECC inforom version
DCGM_FI_DEV_POWER_INFOROM_VER = 81 #Power management object inforom version
DCGM_FI_DEV_INFOROM_IMAGE_VER = 82 #Inforom image version
DCGM_FI_DEV_INFOROM_CONFIG_CHECK = 83 #Inforom configuration checksum
DCGM_FI_DEV_INFOROM_CONFIG_VALID = 84 #Reads the infoROM from the flash and verifies the checksums
DCGM_FI_DEV_VBIOS_VERSION = 85 #VBIOS version of the device
DCGM_FI_DEV_BAR1_TOTAL = 90 #Total BAR1 of the GPU
DCGM_FI_SYNC_BOOST = 91 #Deprecated - Sync boost settings on the node
DCGM_FI_DEV_BAR1_USED = 92 #Used BAR1 of the GPU in MB
DCGM_FI_DEV_BAR1_FREE = 93 #Free BAR1 of the GPU in MB
#Clocks and power
DCGM_FI_DEV_SM_CLOCK = 100 #SM clock for the device
DCGM_FI_DEV_MEM_CLOCK = 101 #Memory clock for the device
DCGM_FI_DEV_VIDEO_CLOCK = 102 #Video encoder/decoder clock for the device
DCGM_FI_DEV_APP_SM_CLOCK = 110 #SM Application clocks
DCGM_FI_DEV_APP_MEM_CLOCK = 111 #Memory Application clocks
DCGM_FI_DEV_CLOCK_THROTTLE_REASONS = 112 #Current clock throttle reasons (bitmask of DCGM_CLOCKS_THROTTLE_REASON_*)
DCGM_FI_DEV_MAX_SM_CLOCK = 113 #Maximum supported SM clock for the device
DCGM_FI_DEV_MAX_MEM_CLOCK = 114 #Maximum supported Memory clock for the device
DCGM_FI_DEV_MAX_VIDEO_CLOCK = 115 #Maximum supported Video encoder/decoder clock for the device
DCGM_FI_DEV_AUTOBOOST = 120 #Auto-boost for the device (1 = enabled. 0 = disabled)
DCGM_FI_DEV_SUPPORTED_CLOCKS = 130 #Supported clocks for the device
DCGM_FI_DEV_MEMORY_TEMP = 140 #Memory temperature for the device
DCGM_FI_DEV_GPU_TEMP = 150 #Current temperature readings for the device, in degrees C
DCGM_FI_DEV_MEM_MAX_OP_TEMP = 151 #Maximum operating temperature for the memory of this GPU
DCGM_FI_DEV_GPU_MAX_OP_TEMP = 152 #Maximum operating temperature for this GPU
DCGM_FI_DEV_POWER_USAGE = 155 #Power usage for the device in Watts
DCGM_FI_DEV_TOTAL_ENERGY_CONSUMPTION = 156 #Total energy consumption for the GPU in mJ since the driver was last reloaded
DCGM_FI_DEV_SLOWDOWN_TEMP = 158 #Slowdown temperature for the device
DCGM_FI_DEV_SHUTDOWN_TEMP = 159 #Shutdown temperature for the device
DCGM_FI_DEV_POWER_MGMT_LIMIT = 160 #Current Power limit for the device
DCGM_FI_DEV_POWER_MGMT_LIMIT_MIN = 161 #Minimum power management limit for the device
DCGM_FI_DEV_POWER_MGMT_LIMIT_MAX = 162 #Maximum power management limit for the device
DCGM_FI_DEV_POWER_MGMT_LIMIT_DEF = 163 #Default power management limit for the device
DCGM_FI_DEV_ENFORCED_POWER_LIMIT = 164 #Effective power limit that the driver enforces after taking into account all limiters
DCGM_FI_DEV_PSTATE = 190 #Performance state (P-State) 0-15. 0=highest
DCGM_FI_DEV_FAN_SPEED = 191 #Fan speed for the device in percent 0-100
#Device utilization and telemetry
DCGM_FI_DEV_PCIE_TX_THROUGHPUT = 200 #Deprecated - PCIe Tx utilization information
DCGM_FI_DEV_PCIE_RX_THROUGHPUT = 201 #Deprecated - PCIe Rx utilization information
DCGM_FI_DEV_PCIE_REPLAY_COUNTER = 202 #PCIe replay counter
DCGM_FI_DEV_GPU_UTIL = 203 #GPU Utilization
DCGM_FI_DEV_MEM_COPY_UTIL = 204 #Memory Utilization
DCGM_FI_DEV_ACCOUNTING_DATA = 205 #Process accounting stats
DCGM_FI_DEV_ENC_UTIL = 206 #Encoder utilization
DCGM_FI_DEV_DEC_UTIL = 207 #Decoder utilization
# Fields 210, 211, 220, and 221 are internal-only. see dcgm_fields_internal.py
DCGM_FI_DEV_XID_ERRORS = 230 #XID errors. The value is the specific XID error
DCGM_FI_DEV_PCIE_MAX_LINK_GEN = 235 #PCIe Max Link Generation
DCGM_FI_DEV_PCIE_MAX_LINK_WIDTH = 236 #PCIe Max Link Width
DCGM_FI_DEV_PCIE_LINK_GEN = 237 #PCIe Current Link Generation
DCGM_FI_DEV_PCIE_LINK_WIDTH = 238 #PCIe Current Link Width
#Violation counters
DCGM_FI_DEV_POWER_VIOLATION = 240 #Power Violation time in usec
DCGM_FI_DEV_THERMAL_VIOLATION = 241 #Thermal Violation time in usec
DCGM_FI_DEV_SYNC_BOOST_VIOLATION = 242 #Sync Boost Violation time in usec
DCGM_FI_DEV_BOARD_LIMIT_VIOLATION = 243 #Board Limit Violation time in usec.
DCGM_FI_DEV_LOW_UTIL_VIOLATION = 244 #Low Utilization Violation time in usec.
DCGM_FI_DEV_RELIABILITY_VIOLATION = 245 #Reliability Violation time in usec.
DCGM_FI_DEV_TOTAL_APP_CLOCKS_VIOLATION = 246 #App Clocks Violation time in usec.
DCGM_FI_DEV_TOTAL_BASE_CLOCKS_VIOLATION = 247 #Base Clocks Violation time in usec.
#Framebuffer usage
DCGM_FI_DEV_FB_TOTAL = 250 #Total framebuffer memory in MB
DCGM_FI_DEV_FB_FREE = 251 #Total framebuffer free in MB
DCGM_FI_DEV_FB_USED = 252 #Total framebuffer used in MB
DCGM_FI_DEV_FB_RESERVED = 253 #Total framebuffer reserved in MB
#Device ECC Counters
DCGM_FI_DEV_ECC_CURRENT = 300 #Current ECC mode for the device
DCGM_FI_DEV_ECC_PENDING = 301 #Pending ECC mode for the device
DCGM_FI_DEV_ECC_SBE_VOL_TOTAL = 310 #Total single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_TOTAL = 311 #Total double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_TOTAL = 312 #Total single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_TOTAL = 313 #Total double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_L1 = 314 #L1 cache single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_L1 = 315 #L1 cache double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_L2 = 316 #L2 cache single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_L2 = 317 #L2 cache double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_DEV = 318 #Device memory single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_DEV = 319 #Device memory double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_REG = 320 #Register file single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_REG = 321 #Register file double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_TEX = 322 #Texture memory single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_TEX = 323 #Texture memory double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_L1 = 324 #L1 cache single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_L1 = 325 #L1 cache double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_L2 = 326 #L2 cache single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_L2 = 327 #L2 cache double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_DEV = 328 #Device memory single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_DEV = 329 #Device memory double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_REG = 330 #Register File single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_REG = 331 #Register File double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_TEX = 332 #Texture memory single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_TEX = 333 #Texture memory double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_RETIRED_SBE = 390 #Number of retired pages because of single bit errors
DCGM_FI_DEV_RETIRED_DBE = 391 #Number of retired pages because of double bit errors
DCGM_FI_DEV_RETIRED_PENDING = 392 #Number of pages pending retirement
#Row remapper fields (Ampere and newer)
DCGM_FI_DEV_UNCORRECTABLE_REMAPPED_ROWS = 393 #Number of remapped rows for uncorrectable errors
DCGM_FI_DEV_CORRECTABLE_REMAPPED_ROWS = 394 #Number of remapped rows for correctable errors
DCGM_FI_DEV_ROW_REMAP_FAILURE = 395 #Whether remapping of rows has failed
DCGM_FI_DEV_ROW_REMAP_PENDING = 396 #Whether remapping of rows is pending
#Device NvLink Bandwidth and Error Counters
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 = 400 #NV Link flow control CRC Error Counter for Lane 0
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 = 401 #NV Link flow control CRC Error Counter for Lane 1
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 = 402 #NV Link flow control CRC Error Counter for Lane 2
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 = 403 #NV Link flow control CRC Error Counter for Lane 3
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 = 404 #NV Link flow control CRC Error Counter for Lane 4
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 = 405 #NV Link flow control CRC Error Counter for Lane 5
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL = 409 #NV Link flow control CRC Error Counter total for all Lanes
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 = 410 #NV Link data CRC Error Counter for Lane 0
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 = 411 #NV Link data CRC Error Counter for Lane 1
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 = 412 #NV Link data CRC Error Counter for Lane 2
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 = 413 #NV Link data CRC Error Counter for Lane 3
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 = 414 #NV Link data CRC Error Counter for Lane 4
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 = 415 #NV Link data CRC Error Counter for Lane 5
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL = 419 #NV Link data CRC Error Counter total for all Lanes
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 = 420 #NV Link Replay Error Counter for Lane 0
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 = 421 #NV Link Replay Error Counter for Lane 1
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 = 422 #NV Link Replay Error Counter for Lane 2
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 = 423 #NV Link Replay Error Counter for Lane 3
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 = 424 #NV Link Replay Error Counter for Lane 4
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 = 425 #NV Link Replay Error Counter for Lane 5
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL = 429 #NV Link Replay Error Counter total for all Lanes
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 = 430 #NV Link Recovery Error Counter for Lane 0
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 = 431 #NV Link Recovery Error Counter for Lane 1
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 = 432 #NV Link Recovery Error Counter for Lane 2
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 = 433 #NV Link Recovery Error Counter for Lane 3
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 = 434 #NV Link Recovery Error Counter for Lane 4
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 = 435 #NV Link Recovery Error Counter for Lane 5
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL = 439 #NV Link Recovery Error Counter total for all Lanes
DCGM_FI_DEV_NVLINK_BANDWIDTH_L0 = 440 #NV Link Bandwidth Counter for Lane 0
DCGM_FI_DEV_NVLINK_BANDWIDTH_L1 = 441 #NV Link Bandwidth Counter for Lane 1
DCGM_FI_DEV_NVLINK_BANDWIDTH_L2 = 442 #NV Link Bandwidth Counter for Lane 2
DCGM_FI_DEV_NVLINK_BANDWIDTH_L3 = 443 #NV Link Bandwidth Counter for Lane 3
DCGM_FI_DEV_NVLINK_BANDWIDTH_L4 = 444 #NV Link Bandwidth Counter for Lane 4
DCGM_FI_DEV_NVLINK_BANDWIDTH_L5 = 445 #NV Link Bandwidth Counter for Lane 5
DCGM_FI_DEV_NVLINK_BANDWIDTH_TOTAL = 449 #NV Link Bandwidth Counter total for all Lanes
DCGM_FI_DEV_GPU_NVLINK_ERRORS = 450 #GPU NVLink error information
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 = 451
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 = 452
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 = 453
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 = 454
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 = 455
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 = 456
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L12 = 406
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L13 = 407
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L14 = 408
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L15 = 481
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L16 = 482
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L17 = 483
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 = 457
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 = 458
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 = 459
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 = 460
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 = 461
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 = 462
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L12 = 416
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L13 = 417
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L14 = 418
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L15 = 484
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L16 = 485
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L17 = 486
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 = 463
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 = 464
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 = 465
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 = 466
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 = 467
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 = 468
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L12 = 426
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L13 = 427
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L14 = 428
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L15 = 487
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L16 = 488
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L17 = 489
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 = 469
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 = 470
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 = 471
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 = 472
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 = 473
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 = 474
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L12 = 436
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L13 = 437
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L14 = 438
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L15 = 491
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L16 = 492
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L17 = 493
DCGM_FI_DEV_NVLINK_BANDWIDTH_L6 = 475
DCGM_FI_DEV_NVLINK_BANDWIDTH_L7 = 476
DCGM_FI_DEV_NVLINK_BANDWIDTH_L8 = 477
DCGM_FI_DEV_NVLINK_BANDWIDTH_L9 = 478
DCGM_FI_DEV_NVLINK_BANDWIDTH_L10 = 479
DCGM_FI_DEV_NVLINK_BANDWIDTH_L11 = 480
DCGM_FI_DEV_NVLINK_BANDWIDTH_L12 = 446
DCGM_FI_DEV_NVLINK_BANDWIDTH_L13 = 447
DCGM_FI_DEV_NVLINK_BANDWIDTH_L14 = 448
DCGM_FI_DEV_NVLINK_BANDWIDTH_L15 = 494
DCGM_FI_DEV_NVLINK_BANDWIDTH_L16 = 495
DCGM_FI_DEV_NVLINK_BANDWIDTH_L17 = 496
#Device Attributes associated with virtualization
DCGM_FI_DEV_VIRTUAL_MODE = 500 #Operating mode of the GPU
DCGM_FI_DEV_SUPPORTED_TYPE_INFO = 501 #Includes Count and Supported vGPU type information
DCGM_FI_DEV_CREATABLE_VGPU_TYPE_IDS = 502 #Includes Count and List of Creatable vGPU type IDs
DCGM_FI_DEV_VGPU_INSTANCE_IDS = 503 #Includes Count and List of vGPU instance IDs
DCGM_FI_DEV_VGPU_UTILIZATIONS = 504 #Utilization values for vGPUs running on the device
DCGM_FI_DEV_VGPU_PER_PROCESS_UTILIZATION = 505 #Utilization values for processes running within vGPU VMs using the device
DCGM_FI_DEV_ENC_STATS = 506 #Current encoder statistics for a given device
DCGM_FI_DEV_FBC_STATS = 507 #Statistics of current active frame buffer capture sessions on a given device
DCGM_FI_DEV_FBC_SESSIONS_INFO = 508 #Information about active frame buffer capture sessions on a target device
DCGM_FI_DEV_SUPPORTED_VGPU_TYPE_IDS = 509 #Includes Count and currently Supported vGPU types on a device
DCGM_FI_DEV_VGPU_TYPE_INFO = 510 #Includes Static info of vGPU types supported on a device
DCGM_FI_DEV_VGPU_TYPE_NAME = 511 #Includes the name of a vGPU type supported on a device
DCGM_FI_DEV_VGPU_TYPE_CLASS = 512 #Includes the class of a vGPU type supported on a device
DCGM_FI_DEV_VGPU_TYPE_LICENSE = 513 #Includes the license info for a vGPU type supported on a device
#Related to vGPU Instance IDs
DCGM_FI_DEV_VGPU_VM_ID = 520 #vGPU VM ID
DCGM_FI_DEV_VGPU_VM_NAME = 521 #vGPU VM name
DCGM_FI_DEV_VGPU_TYPE = 522 #vGPU type of the vGPU instance
DCGM_FI_DEV_VGPU_UUID = 523 #UUID of the vGPU instance
DCGM_FI_DEV_VGPU_DRIVER_VERSION = 524 #Driver version of the vGPU instance
DCGM_FI_DEV_VGPU_MEMORY_USAGE = 525 #Memory usage of the vGPU instance
DCGM_FI_DEV_VGPU_LICENSE_STATUS = 526 #License status of the vGPU
DCGM_FI_DEV_VGPU_FRAME_RATE_LIMIT = 527 #Frame rate limit of the vGPU instance
DCGM_FI_DEV_VGPU_ENC_STATS = 528 #Current encoder statistics of the vGPU instance
DCGM_FI_DEV_VGPU_ENC_SESSIONS_INFO = 529 #Information about all active encoder sessions on the vGPU instance
DCGM_FI_DEV_VGPU_FBC_STATS = 530 #Statistics of current active frame buffer capture sessions on the vGPU instance
DCGM_FI_DEV_VGPU_FBC_SESSIONS_INFO = 531 #Information about active frame buffer capture sessions on the vGPU instance
DCGM_FI_DEV_VGPU_INSTANCE_LICENSE_STATE = 532 #License state information of the vGPU instance
DCGM_FI_DEV_VGPU_PCI_ID = 533 #PCI Id of the vGPU instance
DCGM_FI_DEV_VGPU_VM_GPU_INSTANCE_ID = 534 #GPU Instance Id of the vGPU instance
#Internal fields reserve the range 600..699
#below fields related to NVSwitch
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P00 = 700
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P00 = 701
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P00 = 702
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P00 = 703
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P01 = 704
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P01 = 705
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P01 = 706
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P01 = 707
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P02 = 708
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P02 = 709
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P02 = 710
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P02 = 711
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P03 = 712
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P03 = 713
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P03 = 714
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P03 = 715
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P04 = 716
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P04 = 717
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P04 = 718
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P04 = 719
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P05 = 720
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P05 = 721
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P05 = 722
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P05 = 723
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P06 = 724
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P06 = 725
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P06 = 726
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P06 = 727
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P07 = 728
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P07 = 729
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P07 = 730
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P07 = 731
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P08 = 732
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P08 = 733
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P08 = 734
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P08 = 735
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P09 = 736
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P09 = 737
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P09 = 738
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P09 = 739
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P10 = 740
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P10 = 741
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P10 = 742
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P10 = 743
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P11 = 744
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P11 = 745
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P11 = 746
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P11 = 747
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P12 = 748
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P12 = 749
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P12 = 750
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P12 = 751
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P13 = 752
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P13 = 753
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P13 = 754
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P13 = 755
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P14 = 756
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P14 = 757
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P14 = 758
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P14 = 759
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P15 = 760
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P15 = 761
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P15 = 762
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P15 = 763
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P16 = 764
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P16 = 765
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P16 = 766
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P16 = 767
DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P17 = 768
DCGM_FI_DEV_NVSWITCH_LATENCY_MED_P17 = 769
DCGM_FI_DEV_NVSWITCH_LATENCY_HIGH_P17 = 770
DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P17 = 771
DCGM_FI_DEV_NVSWITCH_LINK_THROUGHPUT_TX = 780
DCGM_FI_DEV_NVSWITCH_LINK_THROUGHPUT_RX = 781
DCGM_FI_DEV_NVSWITCH_LINK_FATAL_ERRORS = 782
DCGM_FI_DEV_NVSWITCH_LINK_NON_FATAL_ERRORS = 783
DCGM_FI_DEV_NVSWITCH_LINK_REPLAY_ERRORS = 784
DCGM_FI_DEV_NVSWITCH_LINK_RECOVERY_ERRORS = 785
DCGM_FI_DEV_NVSWITCH_LINK_FLIT_ERRORS = 786
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS = 787
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS = 788
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC0 = 789
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC1 = 790
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC2 = 791
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC3 = 792
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC0 = 793
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC1 = 794
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC2 = 795
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC3 = 796
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC0 = 797
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC1 = 798
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC2 = 799
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC3 = 800
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC0 = 801
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC1 = 802
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC2 = 803
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC3 = 804
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC0 = 805
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC1 = 806
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC2 = 807
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC3 = 808
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE0 = 809
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE1 = 810
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE2 = 811
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE3 = 812
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE0 = 813
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE1 = 814
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE2 = 815
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE3 = 816
DCGM_FI_DEV_NVSWITCH_FATAL_ERRORS = 856
DCGM_FI_DEV_NVSWITCH_NON_FATAL_ERRORS = 857
DCGM_FI_DEV_NVSWITCH_TEMPERATURE_CURRENT = 858
DCGM_FI_DEV_NVSWITCH_TEMPERATURE_LIMIT_SLOWDOWN = 859
DCGM_FI_DEV_NVSWITCH_TEMPERATURE_LIMIT_SHUTDOWN = 860
DCGM_FI_DEV_NVSWITCH_THROUGHPUT_TX = 861
DCGM_FI_DEV_NVSWITCH_THROUGHPUT_RX = 862
'''
Profiling Fields
'''
DCGM_FI_PROF_GR_ENGINE_ACTIVE = 1001 #Ratio of time the graphics engine is active. The graphics engine is
#active if a graphics/compute context is bound and the graphics pipe or
#compute pipe is busy.
DCGM_FI_PROF_SM_ACTIVE = 1002 #The ratio of cycles an SM has at least 1 warp assigned
#(computed from the number of cycles and elapsed cycles)
DCGM_FI_PROF_SM_OCCUPANCY = 1003 #The ratio of number of warps resident on an SM.
#(number of resident as a ratio of the theoretical
#maximum number of warps per elapsed cycle)
DCGM_FI_PROF_PIPE_TENSOR_ACTIVE = 1004 #The ratio of cycles any tensor pipe is active
#(off the peak sustained elapsed cycles)
DCGM_FI_PROF_DRAM_ACTIVE = 1005 #The ratio of cycles the device memory interface is active sending or receiving data.
DCGM_FI_PROF_PIPE_FP64_ACTIVE = 1006 #Ratio of cycles the fp64 pipe is active.
DCGM_FI_PROF_PIPE_FP32_ACTIVE = 1007 #Ratio of cycles the fp32 pipe is active.
DCGM_FI_PROF_PIPE_FP16_ACTIVE = 1008 #Ratio of cycles the fp16 pipe is active. This does not include HMMA.
DCGM_FI_PROF_PCIE_TX_BYTES = 1009 #The number of bytes of active PCIe tx (transmit) data including both header and payload.
DCGM_FI_PROF_PCIE_RX_BYTES = 1010 #The number of bytes of active PCIe rx (read) data including both header and payload.
DCGM_FI_PROF_NVLINK_TX_BYTES = 1011 #The number of bytes of active NvLink tx (transmit) data including both header and payload.
DCGM_FI_PROF_NVLINK_RX_BYTES = 1012 #The number of bytes of active NvLink rx (receive) data including both header and payload.
DCGM_FI_PROF_PIPE_TENSOR_IMMA_ACTIVE = 1013 #The ratio of cycles the IMMA tensor pipe is active (off the peak sustained elapsed cycles)
DCGM_FI_PROF_PIPE_TENSOR_HMMA_ACTIVE = 1014 #The ratio of cycles the HMMA tensor pipe is active (off the peak sustained elapsed cycles)
DCGM_FI_PROF_PIPE_TENSOR_DFMA_ACTIVE = 1015 #The ratio of cycles the tensor (DFMA) pipe is active (off the peak sustained elapsed cycles)
DCGM_FI_PROF_PIPE_INT_ACTIVE = 1016 #Ratio of cycles the integer pipe is active.
#Ratio of cycles each of the NVDEC engines are active.
DCGM_FI_PROF_NVDEC0_ACTIVE = 1017
DCGM_FI_PROF_NVDEC1_ACTIVE = 1018
DCGM_FI_PROF_NVDEC2_ACTIVE = 1019
DCGM_FI_PROF_NVDEC3_ACTIVE = 1020
DCGM_FI_PROF_NVDEC4_ACTIVE = 1021
DCGM_FI_PROF_NVDEC5_ACTIVE = 1022
DCGM_FI_PROF_NVDEC6_ACTIVE = 1023
DCGM_FI_PROF_NVDEC7_ACTIVE = 1024
#Ratio of cycles each of the NVJPG engines are active.
DCGM_FI_PROF_NVJPG0_ACTIVE = 1025
DCGM_FI_PROF_NVJPG1_ACTIVE = 1026
DCGM_FI_PROF_NVJPG2_ACTIVE = 1027
DCGM_FI_PROF_NVJPG3_ACTIVE = 1028
DCGM_FI_PROF_NVJPG4_ACTIVE = 1029
DCGM_FI_PROF_NVJPG5_ACTIVE = 1030
DCGM_FI_PROF_NVJPG6_ACTIVE = 1031
DCGM_FI_PROF_NVJPG7_ACTIVE = 1032
#Ratio of cycles each of the NVOFA engines are active.
DCGM_FI_PROF_NVOFA0_ACTIVE = 1033
'''
The per-link number of bytes of active NvLink TX (transmit) or RX (receive) data including both header and payload.
For example: DCGM_FI_PROF_NVLINK_L0_TX_BYTES -> L0 TX
To get the bandwidth for a link, add the RX and TX value together like
total = DCGM_FI_PROF_NVLINK_L0_TX_BYTES + DCGM_FI_PROF_NVLINK_L0_RX_BYTES
'''
DCGM_FI_PROF_NVLINK_L0_TX_BYTES = 1040
DCGM_FI_PROF_NVLINK_L0_RX_BYTES = 1041
DCGM_FI_PROF_NVLINK_L1_TX_BYTES = 1042
DCGM_FI_PROF_NVLINK_L1_RX_BYTES = 1043
DCGM_FI_PROF_NVLINK_L2_TX_BYTES = 1044
DCGM_FI_PROF_NVLINK_L2_RX_BYTES = 1045
DCGM_FI_PROF_NVLINK_L3_TX_BYTES = 1046
DCGM_FI_PROF_NVLINK_L3_RX_BYTES = 1047
DCGM_FI_PROF_NVLINK_L4_TX_BYTES = 1048
DCGM_FI_PROF_NVLINK_L4_RX_BYTES = 1049
DCGM_FI_PROF_NVLINK_L5_TX_BYTES = 1050
DCGM_FI_PROF_NVLINK_L5_RX_BYTES = 1051
DCGM_FI_PROF_NVLINK_L6_TX_BYTES = 1052
DCGM_FI_PROF_NVLINK_L6_RX_BYTES = 1053
DCGM_FI_PROF_NVLINK_L7_TX_BYTES = 1054
DCGM_FI_PROF_NVLINK_L7_RX_BYTES = 1055
DCGM_FI_PROF_NVLINK_L8_TX_BYTES = 1056
DCGM_FI_PROF_NVLINK_L8_RX_BYTES = 1057
DCGM_FI_PROF_NVLINK_L9_TX_BYTES = 1058
DCGM_FI_PROF_NVLINK_L9_RX_BYTES = 1059
DCGM_FI_PROF_NVLINK_L10_TX_BYTES = 1060
DCGM_FI_PROF_NVLINK_L10_RX_BYTES = 1061
DCGM_FI_PROF_NVLINK_L11_TX_BYTES = 1062
DCGM_FI_PROF_NVLINK_L11_RX_BYTES = 1063
DCGM_FI_PROF_NVLINK_L12_TX_BYTES = 1064
DCGM_FI_PROF_NVLINK_L12_RX_BYTES = 1065
DCGM_FI_PROF_NVLINK_L13_TX_BYTES = 1066
DCGM_FI_PROF_NVLINK_L13_RX_BYTES = 1067
DCGM_FI_PROF_NVLINK_L14_TX_BYTES = 1068
DCGM_FI_PROF_NVLINK_L14_RX_BYTES = 1069
DCGM_FI_PROF_NVLINK_L15_TX_BYTES = 1070
DCGM_FI_PROF_NVLINK_L15_RX_BYTES = 1071
DCGM_FI_PROF_NVLINK_L16_TX_BYTES = 1072
DCGM_FI_PROF_NVLINK_L16_RX_BYTES = 1073
DCGM_FI_PROF_NVLINK_L17_TX_BYTES = 1074
DCGM_FI_PROF_NVLINK_L17_RX_BYTES = 1075
DCGM_FI_PROF_NVLINK_THROUGHPUT_FIRST = DCGM_FI_PROF_NVLINK_L0_TX_BYTES
DCGM_FI_PROF_NVLINK_THROUGHPUT_LAST = DCGM_FI_PROF_NVLINK_L17_RX_BYTES
#greater than maximum fields above. This value can increase in the future
DCGM_FI_MAX_FIELDS = 1076
class struct_c_dcgm_field_meta_t(dcgm_structs._DcgmStructure):
# struct_c_dcgm_field_meta_t structure
pass # opaque handle
dcgm_field_meta_t = POINTER(struct_c_dcgm_field_meta_t)
class _PrintableStructure(dcgm_structs._DcgmStructure):
"""
Abstract class that produces nicer __str__ output than ctypes.Structure.
e.g. instead of:
    >>> print(str(obj))
<class_name object at 0x7fdf82fef9e0>
this class will print
class_name(field_name: formatted_value, field_name: formatted_value)
_fmt_ dictionary of <str _field_ name> -> <str format>
e.g. class that has _field_ 'hex_value', c_uint could be formatted with
_fmt_ = {"hex_value" : "%08X"}
to produce nicer output.
    Default formatting string for all fields can be set with key "<default>" like:
_fmt_ = {"<default>" : "%d MHz"} # e.g all values are numbers in MHz.
If not set it's assumed to be just "%s"
Exact format of returned str from this class is subject to change in the future.
"""
_fmt_ = {}
def __str__(self):
result = []
for x in self._fields_:
key = x[0]
value = getattr(self, key)
fmt = "%s"
if key in self._fmt_:
fmt = self._fmt_[key]
elif "<default>" in self._fmt_:
fmt = self._fmt_["<default>"]
result.append(("%s: " + fmt) % (key, value))
return self.__class__.__name__ + "(" + ', '.join(result) + ")"
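# Minimal sketch (not from the original module) of how a _PrintableStructure subclass uses
# _fmt_, per the docstring above. The structure and its fields are hypothetical examples.
# class c_example_t(_PrintableStructure):
#     _fields_ = [('hex_value', c_uint), ('clock', c_uint)]
#     _fmt_ = {'hex_value': '%08X', '<default>': '%d MHz'}
# str(c_example_t(hex_value=0x1A, clock=1410)) would render as
# "c_example_t(hex_value: 0000001A, clock: 1410 MHz)".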
# Provides access to functions from dcgm_agent_internal
dcgmFP = dcgm_structs._dcgmGetFunctionPointer
SHORTNAME_LENGTH = 10
UNIT_LENGTH = 4
# Structure to hold formatting information for values
class c_dcgm_field_output_format_t(_PrintableStructure):
_fields_ = [
('shortName', c_char * SHORTNAME_LENGTH),
('unit' , c_char * UNIT_LENGTH),
('width' , c_short)
]
TAG_LENGTH = 48
# Structure to represent field metadata
class c_dcgm_field_meta_t(_PrintableStructure):
_fields_ = [
# version must always be first
('fieldId', c_short),
('fieldType', c_char),
('size', c_ubyte),
('tag', c_char * TAG_LENGTH),
('scope', c_int),
('valueFormat', c_dcgm_field_output_format_t)
]
# Class for maintaining properties for each sampling type like Power, Utilization and Clock.
class pySamplingProperties:
'''
The instance of this class is used to hold information related to each sampling event type.
'''
def __init__(self, name, sampling_type, sample_val_type, timeIntervalIdle, timeIntervalBoost, min_value, max_value):
self.name = name
self.sampling_type = sampling_type
self.timeIntervalIdle = timeIntervalIdle
self.timeIntervalBoost = timeIntervalBoost
self.min_value = min_value
self.max_value = max_value
self.sample_val_type = sample_val_type
def DcgmFieldsInit():
fn = dcgmFP("DcgmFieldsInit")
ret = fn()
assert ret == 0, "Got return %d from DcgmFieldsInit" % ret
def DcgmFieldGetById(fieldId):
'''
Get metadata for a field, given its fieldId
:param fieldId: Field ID to get metadata for
:return: c_dcgm_field_meta_t struct on success. None on error.
'''
DcgmFieldsInit()
fn = dcgmFP("DcgmFieldGetById")
fn.restype = POINTER(c_dcgm_field_meta_t)
c_field_meta_ptr = fn(fieldId)
if not c_field_meta_ptr:
return None
retVal = c_dcgm_field_meta_t()
memmove(addressof(retVal), c_field_meta_ptr, sizeof(retVal))
return retVal
def DcgmFieldGetByTag(tag):
'''
Get metadata for a field, given its string tag
:param tag: Field tag to get metadata for. Example 'brand'
:return: c_dcgm_field_meta_t struct on success. None on error.
'''
DcgmFieldsInit()
fn = dcgmFP("DcgmFieldGetByTag")
fn.restype = POINTER(c_dcgm_field_meta_t)
c_field_meta_ptr = fn(c_char_p(tag.encode('utf-8')))
if not c_field_meta_ptr:
return None
retVal = c_dcgm_field_meta_t()
memmove(addressof(retVal), c_field_meta_ptr, sizeof(retVal))
return retVal
def DcgmFieldGetTagById(fieldId):
field = DcgmFieldGetById(fieldId)
if field:
return field.tag
else:
return None
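# Commented-out usage sketch (added for illustration, mirroring the test-program style used
# elsewhere in this package): look up field metadata by tag or by ID after DcgmFieldsInit().
# field = DcgmFieldGetByTag('brand')
# if field:
#     print("fieldId=%d tag=%s" % (field.fieldId, field.tag))
# print(DcgmFieldGetTagById(DCGM_FI_PROF_NVLINK_TX_BYTES))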
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .monitor import Monitor
from ..tb_dcgm_types.gpu_free_memory import GPUFreeMemory
from ..tb_dcgm_types.gpu_tensoractive import GPUTensorActive
from ..tb_dcgm_types.gpu_peak_memory import GPUPeakMemory
from ..tb_dcgm_types.gpu_utilization import GPUUtilization
from ..tb_dcgm_types.gpu_power_usage import GPUPowerUsage
from ..tb_dcgm_types.gpu_fp32active import GPUFP32Active
from ..tb_dcgm_types.gpu_dram_active import GPUDRAMActive
from ..tb_dcgm_types.gpu_pcie_rx import GPUPCIERX
from ..tb_dcgm_types.gpu_pcie_tx import GPUPCIETX
from ..tb_dcgm_types.da_exceptions import TorchBenchAnalyzerException
from . import dcgm_agent
from . import dcgm_fields
from . import dcgm_field_helpers
from . import dcgm_structs as structs
class DCGMMonitor(Monitor):
"""
Use DCGM to monitor GPU metrics
"""
# Mapping between the DCGM Fields and Model Analyzer Records
    # For more explanations, please refer to https://docs.nvidia.com/datacenter/dcgm/latest/dcgm-api/dcgm-api-field-ids.html
model_analyzer_to_dcgm_field = {
GPUPeakMemory: dcgm_fields.DCGM_FI_DEV_FB_USED,
GPUFreeMemory: dcgm_fields.DCGM_FI_DEV_FB_FREE,
GPUUtilization: dcgm_fields.DCGM_FI_DEV_GPU_UTIL,
GPUPowerUsage: dcgm_fields.DCGM_FI_DEV_POWER_USAGE,
GPUFP32Active: dcgm_fields.DCGM_FI_PROF_PIPE_FP32_ACTIVE,
GPUTensorActive: dcgm_fields.DCGM_FI_PROF_PIPE_TENSOR_ACTIVE,
GPUDRAMActive: dcgm_fields.DCGM_FI_PROF_DRAM_ACTIVE,
GPUPCIERX: dcgm_fields.DCGM_FI_PROF_PCIE_RX_BYTES,
GPUPCIETX: dcgm_fields.DCGM_FI_PROF_PCIE_TX_BYTES,
}
def __init__(self, gpus, frequency, metrics, dcgmPath=None):
"""
Parameters
----------
gpus : list of GPUDevice
The gpus to be monitored
frequency : int
Sampling frequency for the metric
metrics : list
List of Record types to monitor
dcgmPath : str (optional)
DCGM installation path
"""
super().__init__(frequency, metrics)
structs._dcgmInit(dcgmPath)
dcgm_agent.dcgmInit()
self._gpus = gpus
# Start DCGM in the embedded mode to use the shared library
self.dcgm_handle = dcgm_handle = dcgm_agent.dcgmStartEmbedded(
structs.DCGM_OPERATION_MODE_MANUAL)
group_name = "torchbench-dcgm-monitor"
# Create DCGM monitor group
self.group_id = dcgm_agent.dcgmGroupCreate(dcgm_handle,
structs.DCGM_GROUP_EMPTY,
group_name)
# Add the GPUs to the group
for gpu in self._gpus:
dcgm_agent.dcgmGroupAddDevice(dcgm_handle, self.group_id,
gpu.device_id())
frequency = int(self._frequency * 1000)
fields = []
try:
for metric in metrics:
fields.append(self.model_analyzer_to_dcgm_field[metric])
except KeyError:
dcgm_agent.dcgmShutdown()
raise TorchBenchAnalyzerException(
f'{metric} is not supported by Model Analyzer DCGM Monitor')
dcgm_field_group_id = dcgm_agent.dcgmFieldGroupCreate(
dcgm_handle, fields, group_name)
        dcgm_field_group = dcgm_field_helpers.DcgmFieldGroup(dcgm_handle, fields, group_name, dcgm_field_group_id)
        self.group_watcher = dcgm_field_helpers.DcgmFieldGroupWatcher(
            dcgm_handle, self.group_id, dcgm_field_group,
            structs.DCGM_OPERATION_MODE_MANUAL, frequency, 3600, 0, 0)
def _monitoring_iteration(self):
self.group_watcher.GetAllSinceLastCall()
def _collect_records(self):
records = []
for gpu in self._gpus:
device_id = gpu.device_id()
metrics = self.group_watcher.values[device_id]
            # Skip this GPU if no metrics have been collected for it yet
if len(list(metrics)) > 0:
for metric_type in self._metrics:
dcgm_field = self.model_analyzer_to_dcgm_field[metric_type]
for measurement in metrics[dcgm_field].values:
if measurement.value is not None:
# DCGM timestamp is in nanoseconds
records.append(
metric_type(value=float(measurement.value),
device_uuid=gpu.device_uuid(),
timestamp=measurement.ts))
records.sort(key=lambda x: x._timestamp)
return records
def destroy(self):
"""
Destroy the DCGMMonitor. This function must be called
in order to appropriately deallocate the resources.
"""
dcgm_agent.dcgmShutdown()
super().destroy()
|
import os
import time
from .monitor import Monitor
import psutil
from ..tb_dcgm_types.cpu_peak_memory import CPUPeakMemory
class CPUMonitor(Monitor):
"""
A CPU monitor that uses psutil to monitor CPU usage
"""
def __init__(self, frequency, metrics_needed=[], monitored_pid=None):
super().__init__(frequency, metrics_needed)
# It is a raw record list. [timestamp, cpu_memory_usage, cpu_available_memory]
self._cpu_records = []
if monitored_pid:
self._monitored_pid = monitored_pid
else:
self._monitored_pid = os.getpid()
def _get_cpu_stats(self):
"""
        Return a raw record: (timestamp in nanoseconds, CPU memory usage in MB,
        available system memory in MB).
"""
server_process = psutil.Process(self._monitored_pid)
process_memory_info = server_process.memory_full_info()
system_memory_info = psutil.virtual_memory()
# Divide by 1024*1024 to convert from bytes to MB
a_raw_record = (time.time_ns(), process_memory_info.uss // 1048576, system_memory_info.available // 1048576)
return a_raw_record
def _monitoring_iteration(self):
if CPUPeakMemory in self._metrics:
self._cpu_records.append(self._get_cpu_stats())
def _collect_records(self):
"""
Convert all raw records into corresponding Record type.
"""
records = []
for record in self._cpu_records:
records.append(CPUPeakMemory(timestamp=record[0], value=record[1]))
return records |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import dcgm_fields
from . import dcgm_structs
from . import dcgm_agent
from . import dcgm_value as dcgmvalue
from . import dcgm_fields_internal
import ctypes
import json
# @Yueming Hao: add this wrapper class to keep fieldGroupId consistent with the latest dcgm_field_helpers.py
class DcgmFieldGroup:
def __init__(self, dcgm_handle, field_ids, group_name, fieldGroupId):
self.dcgm_handle = dcgm_handle
self.field_ids = field_ids
self.group_name = group_name
self.fieldGroupId = fieldGroupId
'''
Helper class that makes a python-friendly field value from one returned from the python bindings
'''
class DcgmFieldValue():
'''
Constructor
rawValue is the latest dcgm_structs.c_dcgmFieldValue_v? structure of a field value returned from the raw APIs
'''
def __init__(self, rawValue):
#Make sure the class passed in is an expected type
if not type(rawValue) == dcgm_structs.c_dcgmFieldValue_v1:
raise Exception("Unexpected rawValue type %s" % str(type(rawValue)))
self.ts = rawValue.ts
self.fieldId = rawValue.fieldId
self.fieldType = chr(rawValue.fieldType)
self.isBlank = False
self.value = None
if rawValue.status != dcgm_structs.DCGM_ST_OK:
self.isBlank = True
return
if self.fieldType == dcgm_fields.DCGM_FT_DOUBLE:
self.value = float(rawValue.value.dbl)
self.isBlank = dcgmvalue.DCGM_FP64_IS_BLANK(self.value)
elif self.fieldType == dcgm_fields.DCGM_FT_INT64 or self.fieldType == dcgm_fields.DCGM_FT_TIMESTAMP:
self.value = int(rawValue.value.i64)
self.isBlank = dcgmvalue.DCGM_INT64_IS_BLANK(self.value)
elif self.fieldType == dcgm_fields.DCGM_FT_STRING:
self.value = str(rawValue.value.str)
self.isBlank = dcgmvalue.DCGM_STR_IS_BLANK(self.value)
elif self.fieldType == dcgm_fields.DCGM_FT_BINARY:
if self.fieldId == dcgm_fields.DCGM_FI_DEV_ACCOUNTING_DATA:
accStats = dcgm_structs.c_dcgmDevicePidAccountingStats_v1()
ctypes.memmove(ctypes.addressof(accStats), rawValue.value.blob, accStats.FieldsSizeof())
if self.fieldId in [dcgm_fields_internal.DCGM_FI_DEV_COMPUTE_PIDS, dcgm_fields_internal.DCGM_FI_DEV_GRAPHICS_PIDS]:
processStats = dcgm_structs.c_dcgmRunningProcess_t()
ctypes.memmove(ctypes.addressof(processStats), rawValue.value.blob, processStats.FieldsSizeof())
self.value = processStats
self.fieldType = dcgm_fields.DCGM_FT_BINARY
# This should always be false
self.isBlank = dcgmvalue.DCGM_INT64_IS_BLANK(processStats.pid)
elif self.fieldId == dcgm_fields.DCGM_FI_SYNC_BOOST:
#Not exposed publicly for now
self.value = None
else:
raise Exception("Blobs not handled yet for fieldId %d" % self.fieldId)
else:
raise Exception("Unhandled fieldType: %s" % self.fieldType)
class DcgmFieldValueTimeSeries:
def __init__(self):
self.values = [] #Values in timestamp order
def __len__(self):
return len(self.values)
def __getitem__(self, key):
return self.values[key]
def InsertValue(self, value):
if len(self.values) < 1 or value.ts >= self.values[-1].ts:
self.values.append(value)
return
#Otherwise, we need to insert the value in the correct place. Find the place
for i, existingValue in enumerate(self.values):
if value.ts < existingValue.ts:
self.values.insert(i, value)
return
raise Exception("Unexpected no place to insert ts %d" % value.ts)
class FieldValueEncoder(json.JSONEncoder):
    # Pylint does not like overloading the default method, so the comment below is a workaround (WAR) for the linting problem
def default(self, obj): # pylint: disable=E0202
nested_json = []
for key in obj:
if isinstance(key, DcgmFieldValue):
if(key.isBlank):
continue
nested_json.append({'Timestamp' : key.ts, 'FieldId': key.fieldId, 'Value' : key.value})
else:
return json.JSONEncoder.default(self, obj) # Let default encoder throw exception
return nested_json
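# Illustrative sketch (an assumption, not upstream code): FieldValueEncoder can dump any
# iterable of DcgmFieldValue objects, e.g. a DcgmFieldValueTimeSeries filled in by a watcher,
# straight to JSON; blank values are skipped. `series` below is hypothetical.
# series = watcher.values[gpu_id][field_id]
# print(json.dumps(series, cls=FieldValueEncoder))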
def py_helper_dcgm_field_values_since_callback(gpuId, values, numValues, userData):
userData = ctypes.cast(userData, ctypes.py_object).value
userData._ProcessValues(gpuId, values[0:numValues])
return 0
helper_dcgm_field_values_since_callback = dcgm_agent.dcgmFieldValueEnumeration_f(py_helper_dcgm_field_values_since_callback)
def py_helper_dcgm_field_values_since_callback_v2(entityGroupId, entityId, values, numValues, userData):
userData = ctypes.cast(userData, ctypes.py_object).value
userData._ProcessValues(entityGroupId, entityId, values[0:numValues])
return 0
helper_dcgm_field_values_since_callback_v2 = dcgm_agent.dcgmFieldValueEntityEnumeration_f(py_helper_dcgm_field_values_since_callback_v2)
'''
Helper class for handling field value update callbacks and storing them in a .values member variable
'''
class DcgmFieldValueCollection:
def __init__(self, handle, groupId):
self.values = {} #2D dictionary of [gpuId][fieldId](DcgmFieldValueTimeSeries)
self._handle = handle
self._groupId = groupId
self._numValuesSeen = 0
self._nextSinceTimestamp = 0
'''
Helper function called by the callback of dcgm_agent.dcgmGetValuesSince to process individual field values
'''
def _ProcessValues(self, gpuId, values):
self._numValuesSeen += len(values)
if gpuId not in self.values:
self.values[gpuId] = {}
for rawValue in values:
#Convert to python-friendly value
value = DcgmFieldValue(rawValue)
if value.fieldId not in self.values[gpuId]:
self.values[gpuId][value.fieldId] = DcgmFieldValueTimeSeries()
self.values[gpuId][value.fieldId].InsertValue(value)
'''
Get the latest values for a fieldGroup and store them to the .values member variable
Note: This class does not automatically watch fieldGroup. You must do that ahead of time with dcgmGroup.samples.WatchFields()
'''
def GetLatestValues(self, fieldGroup):
ret = dcgm_agent.dcgmGetLatestValues(self._handle, self._groupId, fieldGroup.fieldGroupId, helper_dcgm_field_values_since_callback, self)
#Will throw exception on error
dcgm_structs._dcgmCheckReturn(ret)
'''
Method to cause more field values to be retrieved from DCGM. Returns the
number of field values that were retrieved.
'''
def GetAllSinceLastCall(self, fieldGroup):
beforeCount = self._numValuesSeen
self._nextSinceTimestamp = dcgm_agent.dcgmGetValuesSince(self._handle, self._groupId, fieldGroup.fieldGroupId, self._nextSinceTimestamp, helper_dcgm_field_values_since_callback, self)
afterCount = self._numValuesSeen
return afterCount - beforeCount
def GetLatestValues_v2(self, fieldGroup):
ret = dcgm_agent.dcgmGetLatestValues_v2(self._handle, self._groupId, fieldGroup.fieldGroupId, helper_dcgm_field_values_since_callback_v2, self)
#Will throw exception on error
dcgm_structs._dcgmCheckReturn(ret)
'''
Method to cause more field values to be retrieved from DCGM. Returns the number of field values that were retrieved
'''
def GetAllSinceLastCall_v2(self, fieldGroup):
beforeCount = self._numValuesSeen
self._nextSinceTimestamp = dcgm_agent.dcgmGetValuesSince_v2(self._handle, self._groupId, fieldGroup.fieldGroupId, self._nextSinceTimestamp, helper_dcgm_field_values_since_entity_callback, self)
afterCount = self._numValuesSeen
return afterCount - beforeCount
'''
Empty .values{} so that old data is no longer present in this structure.
This can be used to prevent .values from growing over time
'''
def EmptyValues(self):
self.values = {}
self._numValuesSeen = 0
'''
Helper class for watching a field group and storing fields values returned from it
'''
class DcgmFieldGroupWatcher(DcgmFieldValueCollection):
'''
Constructor
handle is a DCGM handle from dcgm_agent.dcgmInit()
groupId is a valid DCGM group ID returned from dcgm_agent.dcgmGroupCreate
fieldGroup is the DcgmFieldGroup() instance to watch fields for
operationMode is a dcgm_structs.DCGM_OPERATION_MODE_? constant for if the host engine is running in lock step or auto mode
updateFreq is how often to update each field in usec
maxKeepAge is how long DCGM should keep values for in seconds
maxKeepSamples is the maximum number of samples DCGM should ever cache for each field
startTimestamp is a base timestamp we should start from when first reading values. This can be used to resume a
previous instance of a DcgmFieldGroupWatcher by using its _nextSinceTimestamp.
0=start with all cached data
'''
def __init__(self, handle, groupId, fieldGroup, operationMode, updateFreq, maxKeepAge, maxKeepSamples, startTimestamp):
self._fieldGroup = fieldGroup
self._operationMode = operationMode
self._updateFreq = updateFreq
self._maxKeepAge = maxKeepAge
self._maxKeepSamples = maxKeepSamples
DcgmFieldValueCollection.__init__(self, handle, groupId)
self._nextSinceTimestamp = 0 #Start from beginning of time
if startTimestamp > 0:
self._nextSinceTimestamp = startTimestamp
self._numValuesSeen = 0
#Start watches
self._WatchFieldGroup()
'''
Initiate the host engine watch on the fields
'''
def _WatchFieldGroup(self):
ret = dcgm_agent.dcgmWatchFields(self._handle, self._groupId, self._fieldGroup.fieldGroupId, self._updateFreq, self._maxKeepAge, self._maxKeepSamples)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
# Force an update of the fields so that we can fetch initial values.
ret = dcgm_agent.dcgmUpdateAllFields(self._handle, 1)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
# Initial update will fetch from startTimestamp.
self.GetAllSinceLastCall()
'''
Method to cause more field values to be retrieved from DCGM. Returns the
number of field values that were retrieved
'''
def GetAllSinceLastCall(self):
#If we're in manual mode, force an update
if self._operationMode == dcgm_structs.DCGM_OPERATION_MODE_MANUAL:
ret = dcgm_agent.dcgmUpdateAllFields(self._handle, 1)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
return super().GetAllSinceLastCall(self._fieldGroup)
def py_helper_dcgm_field_values_since_entity_callback(entityGroupId, entityId, values, numValues, userData):
userData = ctypes.cast(userData, ctypes.py_object).value
userData._ProcessValues(entityGroupId, entityId, values[0:numValues])
return 0
helper_dcgm_field_values_since_entity_callback = dcgm_agent.dcgmFieldValueEntityEnumeration_f(py_helper_dcgm_field_values_since_entity_callback)
'''
Helper class for handling field value update callbacks and storing them in a .values member variable
'''
class DcgmFieldValueEntityCollection:
def __init__(self, handle, groupId):
self.values = {} #3D dictionary of [entityGroupId][entityId][fieldId](DcgmFieldValueTimeSeries)
self._handle = handle
self._groupId = groupId
self._numValuesSeen = 0
self._nextSinceTimestamp = 0
'''
Helper function called by the callback of dcgm_agent.dcgmGetValuesSince to process individual field values
'''
def _ProcessValues(self, entityGroupId, entityId, values):
self._numValuesSeen += len(values)
if entityGroupId not in self.values:
self.values[entityGroupId] = {}
if entityId not in self.values[entityGroupId]:
self.values[entityGroupId][entityId] = {}
for rawValue in values:
#Convert to python-friendly value
value = DcgmFieldValue(rawValue)
if value.fieldId not in self.values[entityGroupId][entityId]:
self.values[entityGroupId][entityId][value.fieldId] = DcgmFieldValueTimeSeries()
self.values[entityGroupId][entityId][value.fieldId].InsertValue(value)
'''
Get the latest values for a fieldGroup and store them to the .values member variable
Note: This class does not automatically watch fieldGroup. You must do that ahead of time with dcgmGroup.samples.WatchFields()
'''
def GetLatestValues(self, fieldGroup):
ret = dcgm_agent.dcgmGetLatestValues_v2(self._handle, self._groupId, fieldGroup.fieldGroupId, helper_dcgm_field_values_since_entity_callback, self)
#Will throw exception on error
dcgm_structs._dcgmCheckReturn(ret)
'''
Method to cause more field values to be retrieved from DCGM. Returns the
number of field values that were retrieved.
'''
def GetAllSinceLastCall(self, fieldGroup):
beforeCount = self._numValuesSeen
self._nextSinceTimestamp = dcgm_agent.dcgmGetValuesSince_v2(self._handle, self._groupId, fieldGroup.fieldGroupId, self._nextSinceTimestamp, helper_dcgm_field_values_since_entity_callback, self)
afterCount = self._numValuesSeen
return afterCount - beforeCount
'''
Empty .values{} so that old data is no longer present in this structure.
This can be used to prevent .values from growing over time
'''
def EmptyValues(self):
self.values = {}
self._numValuesSeen = 0
'''
Helper class for watching a field group and storing fields values returned from it
'''
class DcgmFieldGroupEntityWatcher(DcgmFieldValueEntityCollection):
'''
Constructor
handle is a DCGM handle from dcgm_agent.dcgmInit()
groupId is a valid DCGM group ID returned from dcgm_agent.dcgmGroupCreate
fieldGroup is the DcgmFieldGroup() instance to watch fields for
operationMode is a dcgm_structs.DCGM_OPERATION_MODE_? constant for if the host engine is running in lock step or auto mode
updateFreq is how often to update each field in usec
maxKeepAge is how long DCGM should keep values for in seconds
maxKeepSamples is the maximum number of samples DCGM should ever cache for each field
startTimestamp is a base timestamp we should start from when first reading values. This can be used to resume a
previous instance of a DcgmFieldGroupWatcher by using its _nextSinceTimestamp.
0=start with all cached data
'''
def __init__(self, handle, groupId, fieldGroup, operationMode, updateFreq, maxKeepAge, maxKeepSamples, startTimestamp):
self._fieldGroup = fieldGroup
self._operationMode = operationMode
self._updateFreq = updateFreq
self._maxKeepAge = maxKeepAge
self._maxKeepSamples = maxKeepSamples
DcgmFieldValueEntityCollection.__init__(self, handle, groupId)
self._nextSinceTimestamp = 0 #Start from beginning of time
if startTimestamp > 0:
self._nextSinceTimestamp = startTimestamp
self._numValuesSeen = 0
#Start watches
self._WatchFieldGroup()
'''
Initiate the host engine watch on the fields
'''
def _WatchFieldGroup(self):
ret = dcgm_agent.dcgmWatchFields(self._handle, self._groupId, self._fieldGroup.fieldGroupId, self._updateFreq, self._maxKeepAge, self._maxKeepSamples)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
# Force an update of the fields so that we can fetch initial values.
ret = dcgm_agent.dcgmUpdateAllFields(self._handle, 1)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
# Initial update will fetch from startTimestamp.
self.GetAllSinceLastCall()
'''
Method to cause more field values to be retrieved from DCGM. Returns the
number of field values that were retrieved
'''
def GetAllSinceLastCall(self):
#If we're in manual mode, force an update
if self._operationMode == dcgm_structs.DCGM_OPERATION_MODE_MANUAL:
ret = dcgm_agent.dcgmUpdateAllFields(self._handle, 1)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
return super().GetAllSinceLastCall(self._fieldGroup)
#Test program for demonstrating how this module works
# def main():
# operationMode = dcgm_structs.DCGM_OPERATION_MODE_AUTO
# timeStep = 1.0
# dcgm_structs._dcgmInit()
# dcgm_agent.dcgmInit() #Will throw an exception on error
# handle = dcgm_agent.dcgmStartEmbedded(operationMode)
# handleObj = pydcgm.DcgmHandle(handle=handle)
# groupId = dcgm_structs.DCGM_GROUP_ALL_GPUS
# fieldIds = [dcgm_fields.DCGM_FI_DEV_SM_CLOCK, dcgm_fields.DCGM_FI_DEV_MEM_CLOCK]
# fieldGroup = pydcgm.DcgmFieldGroup(handleObj, "my_field_group", fieldIds)
# updateFreq = int(timeStep * 1000000.0)
# maxKeepAge = 3600.0 #1 hour
# maxKeepSamples = 0 #unlimited. maxKeepAge will enforce quota
# startTimestamp = 0 #beginning of time
# dfcw = DcgmFieldGroupWatcher(handle, groupId, fieldGroup, operationMode, updateFreq, maxKeepAge, maxKeepSamples, startTimestamp)
# dfcw2 = DcgmFieldGroupEntityWatcher(handle, groupId, fieldGroup, operationMode, updateFreq, maxKeepAge, maxKeepSamples, startTimestamp)
# while(True):
# newUpdateCount = dfcw.GetAllSinceLastCall()
# newUpdateCount2 = dfcw2.GetAllSinceLastCall()
# print("Got %d and %d new field value updates" % (newUpdateCount, newUpdateCount2))
# for gpuId in list(dfcw.values.keys()):
# print("gpuId %d" % gpuId)
# for fieldId in list(dfcw.values[gpuId].keys()):
# print(" fieldId %d: %d values. latest timestamp %d" % \
# (fieldId, len(dfcw.values[gpuId][fieldId]), dfcw.values[gpuId][fieldId][-1].ts))
# for entityGroupId in list(dfcw2.values.keys()):
# print("entityGroupId %d" % entityGroupId)
# for entityId in list(dfcw2.values[entityGroupId].keys()):
# print(" entityId %d" % entityId)
# for fieldId in list(dfcw2.values[entityGroupId][entityId].keys()):
# print(" fieldId %d: %d values. latest timestamp %d" % \
# (fieldId, len(dfcw2.values[entityGroupId][entityId][fieldId]), dfcw2.values[entityGroupId][entityId][fieldId][-1].ts))
# time.sleep(timeStep)
# if __name__ == "__main__":
# main()
|
import time
from .monitor import Monitor
from ..tb_dcgm_types.gpu_free_memory import GPUFreeMemory
from ..tb_dcgm_types.gpu_tensoractive import GPUTensorActive
from ..tb_dcgm_types.gpu_peak_memory import GPUPeakMemory
from ..tb_dcgm_types.gpu_utilization import GPUUtilization
from ..tb_dcgm_types.gpu_power_usage import GPUPowerUsage
from ..tb_dcgm_types.gpu_fp32active import GPUFP32Active
from ..tb_dcgm_types.gpu_dram_active import GPUDRAMActive
from ..tb_dcgm_types.gpu_pcie_rx import GPUPCIERX
from ..tb_dcgm_types.gpu_pcie_tx import GPUPCIETX
from ..tb_dcgm_types.da_exceptions import TorchBenchAnalyzerException
from . import dcgm_agent
from . import dcgm_fields
from . import dcgm_field_helpers
from . import dcgm_structs as structs
from packaging import version
import pynvml
class NVMLMonitor(Monitor):
"""
Use NVML to monitor GPU metrics
"""
# Mapping between the NVML Fields and Model Analyzer Records
    # For more explanations, please refer to https://docs.nvidia.com/deploy/nvml-api/group__nvmlDeviceQueries.html
model_analyzer_to_nvml_field = {
GPUPeakMemory: "used",
GPUFreeMemory: "free",
GPUUtilization: "utilization.gpu",
GPUPowerUsage: "power.draw",
}
def __init__(self, gpus, frequency, metrics):
"""
Parameters
----------
gpus : list of GPUDevice
The gpus to be monitored
frequency : int
Sampling frequency for the metric
metrics : list
List of Record types to monitor
"""
super().__init__(frequency, metrics)
self._nvml = pynvml
self._nvml.nvmlInit()
self._metrics = metrics
# raw records: {gpu: {field: [(timestamp, value), ...]}}
self._records = {}
self._gpus = gpus
# gpu handles: {gpu: handle}
self._gpu_handles = {}
self._nvmlDeviceGetHandleByUUID = None
self.check_nvml_compatibility()
for gpu in self._gpus:
self._gpu_handles[gpu] = self._nvmlDeviceGetHandleByUUID(gpu.device_uuid())
self._records[gpu] = {}
for metric in self._metrics:
self._records[gpu][metric] = []
def check_nvml_compatibility(self):
        # Check the pynvml version; versions older than 11.5.0 need the UUID encoded to bytes first
current_version = version.parse(pynvml.__version__)
if current_version < version.parse("11.5.0"):
self._nvmlDeviceGetHandleByUUID=self._nvmlDeviceGetHandleByUUID_for_older_pynvml
else:
self._nvmlDeviceGetHandleByUUID=self._nvml.nvmlDeviceGetHandleByUUID
def _nvmlDeviceGetHandleByUUID_for_older_pynvml(self, uuid):
return self._nvml.nvmlDeviceGetHandleByUUID(uuid.encode("ascii"))
def _monitoring_iteration(self):
self._get_gpu_metrics()
def _get_gpu_metrics(self):
"""
Get the metrics of all the GPUs
"""
for gpu in self._gpus:
handle = self._nvmlDeviceGetHandleByUUID(gpu.device_uuid())
for metric in self._metrics:
nvml_field = self.model_analyzer_to_nvml_field[metric]
# convert to microseconds to keep consistency with the dcgm monitor
atimestamp = time.time_ns() // 1000
if metric == GPUPeakMemory:
info = self._nvml.nvmlDeviceGetMemoryInfo(handle)
                    # @Yueming TODO: update to NVML memory API version 2, because the v1 API reports used memory that includes memory allocated by the GPU driver.
# used_mem = info.used
# reserved_mem = info.reserved
# self._records[gpu][metric].append((atimestamp, used_mem - reserved_mem))
self._records[gpu][metric].append((atimestamp, float(getattr(info, nvml_field)/1024/1024)))
elif metric == GPUFreeMemory:
info = self._nvml.nvmlDeviceGetMemoryInfo(handle)
self._records[gpu][metric].append((atimestamp, float(getattr(info, nvml_field)/1024/1024)))
elif metric == GPUUtilization:
info = self._nvml.nvmlDeviceGetUtilizationRates(handle)
self._records[gpu][metric].append((atimestamp, getattr(info, nvml_field)))
elif metric == GPUPowerUsage:
info = self._nvml.nvmlDeviceGetPowerUsage(handle)
self._records[gpu][metric].append((atimestamp, info))
def _collect_records(self):
records = []
for gpu in self._gpus:
for metric_type in self._metrics:
for measurement in self._records[gpu][metric_type]:
records.append(metric_type(value=float(measurement[1]),
timestamp=measurement[0], device_uuid=gpu.device_uuid()))
return records
def destroy(self):
self._nvml.nvmlShutdown()
super().destroy()
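# Commented-out usage sketch (added for illustration; `gpus` and its GPUDevice objects are
# assumed to come from the package's GPU discovery code). As with CPUMonitor, the Monitor
# base class is expected to drive _monitoring_iteration(); the calls below invoke the hooks
# directly.
# monitor = NVMLMonitor(gpus, frequency=1, metrics=[GPUPeakMemory, GPUUtilization])
# monitor._monitoring_iteration()
# for record in monitor._collect_records():
#     print(record)
# monitor.destroy()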
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for the internal API of DCGM library (dcgm_agent.h)
##
from . import dcgm_structs
from . import dcgm_fields
from ctypes import *
import functools
def ensure_byte_strings():
"""
Ensures that we don't call C APIs with unicode strings in the arguments
every unicode args gets converted to UTF-8 before the function is called
"""
def convert_result_from_bytes(result):
if isinstance(result, bytes):
return result.decode('utf-8')
if isinstance(result, list):
return list(map(convert_result_from_bytes, result))
if isinstance(result, tuple):
return tuple(map(convert_result_from_bytes, result))
return result
def decorator(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
newargs = []
newkwargs = {}
for arg in args:
if isinstance(arg, str):
newargs.append(bytes(arg, 'utf-8'))
else:
newargs.append(arg)
for k, v in kwargs.items():
if isinstance(v, str):
newkwargs[k] = bytes(v, 'utf-8')
else:
newkwargs[k] = v
newargs = tuple(newargs)
return fn(*newargs, **newkwargs)
return wrapper
return decorator
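# Brief sketch (not part of the original bindings) of what ensure_byte_strings() does: every
# str positional or keyword argument is re-encoded to UTF-8 bytes before the wrapped function
# runs. The function name below is hypothetical.
# @ensure_byte_strings()
# def _echo(arg):
#     return arg
# _echo("torchbench")  # the wrapped function receives b"torchbench"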
# Provides access to functions from dcgm_agent_internal
dcgmFP = dcgm_structs._dcgmGetFunctionPointer
# This method is used to initialize DCGM
@ensure_byte_strings()
def dcgmInit():
dcgm_handle = c_void_p()
fn = dcgmFP("dcgmInit")
ret = fn(byref(dcgm_handle))
dcgm_structs._dcgmCheckReturn(ret)
return ret
# This method is used to shutdown DCGM Engine
@ensure_byte_strings()
def dcgmShutdown():
fn = dcgmFP("dcgmShutdown")
ret = fn()
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmStartEmbedded(opMode):
dcgm_handle = c_void_p()
fn = dcgmFP("dcgmStartEmbedded")
ret = fn(opMode, byref(dcgm_handle))
dcgm_structs._dcgmCheckReturn(ret)
return dcgm_handle
@ensure_byte_strings()
def dcgmStopEmbedded(dcgm_handle):
fn = dcgmFP("dcgmStopEmbedded")
ret = fn(dcgm_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmConnect(ip_address):
dcgm_handle = c_void_p()
fn = dcgmFP("dcgmConnect")
ret = fn(ip_address, byref(dcgm_handle))
dcgm_structs._dcgmCheckReturn(ret)
return dcgm_handle
@ensure_byte_strings()
def dcgmConnect_v2(ip_address, connectParams, version=dcgm_structs.c_dcgmConnectV2Params_version):
connectParams.version = version
dcgm_handle = c_void_p()
fn = dcgmFP("dcgmConnect_v2")
ret = fn(ip_address, byref(connectParams), byref(dcgm_handle))
dcgm_structs._dcgmCheckReturn(ret)
return dcgm_handle
@ensure_byte_strings()
def dcgmDisconnect(dcgm_handle):
fn = dcgmFP("dcgmDisconnect")
ret = fn(dcgm_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGetAllSupportedDevices(dcgm_handle):
c_count = c_uint()
gpuid_list = c_uint * dcgm_structs.DCGM_MAX_NUM_DEVICES
c_gpuid_list = gpuid_list()
fn = dcgmFP("dcgmGetAllSupportedDevices")
ret = fn(dcgm_handle, c_gpuid_list, byref(c_count))
dcgm_structs._dcgmCheckReturn(ret)
return list(c_gpuid_list[0:int(c_count.value)])
@ensure_byte_strings()
def dcgmGetAllDevices(dcgm_handle):
c_count = c_uint()
gpuid_list = c_uint * dcgm_structs.DCGM_MAX_NUM_DEVICES
c_gpuid_list = gpuid_list()
fn = dcgmFP("dcgmGetAllDevices")
ret = fn(dcgm_handle, c_gpuid_list, byref(c_count))
dcgm_structs._dcgmCheckReturn(ret)
return list(c_gpuid_list[0:int(c_count.value)])
@ensure_byte_strings()
def dcgmGetDeviceAttributes(dcgm_handle, gpuId, version=dcgm_structs.dcgmDeviceAttributes_version3):
fn = dcgmFP("dcgmGetDeviceAttributes")
if version == dcgm_structs.dcgmDeviceAttributes_version3:
device_values = dcgm_structs.c_dcgmDeviceAttributes_v3()
device_values.version = dcgm_structs.dcgmDeviceAttributes_version3
else:
dcgm_structs._dcgmCheckReturn(dcgm_structs.DCGM_ST_VER_MISMATCH)
ret = fn(dcgm_handle, c_int(gpuId), byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
@ensure_byte_strings()
def dcgmGetEntityGroupEntities(dcgm_handle, entityGroup, flags):
capacity = dcgm_structs.DCGM_GROUP_MAX_ENTITIES
c_count = c_int32(capacity)
entityIds = c_uint32 * capacity
c_entityIds = entityIds()
fn = dcgmFP("dcgmGetEntityGroupEntities")
ret = fn(dcgm_handle, entityGroup, c_entityIds, byref(c_count), flags)
dcgm_structs._dcgmCheckReturn(ret)
return c_entityIds[0:int(c_count.value)]
@ensure_byte_strings()
def dcgmGetNvLinkLinkStatus(dcgm_handle):
linkStatus = dcgm_structs.c_dcgmNvLinkStatus_v3()
linkStatus.version = dcgm_structs.dcgmNvLinkStatus_version3
fn = dcgmFP("dcgmGetNvLinkLinkStatus")
ret = fn(dcgm_handle, byref(linkStatus))
dcgm_structs._dcgmCheckReturn(ret)
return linkStatus
@ensure_byte_strings()
def dcgmGetGpuInstanceHierarchy(dcgm_handle):
hierarchy = dcgm_structs.c_dcgmMigHierarchy_v2()
hierarchy.version = dcgm_structs.c_dcgmMigHierarchy_version2
fn = dcgmFP("dcgmGetGpuInstanceHierarchy")
ret = fn(dcgm_handle, byref(hierarchy))
dcgm_structs._dcgmCheckReturn(ret)
return hierarchy
@ensure_byte_strings()
def dcgmCreateMigEntity(dcgm_handle, parentId, profile, createOption, flags):
fn = dcgmFP("dcgmCreateMigEntity")
cme = dcgm_structs.c_dcgmCreateMigEntity_v1()
cme.version = dcgm_structs.c_dcgmCreateMigEntity_version1
cme.parentId = parentId
cme.createOption = createOption
cme.profile = profile
cme.flags = flags
ret = fn(dcgm_handle, byref(cme))
dcgm_structs._dcgmCheckReturn(ret)
@ensure_byte_strings()
def dcgmDeleteMigEntity(dcgm_handle, entityGroupId, entityId, flags):
fn = dcgmFP("dcgmDeleteMigEntity")
dme = dcgm_structs.c_dcgmDeleteMigEntity_v1()
dme.version = dcgm_structs.c_dcgmDeleteMigEntity_version1
dme.entityGroupId = entityGroupId
dme.entityId = entityId
dme.flags = flags
ret = fn(dcgm_handle, byref(dme))
dcgm_structs._dcgmCheckReturn(ret)
@ensure_byte_strings()
def dcgmGroupCreate(dcgm_handle, type, groupName):
c_group_id = c_void_p()
fn = dcgmFP("dcgmGroupCreate")
ret = fn(dcgm_handle, type, groupName, byref(c_group_id))
dcgm_structs._dcgmCheckReturn(ret)
return c_group_id
@ensure_byte_strings()
def dcgmGroupDestroy(dcgm_handle, group_id):
fn = dcgmFP("dcgmGroupDestroy")
ret = fn(dcgm_handle, group_id)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupAddDevice(dcgm_handle, group_id, gpu_id):
fn = dcgmFP("dcgmGroupAddDevice")
ret = fn(dcgm_handle, group_id, gpu_id)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupAddEntity(dcgm_handle, group_id, entityGroupId, entityId):
fn = dcgmFP("dcgmGroupAddEntity")
ret = fn(dcgm_handle, group_id, entityGroupId, entityId)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupRemoveDevice(dcgm_handle, group_id, gpu_id):
fn = dcgmFP("dcgmGroupRemoveDevice")
ret = fn(dcgm_handle, group_id, gpu_id)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupRemoveEntity(dcgm_handle, group_id, entityGroupId, entityId):
fn = dcgmFP("dcgmGroupRemoveEntity")
ret = fn(dcgm_handle, group_id, entityGroupId, entityId)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupGetInfo(dcgm_handle, group_id, version=dcgm_structs.c_dcgmGroupInfo_version2):
fn = dcgmFP("dcgmGroupGetInfo")
#support the old version of the request since the host engine does
if version == dcgm_structs.c_dcgmGroupInfo_version2:
device_values = dcgm_structs.c_dcgmGroupInfo_v2()
device_values.version = dcgm_structs.c_dcgmGroupInfo_version2
else:
dcgm_structs._dcgmCheckReturn(dcgm_structs.DCGM_ST_VER_MISMATCH)
ret = fn(dcgm_handle, group_id, byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
@ensure_byte_strings()
def dcgmGroupGetAllIds(dcgmHandle):
fn = dcgmFP("dcgmGroupGetAllIds")
c_count = c_uint()
groupIdList = c_void_p * dcgm_structs.DCGM_MAX_NUM_GROUPS
c_groupIdList = groupIdList()
ret = fn(dcgmHandle, c_groupIdList, byref(c_count))
dcgm_structs._dcgmCheckReturn(ret)
return list(c_groupIdList[0:int(c_count.value)])
@ensure_byte_strings()
def dcgmFieldGroupCreate(dcgm_handle, fieldIds, fieldGroupName):
c_field_group_id = c_void_p()
c_num_field_ids = c_int32(len(fieldIds))
c_field_ids = (c_uint16 * len(fieldIds))(*fieldIds)
fn = dcgmFP("dcgmFieldGroupCreate")
ret = fn(dcgm_handle, c_num_field_ids, byref(c_field_ids), fieldGroupName, byref(c_field_group_id))
dcgm_structs._dcgmCheckReturn(ret)
return c_field_group_id
@ensure_byte_strings()
def dcgmFieldGroupDestroy(dcgm_handle, fieldGroupId):
fn = dcgmFP("dcgmFieldGroupDestroy")
ret = fn(dcgm_handle, fieldGroupId)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmFieldGroupGetInfo(dcgm_handle, fieldGroupId):
c_fieldGroupInfo = dcgm_structs.c_dcgmFieldGroupInfo_v1()
c_fieldGroupInfo.version = dcgm_structs.dcgmFieldGroupInfo_version1
c_fieldGroupInfo.fieldGroupId = fieldGroupId
fn = dcgmFP("dcgmFieldGroupGetInfo")
ret = fn(dcgm_handle, byref(c_fieldGroupInfo))
dcgm_structs._dcgmCheckReturn(ret)
return c_fieldGroupInfo
@ensure_byte_strings()
def dcgmFieldGroupGetAll(dcgm_handle):
c_allGroupInfo = dcgm_structs.c_dcgmAllFieldGroup_v1()
c_allGroupInfo.version = dcgm_structs.dcgmAllFieldGroup_version1
fn = dcgmFP("dcgmFieldGroupGetAll")
ret = fn(dcgm_handle, byref(c_allGroupInfo))
dcgm_structs._dcgmCheckReturn(ret)
return c_allGroupInfo
@ensure_byte_strings()
def dcgmStatusCreate():
c_status_handle = c_void_p()
fn = dcgmFP("dcgmStatusCreate")
ret = fn(byref(c_status_handle))
dcgm_structs._dcgmCheckReturn(ret)
return c_status_handle
@ensure_byte_strings()
def dcgmStatusDestroy(status_handle):
fn = dcgmFP("dcgmStatusDestroy")
ret = fn(status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmStatusGetCount(status_handle):
c_count = c_uint()
fn = dcgmFP("dcgmStatusGetCount")
ret = fn(status_handle, byref(c_count))
dcgm_structs._dcgmCheckReturn(ret)
return c_count.value
@ensure_byte_strings()
def dcgmStatusPopError(status_handle):
c_errorInfo = dcgm_structs.c_dcgmErrorInfo_v1()
fn = dcgmFP("dcgmStatusPopError")
ret = fn(status_handle, byref(c_errorInfo))
if ret == dcgm_structs.DCGM_ST_OK:
return c_errorInfo
else:
return None
@ensure_byte_strings()
def dcgmStatusClear(status_handle):
fn = dcgmFP("dcgmStatusClear")
ret = fn(status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmConfigSet(dcgm_handle, group_id, configToSet, status_handle):
fn = dcgmFP("dcgmConfigSet")
configToSet.version = dcgm_structs.dcgmDeviceConfig_version1
ret = fn(dcgm_handle, group_id, byref(configToSet), status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmConfigGet(dcgm_handle, group_id, reqCfgType, count, status_handle):
fn = dcgmFP("dcgmConfigGet")
config_values_array = count * dcgm_structs.c_dcgmDeviceConfig_v1
c_config_values = config_values_array()
for index in range(0, count):
c_config_values[index].version = dcgm_structs.dcgmDeviceConfig_version1
ret = fn(dcgm_handle, group_id, reqCfgType, count, c_config_values, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return list(c_config_values[0:count])
@ensure_byte_strings()
def dcgmConfigEnforce(dcgm_handle, group_id, status_handle):
fn = dcgmFP("dcgmConfigEnforce")
ret = fn(dcgm_handle, group_id, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
# This method is used to tell the cache manager to update all fields
@ensure_byte_strings()
def dcgmUpdateAllFields(dcgm_handle, waitForUpdate):
fn = dcgmFP("dcgmUpdateAllFields")
ret = fn(dcgm_handle, c_int(waitForUpdate))
dcgm_structs._dcgmCheckReturn(ret)
return ret
# This method is used to get the policy information
@ensure_byte_strings()
def dcgmPolicyGet(dcgm_handle, group_id, count, status_handle):
fn = dcgmFP("dcgmPolicyGet")
policy_array = count * dcgm_structs.c_dcgmPolicy_v1
c_policy_values = policy_array()
for index in range(0, count):
c_policy_values[index].version = dcgm_structs.dcgmPolicy_version1
ret = fn(dcgm_handle, group_id, count, c_policy_values, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return c_policy_values[0:count]
# This method is used to set the policy information
@ensure_byte_strings()
def dcgmPolicySet(dcgm_handle, group_id, policy, status_handle):
fn = dcgmFP("dcgmPolicySet")
ret = fn(dcgm_handle, group_id, byref(policy), status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
#First parameter below is the return type
dcgmFieldValueEnumeration_f = CFUNCTYPE(c_int32, c_uint32, POINTER(dcgm_structs.c_dcgmFieldValue_v1), c_int32, c_void_p)
dcgmFieldValueEntityEnumeration_f = CFUNCTYPE(c_int32, c_uint32, c_uint32, POINTER(dcgm_structs.c_dcgmFieldValue_v1), c_int32, c_void_p)
@ensure_byte_strings()
def dcgmGetValuesSince(dcgm_handle, groupId, fieldGroupId, sinceTimestamp, enumCB, userData):
fn = dcgmFP("dcgmGetValuesSince")
c_nextSinceTimestamp = c_int64()
ret = fn(dcgm_handle, groupId, fieldGroupId, c_int64(sinceTimestamp), byref(c_nextSinceTimestamp), enumCB, py_object(userData))
dcgm_structs._dcgmCheckReturn(ret)
return c_nextSinceTimestamp.value
@ensure_byte_strings()
def dcgmGetValuesSince_v2(dcgm_handle, groupId, fieldGroupId, sinceTimestamp, enumCB, userData):
fn = dcgmFP("dcgmGetValuesSince_v2")
c_nextSinceTimestamp = c_int64()
ret = fn(dcgm_handle, groupId, fieldGroupId, c_int64(sinceTimestamp), byref(c_nextSinceTimestamp), enumCB, py_object(userData))
dcgm_structs._dcgmCheckReturn(ret)
return c_nextSinceTimestamp.value
@ensure_byte_strings()
def dcgmGetLatestValues(dcgm_handle, groupId, fieldGroupId, enumCB, userData):
fn = dcgmFP("dcgmGetLatestValues")
ret = fn(dcgm_handle, groupId, fieldGroupId, enumCB, py_object(userData))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGetLatestValues_v2(dcgm_handle, groupId, fieldGroupId, enumCB, userData):
fn = dcgmFP("dcgmGetLatestValues_v2")
ret = fn(dcgm_handle, groupId, fieldGroupId, enumCB, py_object(userData))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmWatchFields(dcgm_handle, groupId, fieldGroupId, updateFreq, maxKeepAge, maxKeepSamples):
fn = dcgmFP("dcgmWatchFields")
ret = fn(dcgm_handle, groupId, fieldGroupId, c_int64(updateFreq), c_double(maxKeepAge), c_int32(maxKeepSamples))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmUnwatchFields(dcgm_handle, groupId, fieldGroupId):
fn = dcgmFP("dcgmUnwatchFields")
ret = fn(dcgm_handle, groupId, fieldGroupId)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmHealthSet(dcgm_handle, groupId, systems):
fn = dcgmFP("dcgmHealthSet")
ret = fn(dcgm_handle, groupId, systems)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmHealthSet_v2(dcgm_handle, groupId, systems, updateInterval, maxKeepAge):
params = dcgm_structs.c_dcgmHealthSetParams_v2()
params.version = dcgm_structs.dcgmHealthSetParams_version2
params.groupId = groupId
params.systems = systems
params.updateInterval = updateInterval
params.maxKeepAge = maxKeepAge
fn = dcgmFP("dcgmHealthSet_v2")
ret = fn(dcgm_handle, byref(params))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmHealthGet(dcgm_handle, groupId):
c_systems = c_int32()
fn = dcgmFP("dcgmHealthGet")
ret = fn(dcgm_handle, groupId, byref(c_systems))
dcgm_structs._dcgmCheckReturn(ret)
return c_systems.value
@ensure_byte_strings()
def dcgmHealthCheck(dcgm_handle, groupId, version=dcgm_structs.dcgmHealthResponse_version4):
if version != dcgm_structs.dcgmHealthResponse_version4:
dcgm_structs._dcgmCheckReturn(dcgm_structs.DCGM_ST_VER_MISMATCH)
c_results = dcgm_structs.c_dcgmHealthResponse_v4()
c_results.version = dcgm_structs.dcgmHealthResponse_version4
fn = dcgmFP("dcgmHealthCheck")
ret = fn(dcgm_handle, groupId, byref(c_results))
dcgm_structs._dcgmCheckReturn(ret)
return c_results
@ensure_byte_strings()
def dcgmPolicyRegister(dcgm_handle, groupId, condition, beginCallback, finishCallback):
fn = dcgmFP("dcgmPolicyRegister")
ret = fn(dcgm_handle, groupId, condition, beginCallback, finishCallback)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmPolicyUnregister(dcgm_handle, groupId, condition):
fn = dcgmFP("dcgmPolicyUnregister")
ret = fn(dcgm_handle, groupId, condition)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmPolicyTrigger(dcgm_handle):
fn = dcgmFP("dcgmPolicyTrigger")
ret = fn(dcgm_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
def helperDiagCheckReturn(ret, response):
try:
dcgm_structs._dcgmCheckReturn(ret)
except dcgm_structs.DCGMError as e:
if response.systemError.msg != "":
# Add systemError information to the raised exception.
info = "%s" % response.systemError.msg
e.SetAdditionalInfo(info)
raise e
else:
raise
return response
@ensure_byte_strings()
def dcgmActionValidate_v2(dcgm_handle, runDiagInfo, runDiagVersion=dcgm_structs.dcgmRunDiag_version7):
response = dcgm_structs.c_dcgmDiagResponse_v8()
runDiagInfo.version = runDiagVersion
response.version = dcgm_structs.dcgmDiagResponse_version8
fn = dcgmFP("dcgmActionValidate_v2")
ret = fn(dcgm_handle, byref(runDiagInfo), byref(response))
return helperDiagCheckReturn(ret, response)
@ensure_byte_strings()
def dcgmActionValidate(dcgm_handle, group_id, validate):
response = dcgm_structs.c_dcgmDiagResponse_v8()
response.version = dcgm_structs.dcgmDiagResponse_version8
# Put the group_id and validate into a dcgmRunDiag struct
runDiagInfo = dcgm_structs.c_dcgmRunDiag_v7()
runDiagInfo.version = dcgm_structs.dcgmRunDiag_version7
runDiagInfo.validate = validate
runDiagInfo.groupId = group_id
fn = dcgmFP("dcgmActionValidate_v2")
ret = fn(dcgm_handle, byref(runDiagInfo), byref(response))
return helperDiagCheckReturn(ret, response)
@ensure_byte_strings()
def dcgmRunDiagnostic(dcgm_handle, group_id, diagLevel):
response = dcgm_structs.c_dcgmDiagResponse_v8()
response.version = dcgm_structs.dcgmDiagResponse_version8
fn = dcgmFP("dcgmRunDiagnostic")
ret = fn(dcgm_handle, group_id, diagLevel, byref(response))
return helperDiagCheckReturn(ret, response)
@ensure_byte_strings()
def dcgmWatchPidFields(dcgm_handle, groupId, updateFreq, maxKeepAge, maxKeepSamples):
fn = dcgmFP("dcgmWatchPidFields")
ret = fn(dcgm_handle, groupId, c_int64(updateFreq), c_double(maxKeepAge), c_int32(maxKeepSamples))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGetPidInfo(dcgm_handle, groupId, pid):
fn = dcgmFP("dcgmGetPidInfo")
pidInfo = dcgm_structs.c_dcgmPidInfo_v2()
pidInfo.version = dcgm_structs.dcgmPidInfo_version2
pidInfo.pid = pid
ret = fn(dcgm_handle, groupId, byref(pidInfo))
dcgm_structs._dcgmCheckReturn(ret)
return pidInfo
@ensure_byte_strings()
def dcgmGetDeviceTopology(dcgm_handle, gpuId):
devtopo = dcgm_structs.c_dcgmDeviceTopology_v1()
fn = dcgmFP("dcgmGetDeviceTopology")
ret = fn(dcgm_handle, gpuId, byref(devtopo))
dcgm_structs._dcgmCheckReturn(ret)
return devtopo
@ensure_byte_strings()
def dcgmGetGroupTopology(dcgm_handle, groupId):
grouptopo = dcgm_structs.c_dcgmGroupTopology_v1()
fn = dcgmFP("dcgmGetGroupTopology")
ret = fn(dcgm_handle, groupId, byref(grouptopo))
dcgm_structs._dcgmCheckReturn(ret)
return grouptopo
@ensure_byte_strings()
def dcgmWatchJobFields(dcgm_handle, groupId, updateFreq, maxKeepAge, maxKeepSamples):
fn = dcgmFP("dcgmWatchJobFields")
ret = fn(dcgm_handle, groupId, c_int64(updateFreq), c_double(maxKeepAge), c_int32(maxKeepSamples))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmJobStartStats(dcgm_handle, groupId, jobid):
fn = dcgmFP("dcgmJobStartStats")
ret = fn(dcgm_handle, groupId, jobid)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmJobStopStats(dcgm_handle, jobid):
fn = dcgmFP("dcgmJobStopStats")
ret = fn(dcgm_handle, jobid)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmJobGetStats(dcgm_handle, jobid):
fn = dcgmFP("dcgmJobGetStats")
jobInfo = dcgm_structs.c_dcgmJobInfo_v3()
jobInfo.version = dcgm_structs.dcgmJobInfo_version3
ret = fn(dcgm_handle, jobid, byref(jobInfo))
dcgm_structs._dcgmCheckReturn(ret)
return jobInfo
@ensure_byte_strings()
def dcgmJobRemove(dcgm_handle, jobid):
fn = dcgmFP("dcgmJobRemove")
ret = fn(dcgm_handle, jobid)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmJobRemoveAll(dcgm_handle):
fn = dcgmFP("dcgmJobRemoveAll")
ret = fn(dcgm_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmIntrospectGetHostengineMemoryUsage(dcgm_handle, waitIfNoData=True):
fn = dcgmFP("dcgmIntrospectGetHostengineMemoryUsage")
memInfo = dcgm_structs.c_dcgmIntrospectMemory_v1()
memInfo.version = dcgm_structs.dcgmIntrospectMemory_version1
ret = fn(dcgm_handle, byref(memInfo), waitIfNoData)
dcgm_structs._dcgmCheckReturn(ret)
return memInfo
@ensure_byte_strings()
def dcgmIntrospectGetHostengineCpuUtilization(dcgm_handle, waitIfNoData=True):
fn = dcgmFP("dcgmIntrospectGetHostengineCpuUtilization")
cpuUtil = dcgm_structs.c_dcgmIntrospectCpuUtil_v1()
cpuUtil.version = dcgm_structs.dcgmIntrospectCpuUtil_version1
ret = fn(dcgm_handle, byref(cpuUtil), waitIfNoData)
dcgm_structs._dcgmCheckReturn(ret)
return cpuUtil
@ensure_byte_strings()
def dcgmEntityGetLatestValues(dcgmHandle, entityGroup, entityId, fieldIds):
fn = dcgmFP("dcgmEntityGetLatestValues")
field_values = (dcgm_structs.c_dcgmFieldValue_v1 * len(fieldIds))()
id_values = (c_uint16 * len(fieldIds))(*fieldIds)
ret = fn(dcgmHandle, c_uint(entityGroup), dcgm_fields.c_dcgm_field_eid_t(entityId), id_values, c_uint(len(fieldIds)), field_values)
dcgm_structs._dcgmCheckReturn(ret)
return field_values
@ensure_byte_strings()
def dcgmEntitiesGetLatestValues(dcgmHandle, entities, fieldIds, flags):
fn = dcgmFP("dcgmEntitiesGetLatestValues")
numFvs = len(fieldIds) * len(entities)
field_values = (dcgm_structs.c_dcgmFieldValue_v2 * numFvs)()
entities_values = (dcgm_structs.c_dcgmGroupEntityPair_t * len(entities))(*entities)
field_id_values = (c_uint16 * len(fieldIds))(*fieldIds)
ret = fn(dcgmHandle, entities_values, c_uint(len(entities)), field_id_values, c_uint(len(fieldIds)), flags, field_values)
dcgm_structs._dcgmCheckReturn(ret)
return field_values
@ensure_byte_strings()
def dcgmSelectGpusByTopology(dcgmHandle, inputGpuIds, numGpus, hintFlags):
fn = dcgmFP("dcgmSelectGpusByTopology")
outputGpuIds = c_int64()
ret = fn(dcgmHandle, c_uint64(inputGpuIds), c_uint32(numGpus), byref(outputGpuIds), c_uint64(hintFlags))
dcgm_structs._dcgmCheckReturn(ret)
return outputGpuIds
@ensure_byte_strings()
def dcgmGetFieldSummary(dcgmHandle, fieldId, entityGroupType, entityId, summaryMask, startTime, endTime):
fn = dcgmFP("dcgmGetFieldSummary")
request = dcgm_structs.c_dcgmFieldSummaryRequest_v1()
request.version = dcgm_structs.dcgmFieldSummaryRequest_version1
request.fieldId = fieldId
request.entityGroupType =entityGroupType
request.entityId = entityId
request.summaryTypeMask = summaryMask
request.startTime = startTime
request.endTime = endTime
ret = fn(dcgmHandle, byref(request))
dcgm_structs._dcgmCheckReturn(ret)
return request
@ensure_byte_strings()
def dcgmModuleDenylist(dcgmHandle, moduleId):
fn = dcgmFP("dcgmModuleDenylist")
ret = fn(dcgmHandle, c_uint32(moduleId))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmModuleGetStatuses(dcgmHandle):
moduleStatuses = dcgm_structs.c_dcgmModuleGetStatuses_v1()
moduleStatuses.version = dcgm_structs.dcgmModuleGetStatuses_version1
fn = dcgmFP("dcgmModuleGetStatuses")
ret = fn(dcgmHandle, byref(moduleStatuses))
dcgm_structs._dcgmCheckReturn(ret)
return moduleStatuses
@ensure_byte_strings()
def dcgmProfGetSupportedMetricGroups(dcgmHandle, gpuId):
msg = dcgm_structs.c_dcgmProfGetMetricGroups_v3()
msg.version = dcgm_structs.dcgmProfGetMetricGroups_version3
msg.gpuId = gpuId
fn = dcgmFP("dcgmProfGetSupportedMetricGroups")
ret = fn(dcgmHandle, byref(msg))
dcgm_structs._dcgmCheckReturn(ret)
return msg
@ensure_byte_strings()
def dcgmProfPause(dcgmHandle):
fn = dcgmFP("dcgmProfPause")
ret = fn(dcgmHandle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmProfResume(dcgmHandle):
fn = dcgmFP("dcgmProfResume")
ret = fn(dcgmHandle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmVersionInfo():
msg = dcgm_structs.c_dcgmVersionInfo_v2()
msg.version = dcgm_structs.dcgmVersionInfo_version2
fn = dcgmFP("dcgmVersionInfo")
ret = fn(byref(msg))
dcgm_structs._dcgmCheckReturn(ret)
return msg
@ensure_byte_strings()
def dcgmHostengineIsHealthy(dcgmHandle):
heHealth = dcgm_structs.c_dcgmHostengineHealth_v1()
heHealth.version = dcgm_structs.dcgmHostengineHealth_version1
fn = dcgmFP("dcgmHostengineIsHealthy")
ret = fn(dcgmHandle, byref(heHealth))
dcgm_structs._dcgmCheckReturn(ret)
return heHealth
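# Commented-out lifecycle sketch (added for illustration, in the spirit of the test program in
# dcgm_field_helpers): start an embedded host engine, list supported GPUs, then shut down.
# Only functions from this module and constants from dcgm_structs are used.
# dcgm_structs._dcgmInit()
# dcgmInit()
# handle = dcgmStartEmbedded(dcgm_structs.DCGM_OPERATION_MODE_MANUAL)
# gpu_ids = dcgmGetAllSupportedDevices(handle)
# print("Supported GPU IDs: %s" % gpu_ids)
# dcgmStopEmbedded(handle)
# dcgmShutdown()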
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for "dcgm_structs.h"
##
from ctypes import *
from ctypes.util import find_library
import sys
import os
import threading
import string
import json
from . import dcgm_value as dcgmvalue
import platform
from inspect import isclass
DCGM_MAX_STR_LENGTH = 256
DCGM_MAX_NUM_DEVICES = 32 # DCGM 2.0 and newer = 32. DCGM 1.8 and older = 16
DCGM_MAX_NUM_SWITCHES = 12
DCGM_NVLINK_MAX_LINKS_PER_GPU = 18
DCGM_NVLINK_MAX_LINKS_PER_GPU_LEGACY1 = 6
DCGM_NVLINK_MAX_LINKS_PER_GPU_LEGACY2 = 12
DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH_V1 = 36 # Max NvLinks per NvSwitch pre-Hopper
DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH = 64
DCGM_LANE_MAX_LANES_PER_NVSWICH_LINK = 4
DCGM_MAX_CLOCKS = 256
DCGM_MAX_NUM_GROUPS = 64
DCGM_MAX_BLOB_LENGTH = 4096
DCGM_MAX_VGPU_INSTANCES_PER_PGPU = 32
DCGM_VGPU_NAME_BUFFER_SIZE = 64
DCGM_GRID_LICENSE_BUFFER_SIZE = 128
DCGM_MAX_VGPU_TYPES_PER_PGPU = 32
DCGM_DEVICE_UUID_BUFFER_SIZE = 80
DCGM_MAX_FBC_SESSIONS = 256
#When more than one value is returned from a query, which order should it be returned in?
DCGM_ORDER_ASCENDING = 1
DCGM_ORDER_DESCENDING = 2
DCGM_OPERATION_MODE_AUTO = 1
DCGM_OPERATION_MODE_MANUAL = 2
DCGM_ENCODER_QUERY_H264 = 0
DCGM_ENCODER_QUERY_HEVC = 1
DCGM_FBC_SESSION_TYPE_UNKNOWN = 0 # Unknown
DCGM_FBC_SESSION_TYPE_TOSYS = 1 # FB capture for a system buffer
DCGM_FBC_SESSION_TYPE_CUDA = 2 # FB capture for a cuda buffer
DCGM_FBC_SESSION_TYPE_VID = 3 # FB capture for a Vid buffer
DCGM_FBC_SESSION_TYPE_HWENC = 4 # FB capture for a NVENC HW buffer
## C Type mappings ##
## Enums
# Return types
_dcgmReturn_t = c_uint
DCGM_ST_OK = 0 # Success
DCGM_ST_BADPARAM = -1 # A bad parameter was passed to a function
DCGM_ST_GENERIC_ERROR = -3 # A generic, unspecified error
DCGM_ST_MEMORY = -4 # An out of memory error occurred
DCGM_ST_NOT_CONFIGURED = -5 # Setting not configured
DCGM_ST_NOT_SUPPORTED = -6 # Feature not supported
DCGM_ST_INIT_ERROR = -7 # DCGM Init error
DCGM_ST_NVML_ERROR = -8 # When NVML returns error.
DCGM_ST_PENDING = -9 # Object is in pending state of something else
DCGM_ST_UNINITIALIZED = -10 # Object is in undefined state
DCGM_ST_TIMEOUT = -11 # Requested operation timed out
DCGM_ST_VER_MISMATCH = -12 # Version mismatch between received and understood API
DCGM_ST_UNKNOWN_FIELD = -13 # Unknown field id
DCGM_ST_NO_DATA = -14 # No data is available
DCGM_ST_STALE_DATA = -15
DCGM_ST_NOT_WATCHED = -16 # The given field is not being updated by the cache manager
DCGM_ST_NO_PERMISSION = -17 # We are not permissioned to perform the desired action
DCGM_ST_GPU_IS_LOST = -18 # GPU is no longer reachable
DCGM_ST_RESET_REQUIRED = -19 # GPU requires a reset
DCGM_ST_FUNCTION_NOT_FOUND = -20 # Unable to find function
DCGM_ST_CONNECTION_NOT_VALID = -21 # Connection to the host engine is not valid any longer
DCGM_ST_GPU_NOT_SUPPORTED = -22 # This GPU is not supported by DCGM
DCGM_ST_GROUP_INCOMPATIBLE = -23 # The GPUs of the provided group are not compatible with each other for the requested operation
DCGM_ST_MAX_LIMIT = -24
DCGM_ST_LIBRARY_NOT_FOUND = -25 # DCGM library could not be found
DCGM_ST_DUPLICATE_KEY = -26 #Duplicate key passed to the function
DCGM_ST_GPU_IN_SYNC_BOOST_GROUP = -27 #GPU is already a part of a sync boost group
DCGM_ST_GPU_NOT_IN_SYNC_BOOST_GROUP = -28 #GPU is a not a part of sync boost group
DCGM_ST_REQUIRES_ROOT = -29 #This operation cannot be performed when the host engine is running as non-root
DCGM_ST_NVVS_ERROR = -30 #DCGM GPU Diagnostic was successfully executed, but reported an error.
DCGM_ST_INSUFFICIENT_SIZE = -31 #An input argument is not large enough
DCGM_ST_FIELD_UNSUPPORTED_BY_API = -32 #The given field ID is not supported by the API being called
DCGM_ST_MODULE_NOT_LOADED = -33 #This request is serviced by a module of DCGM that is not currently loaded
DCGM_ST_IN_USE = -34 #The requested operation could not be completed because the affected resource is in use
DCGM_ST_GROUP_IS_EMPTY = -35 # The specified group is empty and this operation is not valid with an empty group
DCGM_ST_PROFILING_NOT_SUPPORTED = -36 # Profiling is not supported for this group of GPUs or GPU
DCGM_ST_PROFILING_LIBRARY_ERROR = -37 # The third-party Profiling module returned an unrecoverable error
DCGM_ST_PROFILING_MULTI_PASS = -38 # The requested profiling metrics cannot be collected in a single pass
DCGM_ST_DIAG_ALREADY_RUNNING = -39 # A diag instance is already running, cannot run a new diag until the current one finishes.
DCGM_ST_DIAG_BAD_JSON = -40 # The DCGM GPU Diagnostic returned JSON that cannot be parsed
DCGM_ST_DIAG_BAD_LAUNCH = -41 # Error while launching the DCGM GPU Diagnostic
DCGM_ST_DIAG_UNUSED = -42 # Unused
DCGM_ST_DIAG_THRESHOLD_EXCEEDED = -43 # A field value met or exceeded the error threshold.
DCGM_ST_INSUFFICIENT_DRIVER_VERSION = -44 # The installed driver version is insufficient for this API
DCGM_ST_INSTANCE_NOT_FOUND = -45 # The specified GPU instance does not exist
DCGM_ST_COMPUTE_INSTANCE_NOT_FOUND = -46 # The specified GPU compute instance does not exist
DCGM_ST_CHILD_NOT_KILLED = -47 # Couldn't kill a child process within the retries
DCGM_ST_3RD_PARTY_LIBRARY_ERROR = -48 # Detected an error in a 3rd-party library
DCGM_ST_INSUFFICIENT_RESOURCES = -49 # Not enough resources available
DCGM_ST_PLUGIN_EXCEPTION = -50 # Exception thrown from a diagnostic plugin
DCGM_ST_NVVS_ISOLATE_ERROR = -51 # The diagnostic returned an error that indicates the need for isolation
DCGM_GROUP_DEFAULT = 0 # All the GPUs on the node are added to the group
DCGM_GROUP_EMPTY = 1 # Creates an empty group
DCGM_GROUP_DEFAULT_NVSWITCHES = 2 # All NvSwitches of the node are added to the group
DCGM_GROUP_DEFAULT_INSTANCES = 3 # All GPU instances of the node are added to the group
DCGM_GROUP_DEFAULT_COMPUTE_INSTANCES = 4 # All compute instances of the node are added to the group
DCGM_GROUP_DEFAULT_ENTITIES = 5 # All entities are added to this default group
DCGM_GROUP_ALL_GPUS = 0x7fffffff
DCGM_GROUP_ALL_NVSWITCHES = 0x7ffffffe
DCGM_GROUP_ALL_INSTANCES = 0x7ffffffd
DCGM_GROUP_ALL_COMPUTE_INSTANCES = 0x7ffffffc
DCGM_GROUP_ALL_ENTITIES = 0x7ffffffb
DCGM_GROUP_MAX_ENTITIES = 64 #Maximum number of entities per entity group
DCGM_CONFIG_TARGET_STATE = 0 # The target configuration values to be applied
DCGM_CONFIG_CURRENT_STATE = 1 # The current configuration state
DCGM_CONFIG_POWER_CAP_INDIVIDUAL = 0 # Represents the power cap to be applied for each member of the group
DCGM_CONFIG_POWER_BUDGET_GROUP = 1 # Represents the power budget for the entire group
DCGM_CONFIG_COMPUTEMODE_DEFAULT = 0 # Default compute mode -- multiple contexts per device
DCGM_CONFIG_COMPUTEMODE_PROHIBITED = 1 # Compute-prohibited mode -- no contexts per device
DCGM_CONFIG_COMPUTEMODE_EXCLUSIVE_PROCESS = 2 # Compute-exclusive-process mode -- only one context per device, usable from multiple threads at a time
DCGM_TOPOLOGY_BOARD = 0x1
DCGM_TOPOLOGY_SINGLE = 0x2
DCGM_TOPOLOGY_MULTIPLE = 0x4
DCGM_TOPOLOGY_HOSTBRIDGE = 0x8
DCGM_TOPOLOGY_CPU = 0x10
DCGM_TOPOLOGY_SYSTEM = 0x20
DCGM_TOPOLOGY_NVLINK1 = 0x0100
DCGM_TOPOLOGY_NVLINK2 = 0x0200
DCGM_TOPOLOGY_NVLINK3 = 0x0400
DCGM_TOPOLOGY_NVLINK4 = 0x0800
DCGM_TOPOLOGY_NVLINK5 = 0x1000
DCGM_TOPOLOGY_NVLINK6 = 0x2000
DCGM_TOPOLOGY_NVLINK7 = 0x4000
DCGM_TOPOLOGY_NVLINK8 = 0x8000
DCGM_TOPOLOGY_NVLINK9 = 0x10000
DCGM_TOPOLOGY_NVLINK10 = 0x20000
DCGM_TOPOLOGY_NVLINK11 = 0x40000
DCGM_TOPOLOGY_NVLINK12 = 0x80000
# Diagnostic per gpu tests - fixed indices for dcgmDiagResponsePerGpu_t.results[]
DCGM_MEMORY_INDEX = 0
DCGM_DIAGNOSTIC_INDEX = 1
DCGM_PCI_INDEX = 2
DCGM_SM_STRESS_INDEX = 3
DCGM_TARGETED_STRESS_INDEX = 4
DCGM_TARGETED_POWER_INDEX = 5
DCGM_MEMORY_BANDWIDTH_INDEX = 6
DCGM_MEMTEST_INDEX = 7
DCGM_PULSE_TEST_INDEX = 8
DCGM_UNUSED1_TEST_INDEX = 9
DCGM_UNUSED2_TEST_INDEX = 10
DCGM_UNUSED3_TEST_INDEX = 11
DCGM_UNUSED4_TEST_INDEX = 12
DCGM_UNUSED5_TEST_INDEX = 13
DCGM_PER_GPU_TEST_COUNT_V7 = 9
DCGM_PER_GPU_TEST_COUNT_V8 = 13
# DCGM Diag Level One test indices
DCGM_SWTEST_DENYLIST = 0
DCGM_SWTEST_NVML_LIBRARY = 1
DCGM_SWTEST_CUDA_MAIN_LIBRARY = 2
DCGM_SWTEST_CUDA_RUNTIME_LIBRARY = 3
DCGM_SWTEST_PERMISSIONS = 4
DCGM_SWTEST_PERSISTENCE_MODE = 5
DCGM_SWTEST_ENVIRONMENT = 6
DCGM_SWTEST_PAGE_RETIREMENT = 7
DCGM_SWTEST_GRAPHICS_PROCESSES = 8
DCGM_SWTEST_INFOROM = 9
# This test is only run by itself, so it can use the 0 slot
DCGM_CONTEXT_CREATE_INDEX = 0
class DCGM_INTROSPECT_STATE(object):
DISABLED = 0
ENABLED = 1
# Lib loading
dcgmLib = None
libLoadLock = threading.Lock()
_dcgmLib_refcount = 0 # Incremented on each dcgmInit and decremented on dcgmShutdown
class DCGMError(Exception):
""" Class to return error values for DCGM """
_valClassMapping = dict()
# List of currently known error codes
_error_code_to_string = {
DCGM_ST_OK: "Success",
DCGM_ST_BADPARAM: "Bad parameter passed to function",
DCGM_ST_GENERIC_ERROR: "Generic unspecified error",
DCGM_ST_MEMORY: "Out of memory error",
DCGM_ST_NOT_CONFIGURED: "Setting not configured",
DCGM_ST_NOT_SUPPORTED: "Feature not supported",
DCGM_ST_INIT_ERROR: "DCGM initialization error",
DCGM_ST_NVML_ERROR: "NVML error",
DCGM_ST_PENDING: "Object is in a pending state",
DCGM_ST_UNINITIALIZED: "Object is in an undefined state",
DCGM_ST_TIMEOUT: "Timeout",
DCGM_ST_VER_MISMATCH: "API version mismatch",
DCGM_ST_UNKNOWN_FIELD: "Unknown field",
DCGM_ST_NO_DATA: "No data is available",
DCGM_ST_STALE_DATA: "Data is considered stale",
DCGM_ST_NOT_WATCHED: "Field is not being updated",
DCGM_ST_NO_PERMISSION: "Not permissioned",
DCGM_ST_GPU_IS_LOST: "GPU is unreachable",
DCGM_ST_RESET_REQUIRED: "GPU requires a reset",
DCGM_ST_FUNCTION_NOT_FOUND: "Unable to find function",
DCGM_ST_CONNECTION_NOT_VALID: "The connection to the host engine is not valid any longer",
DCGM_ST_GPU_NOT_SUPPORTED: "This GPU is not supported by DCGM",
DCGM_ST_GROUP_INCOMPATIBLE: "GPUs are incompatible with each other for the requested operation",
DCGM_ST_MAX_LIMIT: "Max limit reached for the object",
DCGM_ST_LIBRARY_NOT_FOUND: "DCGM library could not be found",
DCGM_ST_DUPLICATE_KEY: "Duplicate key passed to function",
DCGM_ST_GPU_IN_SYNC_BOOST_GROUP: "GPU is already a part of a sync boost group",
DCGM_ST_GPU_NOT_IN_SYNC_BOOST_GROUP: "GPU is not a part of the sync boost group",
DCGM_ST_REQUIRES_ROOT: "This operation is not supported when the host engine is running as non root",
DCGM_ST_NVVS_ERROR: "DCGM GPU Diagnostic returned an error.",
DCGM_ST_INSUFFICIENT_SIZE: "An input argument is not large enough",
DCGM_ST_FIELD_UNSUPPORTED_BY_API: "The given field ID is not supported by the API being called",
DCGM_ST_MODULE_NOT_LOADED: "This request is serviced by a module of DCGM that is not currently loaded",
DCGM_ST_IN_USE: "The requested operation could not be completed because the affected resource is in use",
DCGM_ST_GROUP_IS_EMPTY: "The specified group is empty, and this operation is incompatible with an empty group",
DCGM_ST_PROFILING_NOT_SUPPORTED: "Profiling is not supported for this group of GPUs or GPU",
DCGM_ST_PROFILING_LIBRARY_ERROR: "The third-party Profiling module returned an unrecoverable error",
DCGM_ST_PROFILING_MULTI_PASS: "The requested profiling metrics cannot be collected in a single pass",
DCGM_ST_DIAG_ALREADY_RUNNING: "A diag instance is already running, cannot run a new diag until the current one finishes",
DCGM_ST_DIAG_BAD_JSON: "The GPU Diagnostic returned Json that cannot be parsed.",
DCGM_ST_DIAG_BAD_LAUNCH: "Error while launching the GPU Diagnostic.",
DCGM_ST_DIAG_UNUSED: "Unused error code",
DCGM_ST_DIAG_THRESHOLD_EXCEEDED: "A field value met or exceeded the error threshold.",
DCGM_ST_INSUFFICIENT_DRIVER_VERSION: "The installed driver version is insufficient for this API",
DCGM_ST_INSTANCE_NOT_FOUND: "The specified GPU instance does not exist",
DCGM_ST_COMPUTE_INSTANCE_NOT_FOUND: "The specified GPU compute instance does not exist",
DCGM_ST_CHILD_NOT_KILLED: "Couldn't kill a child process within the retries",
DCGM_ST_3RD_PARTY_LIBRARY_ERROR: "Detected an error in a 3rd-party library",
DCGM_ST_INSUFFICIENT_RESOURCES: "Not enough resources available",
DCGM_ST_PLUGIN_EXCEPTION: "Exception thrown from a diagnostic plugin",
DCGM_ST_NVVS_ISOLATE_ERROR: "The diagnostic returned an error that indicates the need for isolation",
}
def __new__(typ, value):
"""
Maps value to a proper subclass of DCGMError.
"""
if typ == DCGMError:
typ = DCGMError._valClassMapping.get(value, typ)
obj = Exception.__new__(typ)
obj.info = None
obj.value = value
return obj
def __str__(self):
msg = None
try:
if self.value not in DCGMError._error_code_to_string:
DCGMError._error_code_to_string[self.value] = str(_dcgmErrorString(self.value))
msg = DCGMError._error_code_to_string[self.value]
# Ensure we catch all exceptions, otherwise the error code will be hidden in a traceback
except BaseException:
msg = "DCGM Error with code %d" % self.value
if self.info is not None:
if msg[-1] == ".":
msg = msg[:-1]
msg += ": '%s'" % self.info
return msg
def __eq__(self, other):
return self.value == other.value
def __hash__(self):
return hash(self.value)
def SetAdditionalInfo(self, msg):
"""
Sets msg as additional information returned by the string representation of DCGMError and subclasses.
Example output for DCGMError_Uninitialized subclass, with msg set to 'more info msg here' is
"DCGMError_Uninitialized: Object is in an undefined state: 'more info msg here'".
Ensure that msg is a string or an object for which the __str__() method does not throw an error
"""
self.info = msg
def dcgmExceptionClass(error_code):
return DCGMError._valClassMapping.get(error_code)
def _extractDCGMErrorsAsClasses():
'''
Generates a hierarchy of classes on top of the DCGMError class.
Each DCGM error gets a new DCGMError subclass. This way try/except blocks can filter appropriate
exceptions more easily.
DCGMError is the parent class. Each DCGM_ST_* gets its own subclass.
e.g. DCGM_ST_UNINITIALIZED will be turned into DCGMError_Uninitialized
'''
this_module = sys.modules[__name__]
dcgmErrorsNames = [x for x in dir(this_module) if x.startswith("DCGM_ST_")]
for err_name in dcgmErrorsNames:
# e.g. Turn DCGM_ST_UNINITIALIZED into DCGMError_Uninitialized
class_name = "DCGMError_" + string.capwords(err_name.replace("DCGM_ST_", ""), "_").replace("_", "")
err_val = getattr(this_module, err_name)
def gen_new(val):
def new(typ):
# pylint: disable=E1121
obj = DCGMError.__new__(typ, val)
return obj
return new
new_error_class = type(class_name, (DCGMError,), {'__new__': gen_new(err_val)})
new_error_class.__module__ = __name__
setattr(this_module, class_name, new_error_class)
DCGMError._valClassMapping[err_val] = new_error_class
_extractDCGMErrorsAsClasses()
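# Illustrative sketch (not part of the original bindings): because every DCGM_ST_* code gets its
# own DCGMError subclass above, callers can catch a specific failure mode directly, or look the
# subclass up at runtime via dcgmExceptionClass(). The helper below is hypothetical.
def _example_catch_uninitialized(fn, *args):
    # 'fn' is assumed to be any wrapper that raises DCGMError on failure.
    try:
        return fn(*args)
    except dcgmExceptionClass(DCGM_ST_UNINITIALIZED):
        # Equivalent to "except DCGMError_Uninitialized": the library/host engine
        # has not been initialized yet.
        return None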
class struct_c_dcgmUnit_t(Structure):
# Unit structures
pass # opaque handle
_dcgmUnit_t = POINTER(struct_c_dcgmUnit_t)
class _WrappedStructure():
def __init__(self, obj):
self.__dict__["_obj"] = obj
def __getattr__(self, key):
value = getattr(self._obj, key)
if isinstance(value, bytes):
return value.decode('utf-8')
if isclass(value):
return _WrappedStructure(value)
return value
def __getitem__(self, key):
value = self._obj[key]
if isinstance(value, bytes):
return value.decode('utf-8')
if isclass(value):
return _WrappedStructure(value)
return value
def __setattr__(self, key, raw_value):
def find_field_type(fields, key):
field = (f[1] for f in fields if f[0] == key)
try:
return next(field)
except StopIteration:
return None
if (key == '_obj'):
raise RuntimeError("Cannot set _obj")
value = raw_value
fieldtype = find_field_type(self._obj._fields_, key)
if fieldtype == c_uint and not isinstance(value, c_uint32):
value = int(value)
elif fieldtype == c_int and not isinstance(value, c_int32):
value = int(value)
elif isinstance(raw_value, str):
value = raw_value.encode('utf-8')
self._obj[key] = value
return value
class _DcgmStructure(Structure):
def __getattribute__(self, key):
value = super().__getattribute__(key)
if isinstance(value, bytes):
return value.decode('utf-8')
if isclass(value):
return _WrappedStructure(value)
return value
def __setattr__(self, key, raw_value):
def find_field_type(fields, key):
field = (f[1] for f in fields if f[0] == key)
try:
return next(field)
except StopIteration:
return None
value = raw_value
fieldtype = find_field_type(self._fields_, key)
if fieldtype == c_uint and not isinstance(value, c_uint32):
value = int(value)
elif fieldtype == c_int and not isinstance(value, c_int32):
value = int(value)
elif isinstance(raw_value, str):
value = raw_value.encode('utf-8')
return super().__setattr__(key, value)
class DcgmUnion(Union):
def __getattribute__(self, key):
value = super().__getattribute__(key)
if isinstance(value, bytes):
return value.decode('utf-8')
if isclass(value):
return _WrappedStructure(value)
return value
def __setattr__(self, key, raw_value):
def find_field_type(fields, key):
field = (f[1] for f in fields if f[0] == key)
try:
return next(field)
except StopIteration:
return None
value = raw_value
fieldtype = find_field_type(self._fields_, key)
if fieldtype == c_uint and not isinstance(value, c_uint32):
value = int(value)
elif fieldtype == c_int and not isinstance(value, c_int32):
value = int(value)
elif isinstance(raw_value, str):
value = raw_value.encode('utf-8')
return super().__setattr__(key, value)
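# Illustrative sketch (not part of the original bindings): _DcgmStructure transparently encodes
# str values to UTF-8 bytes on assignment and decodes bytes back to str on read, so callers can
# use plain Python strings with fixed-size c_char fields. The field layout below is hypothetical.
def _example_str_round_trip():
    class _ExampleStruct(_DcgmStructure):
        _fields_ = [('name', c_char * 16)]
    s = _ExampleStruct()
    s.name = "gpu0"   # stored as b"gpu0" by __setattr__
    return s.name     # read back as "gpu0" (str) by __getattribute__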
class _PrintableStructure(_DcgmStructure):
"""
Abstract class that produces nicer __str__ output than ctypes.Structure.
e.g. instead of:
>>> print(str(obj))
<class_name object at 0x7fdf82fef9e0>
this class will print
class_name(field_name: formatted_value, field_name: formatted_value)
_fmt_ dictionary of <str _field_ name> -> <str format>
e.g. class that has _field_ 'hex_value', c_uint could be formatted with
_fmt_ = {"hex_value" : "%08X"}
to produce nicer output.
Default formatting string for all fields can be set with the key "<default>", like:
_fmt_ = {"<default>" : "%d MHz"} # e.g. all values are numbers in MHz.
If not set, it is assumed to be just "%s"
Exact format of returned str from this class is subject to change in the future.
"""
_fmt_ = {}
def __str__(self):
result = []
for x in self._fields_:
key = x[0]
value = getattr(self, key)
fmt = "%s"
if key in self._fmt_:
fmt = self._fmt_[key]
elif "<default>" in self._fmt_:
fmt = self._fmt_["<default>"]
result.append(("%s: " + fmt) % (key, value))
return self.__class__.__name__ + "(" + ", ".join(result) + ")"
def FieldsSizeof(self):
size = 0
for s,t in self._fields_:
size = size + sizeof(t)
return size
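# Illustrative sketch (not part of the original bindings): a subclass controls how __str__ renders
# individual fields (or all fields, via the "<default>" key) through _fmt_. The structure below is
# hypothetical.
def _example_printable_fmt():
    class _ExampleClocks(_PrintableStructure):
        _fields_ = [('memClock', c_uint), ('smClock', c_uint)]
        _fmt_ = {"<default>": "%d MHz"}
    clocks = _ExampleClocks(memClock=877, smClock=1380)
    # e.g. "_ExampleClocks(memClock: 877 MHz, smClock: 1380 MHz)"
    return str(clocks)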
#JSON serializer for DCGM structures
class DcgmJSONEncoder(json.JSONEncoder):
def default(self, o): # pylint: disable=method-hidden
if isinstance(o, _PrintableStructure):
retVal = {}
for fieldName, fieldType in o._fields_:
subObj = getattr(o, fieldName)
if isinstance(subObj, _PrintableStructure):
subObj = self.default(subObj)
retVal[fieldName] = subObj
return retVal
elif isinstance(o, Array):
retVal = []
for i in range(len(o)):
subVal = {}
for fieldName, fieldType in o[i]._fields_:
subObj = getattr(o[i], fieldName)
if isinstance(subObj, _PrintableStructure):
subObj = self.default(subObj)
subVal[fieldName] = subObj
retVal.append(subVal)
return retVal
#Let the parent class handle this/fail
return json.JSONEncoder.default(self, o)
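# Illustrative sketch (not part of the original bindings): DcgmJSONEncoder can be handed to
# json.dumps() to serialize any _PrintableStructure, including nested structures and ctypes
# arrays of structures.
def _example_json_dump(printable_struct):
    # 'printable_struct' is assumed to be any filled-in _PrintableStructure instance.
    return json.dumps(printable_struct, cls=DcgmJSONEncoder)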
# Creates a unique version number for each struct
def make_dcgm_version(struct, ver):
return sizeof(struct) | (ver << 24)
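# Illustrative sketch (not part of the original bindings): the packed version value carries the
# struct size in the low 24 bits and the version number in the high byte, which is how the host
# engine checks that caller and library agree on a struct's layout. The struct below is hypothetical.
def _example_version_layout():
    class _Eight(Structure):
        _fields_ = [('a', c_uint64)]
    # For an 8-byte struct at version 2 the packed value is 0x02000008.
    return make_dcgm_version(_Eight, 2) == (sizeof(_Eight) | (2 << 24))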
# Function access ##
_dcgmGetFunctionPointer_cache = dict() # function pointers are cached to prevent unnecessary libLoadLock locking
def _dcgmGetFunctionPointer(name):
global dcgmLib
if name in _dcgmGetFunctionPointer_cache:
return _dcgmGetFunctionPointer_cache[name]
libLoadLock.acquire()
try:
# ensure library was loaded
if dcgmLib is None:
raise DCGMError(DCGM_ST_UNINITIALIZED)
try:
_dcgmGetFunctionPointer_cache[name] = getattr(dcgmLib, name)
return _dcgmGetFunctionPointer_cache[name]
except AttributeError:
raise DCGMError(DCGM_ST_FUNCTION_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
# C function wrappers ##
def _LoadDcgmLibrary(libDcgmPath=None):
"""
Load the library if it isn't loaded already
:param libDcgmPath: Optional path to the libdcgm*.so libraries. Will use system defaults if not specified.
:type libDcgmPath: str
:return: None
"""
global dcgmLib
if dcgmLib is None:
# lock to ensure only one caller loads the library
libLoadLock.acquire()
try:
# ensure the library still isn't loaded
if dcgmLib is None:
try:
if sys.platform[:3] == "win":
# cdecl calling convention
# load dcgm.dll from %ProgramFiles%/NVIDIA Corporation/NVSMI/dcgm.dll
dcgmLib = CDLL(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"), "NVIDIA Corporation/NVSMI/dcgm.dll"))
else:
if libDcgmPath:
lib_file = os.path.join(libDcgmPath, "libdcgm.so.3")
else:
# Try Debian-based distros
lib_file = '/usr/lib/{}-linux-gnu/libdcgm.so.3'.format(platform.machine())
if not os.path.isfile(lib_file):
# Presume Redhat-based distros
lib_file = '/usr/lib64/libdcgm.so.3'
dcgmLib = CDLL(lib_file)
except OSError as ose:
_dcgmCheckReturn(DCGM_ST_LIBRARY_NOT_FOUND)
if dcgmLib is None:
_dcgmCheckReturn(DCGM_ST_LIBRARY_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
def _dcgmInit(libDcgmPath=None):
_LoadDcgmLibrary(libDcgmPath)
# Atomically update refcount
global _dcgmLib_refcount
libLoadLock.acquire()
_dcgmLib_refcount += 1
libLoadLock.release()
return None
def _dcgmCheckReturn(ret):
if ret != DCGM_ST_OK:
raise DCGMError(ret)
return ret
def _dcgmShutdown():
# Leave the library loaded, but shutdown the interface
fn = _dcgmGetFunctionPointer("dcgmShutdown")
ret = fn()
_dcgmCheckReturn(ret)
# Atomically update refcount
global _dcgmLib_refcount
libLoadLock.acquire()
if 0 < _dcgmLib_refcount:
_dcgmLib_refcount -= 1
libLoadLock.release()
return None
def _dcgmErrorString(result):
fn = _dcgmGetFunctionPointer("dcgmErrorString")
fn.restype = c_char_p # otherwise return is an int
str = fn(result)
return str
# Represents a link object. 'type' should be one of DCGM_FE_GPU or
# DCGM_FE_SWITCH; 'id' is the gpuId or switchId of the associated GPU or switch.
#
class c_dcgm_link_t(_PrintableStructure):
_fields_ = [
('type', c_uint8),
('index', c_uint8),
('id', c_uint16)
]
class c_dcgmConnectV2Params_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('persistAfterDisconnect', c_uint)
]
c_dcgmConnectV2Params_version1 = make_dcgm_version(c_dcgmConnectV2Params_v1, 1)
class c_dcgmConnectV2Params_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('persistAfterDisconnect', c_uint),
('timeoutMs', c_uint),
('addressIsUnixSocket', c_uint)
]
c_dcgmConnectV2Params_version2 = make_dcgm_version(c_dcgmConnectV2Params_v2, 2)
c_dcgmConnectV2Params_version = c_dcgmConnectV2Params_version2
class c_dcgmHostengineHealth_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('overallHealth', c_uint),
]
dcgmHostengineHealth_version1 = make_dcgm_version(c_dcgmHostengineHealth_v1, 1)
dcgmHostengineHealth_version = dcgmHostengineHealth_version1
#Represents memory and proc clocks for a device
class c_dcgmClockSet_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('memClock', c_uint), #/* Memory Clock */
('smClock',c_uint) #/* SM Clock */
]
# Represents an entityGroupId + entityId pair to uniquely identify a given entityId inside
# a group of entities
# Added in DCGM 1.5.0
class c_dcgmGroupEntityPair_t(_PrintableStructure):
_fields_ = [
('entityGroupId', c_uint32), #Entity Group ID entity belongs to
('entityId', c_uint32) #Entity ID of the entity
]
# /**
# * Structure to store information for DCGM group (v2)
# * Added in DCGM 1.5.0
# */
class c_dcgmGroupInfo_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('count', c_uint),
('groupName', c_char * DCGM_MAX_STR_LENGTH),
('entityList', c_dcgmGroupEntityPair_t * DCGM_GROUP_MAX_ENTITIES)
]
c_dcgmGroupInfo_version2 = make_dcgm_version(c_dcgmGroupInfo_v2, 2)
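# Illustrative sketch (not part of the original bindings): only the first 'count' entries of
# entityList are valid; the remainder of the fixed-size array is unused.
def _example_group_entities(group_info):
    # 'group_info' is assumed to be a c_dcgmGroupInfo_v2 filled in by the host engine.
    return [(e.entityGroupId, e.entityId) for e in group_info.entityList[:group_info.count]]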
DcgmiMigProfileNone = 0 # No profile (for GPUs)
DcgmMigProfileGpuInstanceSlice1 = 1 # GPU instance slice 1
DcgmMigProfileGpuInstanceSlice2 = 2 # GPU instance slice 2
DcgmMigProfileGpuInstanceSlice3 = 3 # GPU instance slice 3
DcgmMigProfileGpuInstanceSlice4 = 4 # GPU instance slice 4
DcgmMigProfileGpuInstanceSlice7 = 5 # GPU instance slice 7
DcgmMigProfileGpuInstanceSlice8 = 6 # GPU instance slice 8
DcgmMigProfileComputeInstanceSlice1 = 30 # compute instance slice 1
DcgmMigProfileComputeInstanceSlice2 = 31 # compute instance slice 2
DcgmMigProfileComputeInstanceSlice3 = 32 # compute instance slice 3
DcgmMigProfileComputeInstanceSlice4 = 33 # compute instance slice 4
DcgmMigProfileComputeInstanceSlice7 = 34 # compute instance slice 7
DcgmMigProfileComputeInstanceSlice8 = 35 # compute instance slice 8
# /**
# * Represents a pair of entity pairings to uniquely identify an entity and its place in the hierarchy.
# */
class c_dcgmMigHierarchyInfo_t(_PrintableStructure):
_fields_ = [
('entity', c_dcgmGroupEntityPair_t),
('parent', c_dcgmGroupEntityPair_t),
('sliceProfile', c_uint),
]
class c_dcgmMigEntityInfo_t(_PrintableStructure):
_fields_ = [
('gpuUuid', c_char * 128), # GPU UUID
('nvmlGpuIndex', c_uint), # GPU index from NVML
('nvmlInstanceId', c_uint), # GPU instance index within GPU
('nvmlComputeInstanceId', c_uint), # GPU Compute instance index within GPU instance
('nvmlMigProfileId', c_uint), # Unique profile ID for GPU or Compute instances
('nvmlProfileSlices', c_uint), # Number of slices in the MIG profile
]
class c_dcgmMigHierarchyInfo_v2(_PrintableStructure):
_fields_ = [
('entity', c_dcgmGroupEntityPair_t),
('parent', c_dcgmGroupEntityPair_t),
('info', c_dcgmMigEntityInfo_t),
]
DCGM_MAX_INSTANCES_PER_GPU = 8
# There can never be more compute instances per GPU than instances per GPU because a compute instance
# is part of an instance
DCGM_MAX_COMPUTE_INSTANCES_PER_GPU = DCGM_MAX_INSTANCES_PER_GPU
# Currently, there cannot be more than 14 instances + compute instances: there can never be more than
# 7 instances and never more than 7 compute instances
DCGM_MAX_TOTAL_INSTANCES = 14
DCGM_MAX_HIERARCHY_INFO = DCGM_MAX_NUM_DEVICES * DCGM_MAX_TOTAL_INSTANCES
DCGM_MAX_INSTANCES = DCGM_MAX_NUM_DEVICES * DCGM_MAX_INSTANCES_PER_GPU
# The maximum number of compute instances is always the same as the maximum number of instances because each
# compute instance is part of an instance
DCGM_MAX_COMPUTE_INSTANCES = DCGM_MAX_INSTANCES
DCGM_MIG_RECONFIG_DELAY_PROCESSING = 0x1 # Ask the hostengine to wait to process reconfiguring the GPUs
class c_dcgmMigHierarchy_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('count', c_uint),
('entityList', c_dcgmMigHierarchyInfo_v2 * DCGM_MAX_HIERARCHY_INFO)
]
c_dcgmMigHierarchy_version2 = make_dcgm_version(c_dcgmMigHierarchy_v2, 2)
class c_dcgmDeleteMigEntity_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('entityGroupId', c_uint32),
('entityId', c_uint32),
('flags', c_uint),
]
c_dcgmDeleteMigEntity_version1 = make_dcgm_version(c_dcgmDeleteMigEntity_v1, 1)
# /**
# * Enum values for the kinds of MIG creations
# */
DcgmMigCreateGpuInstance = 0 # Create a GPU instance
DcgmMigCreateComputeInstance = 1 # Create a compute instance
class c_dcgmCreateMigEntity_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('parentId', c_uint32),
('profile', c_uint32),
('createOption', c_uint32),
('flags', c_uint),
]
c_dcgmCreateMigEntity_version1 = make_dcgm_version(c_dcgmCreateMigEntity_v1, 1)
# /**
# * Structure to represent error attributes
# */
class c_dcgmErrorInfo_v1(_PrintableStructure):
_fields_ = [
('gpuId', c_uint),
('fieldId', c_ushort),
('status', c_int)
]
# /**
# * Represents list of supported clocks for a device
# */
class c_dcgmDeviceSupportedClockSets_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('count', c_uint),
('clockSet', c_dcgmClockSet_v1 * DCGM_MAX_CLOCKS)
]
# /**
# * Represents accounting information for a device and pid
# */
class c_dcgmDevicePidAccountingStats_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('pid', c_uint32),
('gpuUtilization', c_uint32),
('memoryUtilization', c_uint32),
('maxMemoryUsage', c_uint64),
('startTimestamp', c_uint64),
('activeTimeUsec', c_uint64)
]
# /**
# * Represents thermal information
# */
class c_dcgmDeviceThermals_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('slowdownTemp', c_uint),
('shutdownTemp', c_uint)
]
# /**
# * Represents various power limits
# */
class c_dcgmDevicePowerLimits_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('curPowerLimit', c_uint),
('defaultPowerLimit', c_uint),
('enforcedPowerLimit', c_uint),
('minPowerLimit', c_uint),
('maxPowerLimit', c_uint)
]
# /**
# * Represents device identifiers
# */
class c_dcgmDeviceIdentifiers_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('brandName', c_char * DCGM_MAX_STR_LENGTH),
('deviceName', c_char * DCGM_MAX_STR_LENGTH),
('pciBusId', c_char * DCGM_MAX_STR_LENGTH),
('serial', c_char * DCGM_MAX_STR_LENGTH),
('uuid', c_char * DCGM_MAX_STR_LENGTH),
('vbios', c_char * DCGM_MAX_STR_LENGTH),
('inforomImageVersion', c_char * DCGM_MAX_STR_LENGTH),
('pciDeviceId', c_uint32),
('pciSubSystemId', c_uint32),
('driverVersion', c_char * DCGM_MAX_STR_LENGTH),
('virtualizationMode', c_uint32)
]
# /**
# * Represents memory utilization
# */
class c_dcgmDeviceMemoryUsage_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('bar1Total', c_uint),
('fbTotal', c_uint),
('fbUsed', c_uint),
('fbFree', c_uint)
]
# /**
# * Represents utilization values of vGPUs running on the device
# */
class c_dcgmDeviceVgpuUtilInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuId', c_uint),
('smUtil', c_uint),
('memUtil', c_uint),
('encUtil', c_uint),
('decUtil', c_uint)
]
# /**
# * Utilization values for processes running within vGPU VMs using the device
# */
class c_dcgmDeviceVgpuProcessUtilInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuId', c_uint),
('pid', c_uint),
('processName', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('smUtil', c_uint),
('memUtil', c_uint),
('encUtil', c_uint),
('decUtil', c_uint)
]
# /**
# * Represents current encoder statistics for the given device/vGPU instance
# */
class c_dcgmDeviceEncStats_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('sessionCount', c_uint),
('averageFps', c_uint),
('averageLatency', c_uint)
]
# /**
# * Represents information about active encoder sessions on the given vGPU instance
# */
class c_dcgmDeviceVgpuEncSessions_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuId', c_uint),
('sessionId', c_uint),
('pid', c_uint),
('codecType', c_uint),
('hResolution', c_uint),
('vResolution', c_uint),
('averageFps', c_uint),
('averageLatency', c_uint)
]
# /**
# * Represents current frame buffer capture sessions statistics for the given device/vGPU instance
# */
class c_dcgmDeviceFbcStats_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('sessionCount', c_uint),
('averageFps', c_uint),
('averageLatency', c_uint)
]
# /**
# * Represents information about an active FBC session on the given device/vGPU instance
# */
class c_dcgmDeviceFbcSessionInfo_t(_PrintableStructure):
_fields_ = [
('version', c_uint),
('sessionId', c_uint),
('pid', c_uint),
('vgpuId', c_uint),
('displayOrdinal', c_uint),
('sessionType', c_uint),
('sessionFlags', c_uint),
('hMaxResolution', c_uint),
('vMaxResolution', c_uint),
('hResolution', c_uint),
('vResolution', c_uint),
('averageFps', c_uint),
('averageLatency', c_uint)
]
# /**
# * Represents all the active FBC sessions on the given device/vGPU instance
# */
class c_dcgmDeviceFbcSessions_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('sessionCount', c_uint),
('sessionInfo', c_dcgmDeviceFbcSessionInfo_t * DCGM_MAX_FBC_SESSIONS)
]
# /**
# * Represents static info related to vGPU types supported on the device
# */
class c_dcgmDeviceVgpuTypeInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuTypeId', c_uint),
('vgpuTypeName', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('vgpuTypeClass', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('vgpuTypeLicense', c_char * DCGM_GRID_LICENSE_BUFFER_SIZE),
('deviceId', c_uint),
('subsystemId', c_uint),
('numDisplayHeads', c_uint),
('maxInstances', c_uint),
('frameRateLimit', c_uint),
('maxResolutionX', c_uint),
('maxResolutionY', c_uint),
('fbTotal', c_uint)
]
class c_dcgmDeviceVgpuTypeInfo_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuTypeId', c_uint),
('vgpuTypeName', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('vgpuTypeClass', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('vgpuTypeLicense', c_char * DCGM_GRID_LICENSE_BUFFER_SIZE),
('deviceId', c_uint),
('subsystemId', c_uint),
('numDisplayHeads', c_uint),
('maxInstances', c_uint),
('frameRateLimit', c_uint),
('maxResolutionX', c_uint),
('maxResolutionY', c_uint),
('fbTotal', c_uint),
('gpuInstanceProfileId', c_uint)
]
dcgmDeviceVgpuTypeInfo_version2 = make_dcgm_version(c_dcgmDeviceVgpuTypeInfo_v2, 2)
class c_dcgmDeviceSettings_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('persistenceModeEnabled', c_uint),
('migModeEnabled', c_uint),
('confidentialComputeMode', c_uint),
]
# /**
# * Represents attributes corresponding to a device
# */
class c_dcgmDeviceAttributes_deprecated_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('clockSets', c_dcgmDeviceSupportedClockSets_v1),
('thermalSettings', c_dcgmDeviceThermals_v1),
('powerLimits', c_dcgmDevicePowerLimits_v1),
('identifiers', c_dcgmDeviceIdentifiers_v1),
('memoryUsage', c_dcgmDeviceMemoryUsage_v1),
('unused', c_char * 208)
]
dcgmDeviceAttributes_deprecated_version1 = make_dcgm_version(c_dcgmDeviceAttributes_deprecated_v1, 1)
# /**
# * Represents attributes corresponding to a device
# */
class c_dcgmDeviceAttributes_v3(_PrintableStructure):
_fields_ = [
('version', c_uint),
('clockSets', c_dcgmDeviceSupportedClockSets_v1),
('thermalSettings', c_dcgmDeviceThermals_v1),
('powerLimits', c_dcgmDevicePowerLimits_v1),
('identifiers', c_dcgmDeviceIdentifiers_v1),
('memoryUsage', c_dcgmDeviceMemoryUsage_v1),
('settings', c_dcgmDeviceSettings_v2),
]
dcgmDeviceAttributes_version3 = make_dcgm_version(c_dcgmDeviceAttributes_v3, 3)
# /**
# * Represents attributes info for a MIG device
# */
class c_dcgmDeviceMigAttributesInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('gpuInstanceId', c_uint),
('computeInstanceId', c_uint),
('multiprocessorCount', c_uint),
('sharedCopyEngineCount', c_uint),
('sharedDecoderCount', c_uint),
('sharedEncoderCount', c_uint),
('sharedJpegCount', c_uint),
('sharedOfaCount', c_uint),
('gpuInstanceSliceCount', c_uint),
('computeInstanceSliceCount', c_uint),
('memorySizeMB', c_uint64),
]
dcgmDeviceMigAttributesInfo_version1 = make_dcgm_version(c_dcgmDeviceMigAttributesInfo_v1, 1)
# /**
# * Represents attributes for a MIG device
# */
class c_dcgmDeviceMigAttributes_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('migDevicesCount', c_uint),
('migAttributesInfo', c_dcgmDeviceMigAttributesInfo_v1),
]
dcgmDeviceMigAttributes_version1 = make_dcgm_version(c_dcgmDeviceMigAttributes_v1, 1)
# /**
# * Represents GPU instance profile information
# */
class c_dcgmGpuInstanceProfileInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('id', c_uint),
('isP2pSupported', c_uint),
('sliceCount', c_uint),
('instanceCount', c_uint),
('multiprocessorCount', c_uint),
('copyEngineCount', c_uint),
('decoderCount', c_uint),
('encoderCount', c_uint),
('jpegCount', c_uint),
('ofaCount', c_uint),
('memorySizeMB', c_uint64),
]
dcgmGpuInstanceProfileInfo_version1 = make_dcgm_version(c_dcgmGpuInstanceProfileInfo_v1, 1)
# /**
# * Represents GPU instance profiles
# */
class c_dcgmGpuInstanceProfiles_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('profileCount', c_uint),
('profileInfo', c_dcgmGpuInstanceProfileInfo_v1),
]
dcgmGpuInstanceProfiles_version1 = make_dcgm_version(c_dcgmGpuInstanceProfiles_v1, 1)
# /**
# * Represents Compute instance profile information
# */
class c_dcgmComputeInstanceProfileInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('gpuInstanceId', c_uint),
('id', c_uint),
('sliceCount', c_uint),
('instanceCount', c_uint),
('multiprocessorCount', c_uint),
('sharedCopyEngineCount', c_uint),
('sharedDecoderCount', c_uint),
('sharedEncoderCount', c_uint),
('sharedJpegCount', c_uint),
('sharedOfaCount', c_uint),
]
dcgmComputeInstanceProfileInfo_version1 = make_dcgm_version(c_dcgmComputeInstanceProfileInfo_v1, 1)
# /**
# * Represents Compute instance profiles
# */
class c_dcgmComputeInstanceProfiles_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('profileCount', c_uint),
('profileInfo', c_dcgmComputeInstanceProfileInfo_v1),
]
dcgmComputeInstanceProfiles_version1 = make_dcgm_version(c_dcgmComputeInstanceProfiles_v1, 1)
# /**
# * Represents vGPU attributes corresponding to a device
# */
class c_dcgmVgpuDeviceAttributes_v6(_PrintableStructure):
_fields_ = [
('version', c_uint),
('activeVgpuInstanceCount', c_uint),
('activeVgpuInstanceIds', c_uint * DCGM_MAX_VGPU_INSTANCES_PER_PGPU),
('creatableVgpuTypeCount', c_uint),
('creatableVgpuTypeIds', c_uint * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('supportedVgpuTypeCount', c_uint),
('supportedVgpuTypeInfo', c_dcgmDeviceVgpuTypeInfo_v1 * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('vgpuUtilInfo', c_dcgmDeviceVgpuUtilInfo_v1 * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('gpuUtil', c_uint),
('memCopyUtil', c_uint),
('encUtil', c_uint),
('decUtil', c_uint)
]
dcgmVgpuDeviceAttributes_version6 = make_dcgm_version(c_dcgmVgpuDeviceAttributes_v6, 1)
class c_dcgmVgpuDeviceAttributes_v7(_PrintableStructure):
_fields_ = [
('version', c_uint),
('activeVgpuInstanceCount', c_uint),
('activeVgpuInstanceIds', c_uint * DCGM_MAX_VGPU_INSTANCES_PER_PGPU),
('creatableVgpuTypeCount', c_uint),
('creatableVgpuTypeIds', c_uint * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('supportedVgpuTypeCount', c_uint),
('supportedVgpuTypeInfo', c_dcgmDeviceVgpuTypeInfo_v2 * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('vgpuUtilInfo', c_dcgmDeviceVgpuUtilInfo_v1 * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('gpuUtil', c_uint),
('memCopyUtil', c_uint),
('encUtil', c_uint),
('decUtil', c_uint)
]
dcgmVgpuDeviceAttributes_version7 = make_dcgm_version(c_dcgmVgpuDeviceAttributes_v7, 7)
# /**
# * Represents attributes specific to vGPU instance
# */
class c_dcgmVgpuInstanceAttributes_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vmId', c_char * DCGM_DEVICE_UUID_BUFFER_SIZE),
('vmName', c_char * DCGM_DEVICE_UUID_BUFFER_SIZE),
('vgpuTypeId', c_uint),
('vgpuUuid', c_char * DCGM_DEVICE_UUID_BUFFER_SIZE),
('vgpuDriverVersion', c_char * DCGM_DEVICE_UUID_BUFFER_SIZE),
('fbUsage', c_uint),
('licenseStatus', c_uint),
('frameRateLimit', c_uint)
]
dcgmVgpuInstanceAttributes_version1 = make_dcgm_version(c_dcgmVgpuInstanceAttributes_v1, 1)
class c_dcgmConfigPowerLimit(_PrintableStructure):
_fields_ = [
('type', c_uint),
('val', c_uint)
]
class c_dcgmConfigPerfStateSettings_t(_PrintableStructure):
_fields_ = [
('syncBoost', c_uint),
('targetClocks', c_dcgmClockSet_v1),
]
# Structure to represent default configuration for a device
class c_dcgmDeviceConfig_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('gpuId', c_uint),
('mEccMode', c_uint),
('mComputeMode', c_uint),
('mPerfState', c_dcgmConfigPerfStateSettings_t),
('mPowerLimit', c_dcgmConfigPowerLimit)
]
dcgmDeviceConfig_version1 = make_dcgm_version(c_dcgmDeviceConfig_v1, 1)
# Structure to represent default vGPU configuration for a device
class c_dcgmDeviceVgpuConfig_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('gpuId', c_uint),
('mEccMode', c_uint),
('mComputeMode', c_uint),
('mPerfState', c_dcgmConfigPerfStateSettings_t),
('mPowerLimit', c_dcgmConfigPowerLimit)
]
def SetBlank(self):
#Does not set version or gpuId
self.mEccMode = dcgmvalue.DCGM_INT32_BLANK
self.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
self.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
self.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
self.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
self.mPowerLimit.type = DCGM_CONFIG_POWER_CAP_INDIVIDUAL
self.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
dcgmDeviceVgpuConfig_version1 = make_dcgm_version(c_dcgmDeviceVgpuConfig_v1, 1)
# Structure to receive update on the list of metrics.
class c_dcgmPolicyUpdate_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('power', c_uint)
]
dcgmPolicyUpdate_version1 = make_dcgm_version(c_dcgmPolicyUpdate_v1, 1)
# Represents a Callback to receive power updates from the host engine
_dcgmRecvUpdates_t = c_void_p
# Define the structure that contains specific policy information
class c_dcgmPolicyViolation_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('notifyOnEccDbe', c_uint),
('notifyOnPciEvent', c_uint),
('notifyOnMaxRetiredPages', c_uint)
]
dcgmPolicyViolation_version1 = make_dcgm_version(c_dcgmPolicyViolation_v1, 1)
class c_dcgmWatchFieldValue_v1(_PrintableStructure):
_fields_ = []
dcgmWatchFieldValue_version1 = make_dcgm_version(c_dcgmWatchFieldValue_v1, 1)
class c_dcgmUnwatchFieldValue_v1(_PrintableStructure):
_fields_ = []
dcgmUnwatchFieldValue_version1 = make_dcgm_version(c_dcgmUnwatchFieldValue_v1, 1)
class c_dcgmUpdateAllFields_v1(_PrintableStructure):
_fields_ = []
dcgmUpdateAllFields_version1 = make_dcgm_version(c_dcgmUpdateAllFields_v1, 1)
dcgmGetMultipleValuesForFieldResponse_version1 = 1
# policy enums (and table indices)
DCGM_POLICY_COND_IDX_DBE = 0
DCGM_POLICY_COND_IDX_PCI = 1
DCGM_POLICY_COND_IDX_MAX_PAGES_RETIRED = 2
DCGM_POLICY_COND_IDX_THERMAL = 3
DCGM_POLICY_COND_IDX_POWER = 4
DCGM_POLICY_COND_IDX_NVLINK = 5
DCGM_POLICY_COND_IDX_XID = 6
DCGM_POLICY_COND_IDX_MAX = 7
# policy enum bitmasks
DCGM_POLICY_COND_DBE = 0x1
DCGM_POLICY_COND_PCI = 0x2
DCGM_POLICY_COND_MAX_PAGES_RETIRED = 0x4
DCGM_POLICY_COND_THERMAL = 0x8
DCGM_POLICY_COND_POWER = 0x10
DCGM_POLICY_COND_NVLINK = 0x20
DCGM_POLICY_COND_XID = 0x40
DCGM_POLICY_COND_MAX = 7
DCGM_POLICY_MODE_AUTOMATED = 0
DCGM_POLICY_MODE_MANUAL = 1
DCGM_POLICY_ISOLATION_NONE = 0
DCGM_POLICY_ACTION_NONE = 0
DCGM_POLICY_ACTION_GPURESET = 1 #Deprecated
DCGM_POLICY_VALID_NONE = 0
DCGM_POLICY_VALID_SV_SHORT = 1
DCGM_POLICY_VALID_SV_MED = 2
DCGM_POLICY_VALID_SV_LONG = 3
DCGM_POLICY_VALID_SV_XLONG = 4
DCGM_POLICY_FAILURE_NONE = 0
DCGM_DIAG_LVL_INVALID = 0
DCGM_DIAG_LVL_SHORT = 10
DCGM_DIAG_LVL_MED = 20
DCGM_DIAG_LVL_LONG = 30
DCGM_DIAG_LVL_XLONG = 40
DCGM_DIAG_RESULT_PASS = 0
DCGM_DIAG_RESULT_SKIP = 1
DCGM_DIAG_RESULT_WARN = 2
DCGM_DIAG_RESULT_FAIL = 3
DCGM_DIAG_RESULT_NOT_RUN = 4
class c_dcgmPolicyConditionParmTypes_t(DcgmUnion):
_fields_ = [
('boolean', c_bool),
('llval', c_longlong),
]
class c_dcgmPolicyConditionParms_t(_PrintableStructure):
_fields_ = [
('tag', c_uint),
('val', c_dcgmPolicyConditionParmTypes_t)
]
class c_dcgmPolicy_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('condition', c_uint), # an OR'd list of DCGM_POLICY_COND_*
('mode', c_uint),
('isolation', c_uint),
('action', c_uint),
('validation', c_uint),
('response', c_uint),
('parms', c_dcgmPolicyConditionParms_t * DCGM_POLICY_COND_MAX)
]
dcgmPolicy_version1 = make_dcgm_version(c_dcgmPolicy_v1, 1)
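# Illustrative sketch (not part of the original bindings): 'condition' is a bitmask OR'd from the
# DCGM_POLICY_COND_* values above, and 'parms' is indexed by the matching DCGM_POLICY_COND_IDX_*
# value. The tag value of 1 for the long-long union member is an assumption here.
def _example_build_policy(max_retired_pages):
    policy = c_dcgmPolicy_v1()
    policy.version = dcgmPolicy_version1
    policy.condition = DCGM_POLICY_COND_DBE | DCGM_POLICY_COND_MAX_PAGES_RETIRED
    parm = policy.parms[DCGM_POLICY_COND_IDX_MAX_PAGES_RETIRED]
    parm.tag = 1                        # assumed: selects the 'llval' union member
    parm.val.llval = max_retired_pages
    return policy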
class c_dcgmPolicyConditionPci_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("counter", c_uint) # value of the PCIe replay counter
]
class c_dcgmPolicyConditionDbe_t(_PrintableStructure):
LOCATIONS = {
'L1': 0,
'L2': 1,
'DEVICE': 2,
'REGISTER': 3,
'TEXTURE': 4
}
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("location", c_int), # location of the error (one of self.LOCATIONS)
("numerrors", c_uint) # number of errors
]
class c_dcgmPolicyConditionMpr_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("sbepages", c_uint), # number of pending pages due to SBE
("dbepages", c_uint) # number of pending pages due to DBE
]
class c_dcgmPolicyConditionThermal_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("thermalViolation", c_uint) # Temperature reached that violated policy
]
class c_dcgmPolicyConditionPower_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("powerViolation", c_uint) # Power value reached that violated policyy
]
class c_dcgmPolicyConditionNvlink_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("fieldId", c_ushort), # FieldId of the nvlink error counter
("counter", c_uint) # Error value reached that violated policyy
]
class c_dcgmPolicyConditionXID_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("errnum", c_uint) # XID error number
]
class c_dcgmPolicyCallbackResponse_v1(_PrintableStructure):
class Value(DcgmUnion):
# implement more of the fields when a test requires them
_fields_ = [
("dbe", c_dcgmPolicyConditionDbe_t), # ECC DBE return structure
("pci", c_dcgmPolicyConditionPci_t), # PCI replay error return structure
("mpr", c_dcgmPolicyConditionMpr_t), # Max retired pages limit return structure
("thermal", c_dcgmPolicyConditionThermal_t), # Thermal policy violations return structure
("power", c_dcgmPolicyConditionPower_t), # Power policy violations return structure
("nvlink", c_dcgmPolicyConditionNvlink_t), # Nvlink policy violations return structure..
("xid", c_dcgmPolicyConditionXID_t) # XID policy violations return structure
]
_fields_ = [
("version", c_uint),
("condition", c_int), # an OR'ed list of DCGM_POLICY_COND_*
("val", Value)
]
class c_dcgmFieldValue_v1_value(DcgmUnion):
_fields_ = [
('i64', c_int64),
('dbl', c_double),
('str', c_char * DCGM_MAX_STR_LENGTH),
('blob', c_byte * DCGM_MAX_BLOB_LENGTH)
]
# This structure is used to represent the value of the field to be queried.
class c_dcgmFieldValue_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('fieldId', c_ushort),
('fieldType', c_short),
('status', c_int),
('ts', c_int64),
('value', c_dcgmFieldValue_v1_value)
]
dcgmFieldValue_version1 = make_dcgm_version(c_dcgmFieldValue_v1, 1)
# This structure is used to represent the value of the field to be queried (version 2)
class c_dcgmFieldValue_v2(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('entityGroupId', c_uint),
('entityId', c_uint),
('fieldId', c_ushort),
('fieldType', c_short),
('status', c_int),
('unused', c_uint),
('ts', c_int64),
('value', c_dcgmFieldValue_v1_value)
]
dcgmFieldValue_version2 = make_dcgm_version(c_dcgmFieldValue_v2, 2)
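# Illustrative sketch (not part of the original bindings): the 'value' union is interpreted
# according to 'fieldType'. The single-character type codes used below ('i', 'd', 's') follow the
# DCGM field-type convention and are an assumption here; the canonical constants live in the
# dcgm_fields module.
def _example_decode_field_value(fv):
    # 'fv' is assumed to be a c_dcgmFieldValue_v1 or c_dcgmFieldValue_v2 returned by the host engine.
    field_type = chr(fv.fieldType)
    if field_type == 'i':
        return fv.value.i64
    if field_type == 'd':
        return fv.value.dbl
    if field_type == 's':
        return fv.value.str
    return bytes(fv.value.blob)  # binary blob, or anything unrecognized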
#Field value flags used by dcgm_agent.dcgmEntitiesGetLatestValues()
DCGM_FV_FLAG_LIVE_DATA = 0x00000001
DCGM_HEALTH_WATCH_PCIE = 0x1
DCGM_HEALTH_WATCH_NVLINK = 0x2
DCGM_HEALTH_WATCH_PMU = 0x4
DCGM_HEALTH_WATCH_MCU = 0x8
DCGM_HEALTH_WATCH_MEM = 0x10
DCGM_HEALTH_WATCH_SM = 0x20
DCGM_HEALTH_WATCH_INFOROM = 0x40
DCGM_HEALTH_WATCH_THERMAL = 0x80
DCGM_HEALTH_WATCH_POWER = 0x100
DCGM_HEALTH_WATCH_DRIVER = 0x200
DCGM_HEALTH_WATCH_NVSWITCH_NONFATAL = 0x400
DCGM_HEALTH_WATCH_NVSWITCH_FATAL = 0x800
DCGM_HEALTH_WATCH_ALL = 0xFFFFFFFF
DCGM_HEALTH_WATCH_COUNT_V1 = 10
DCGM_HEALTH_WATCH_COUNT_V2 = 12
DCGM_HEALTH_RESULT_PASS = 0
DCGM_HEALTH_RESULT_WARN = 10
DCGM_HEALTH_RESULT_FAIL = 20
class c_dcgmDiagErrorDetail_t(_PrintableStructure):
_fields_ = [
('msg', c_char * 1024),
('code', c_uint)
]
DCGM_HEALTH_WATCH_MAX_INCIDENTS = DCGM_GROUP_MAX_ENTITIES
class c_dcgmIncidentInfo_t(_PrintableStructure):
_fields_ = [
('system', c_uint),
('health', c_uint32),
('error', c_dcgmDiagErrorDetail_t),
('entityInfo', c_dcgmGroupEntityPair_t),
]
class c_dcgmHealthResponse_v4(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('overallHealth', c_uint32),
('incidentCount', c_uint32),
('incidents', c_dcgmIncidentInfo_t * DCGM_HEALTH_WATCH_MAX_INCIDENTS),
]
dcgmHealthResponse_version4 = make_dcgm_version(c_dcgmHealthResponse_v4, 4)
class c_dcgmHealthSetParams_v2(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('groupId', c_void_p),
('systems', c_uint32),
('updateInterval', c_int64),
('maxKeepAge', c_double)
]
dcgmHealthSetParams_version2 = make_dcgm_version(c_dcgmHealthSetParams_v2, 2)
#Pid info structs
class c_dcgmStatSummaryInt64_t(_PrintableStructure):
_fields_ = [
('minValue', c_int64),
('maxValue', c_int64),
('average', c_int64)
]
class c_dcgmStatSummaryInt32_t(_PrintableStructure):
_fields_ = [
('minValue', c_int32),
('maxValue', c_int32),
('average', c_int32)
]
class c_dcgmStatSummaryFp64_t(_PrintableStructure):
_fields_ = [
('minValue', c_double),
('maxValue', c_double),
('average', c_double)
]
class c_dcgmProcessUtilInfo_t(_PrintableStructure):
_fields_ = [
('pid', c_uint),
('smUtil', c_double),
('memUtil', c_double)
]
class c_dcgmHealthResponseInfo_t(_PrintableStructure):
_fields_ = [
('system', c_uint),
('health', c_uint)
]
DCGM_MAX_PID_INFO_NUM = 16
class c_dcgmPidSingleInfo_t(_PrintableStructure):
_fields_ = [
('gpuId', c_uint32),
('energyConsumed', c_int64),
('pcieRxBandwidth', c_dcgmStatSummaryInt64_t),
('pcieTxBandwidth', c_dcgmStatSummaryInt64_t),
('pcieReplays', c_int64),
('startTime', c_int64),
('endTime', c_int64),
('processUtilization', c_dcgmProcessUtilInfo_t),
('smUtilization', c_dcgmStatSummaryInt32_t),
('memoryUtilization', c_dcgmStatSummaryInt32_t),
('eccSingleBit', c_uint32), #Deprecated
('eccDoubleBit', c_uint32),
('memoryClock', c_dcgmStatSummaryInt32_t),
('smClock', c_dcgmStatSummaryInt32_t),
('numXidCriticalErrors', c_int32),
('xidCriticalErrorsTs', c_int64 * 10),
('numOtherComputePids', c_int32),
('otherComputePids', c_uint32 * DCGM_MAX_PID_INFO_NUM),
('numOtherGraphicsPids', c_int32),
('otherGraphicsPids', c_uint32 * DCGM_MAX_PID_INFO_NUM),
('maxGpuMemoryUsed', c_int64),
('powerViolationTime', c_int64),
('thermalViolationTime', c_int64),
('reliabilityViolationTime', c_int64),
('boardLimitViolationTime', c_int64),
('lowUtilizationTime', c_int64),
('syncBoostTime', c_int64),
('overallHealth', c_uint),
('incidentCount', c_uint),
('systems', c_dcgmHealthResponseInfo_t * DCGM_HEALTH_WATCH_COUNT_V1)
]
class c_dcgmPidInfo_v2(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('pid', c_uint32),
('unused', c_uint32),
('numGpus', c_int32),
('summary', c_dcgmPidSingleInfo_t),
('gpus', c_dcgmPidSingleInfo_t * DCGM_MAX_NUM_DEVICES)
]
dcgmPidInfo_version2 = make_dcgm_version(c_dcgmPidInfo_v2, 2)
class c_dcgmRunningProcess_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('pid', c_uint32),
('memoryUsed', c_uint64)
]
dcgmRunningProcess_version1 = make_dcgm_version(c_dcgmRunningProcess_v1, 1)
c_dcgmRunningProcess_t = c_dcgmRunningProcess_v1
class c_dcgmGpuUsageInfo_t(_PrintableStructure):
_fields_ = [
('gpuId', c_uint32),
('energyConsumed', c_int64),
('powerUsage', c_dcgmStatSummaryFp64_t),
('pcieRxBandwidth', c_dcgmStatSummaryInt64_t),
('pcieTxBandwidth', c_dcgmStatSummaryInt64_t),
('pcieReplays', c_int64),
('startTime', c_int64),
('endTime', c_int64),
('smUtilization', c_dcgmStatSummaryInt32_t),
('memoryUtilization', c_dcgmStatSummaryInt32_t),
('eccSingleBit', c_uint32), #Deprecated
('eccDoubleBit', c_uint32),
('memoryClock', c_dcgmStatSummaryInt32_t),
('smClock', c_dcgmStatSummaryInt32_t),
('numXidCriticalErrors', c_int32),
('xidCriticalErrorsTs', c_int64 * 10),
('numComputePids', c_int32),
('computePids', c_dcgmProcessUtilInfo_t * DCGM_MAX_PID_INFO_NUM ),
('numGraphicsPids', c_int32),
('graphicsPids', c_dcgmProcessUtilInfo_t * DCGM_MAX_PID_INFO_NUM ),
('maxGpuMemoryUsed', c_int64),
('powerViolationTime', c_int64),
('thermalViolationTime', c_int64),
('reliabilityViolationTime', c_int64),
('boardLimitViolationTime', c_int64),
('lowUtilizationTime', c_int64),
('syncBoostTime', c_int64),
('overallHealth', c_uint),
('incidentCount', c_uint),
('systems', c_dcgmHealthResponseInfo_t * DCGM_HEALTH_WATCH_COUNT_V1)
]
class c_dcgmJobInfo_v3(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('numGpus', c_int32),
('summary', c_dcgmGpuUsageInfo_t),
('gpus', c_dcgmGpuUsageInfo_t * DCGM_MAX_NUM_DEVICES)
]
dcgmJobInfo_version3 = make_dcgm_version(c_dcgmJobInfo_v3, 3)
class c_dcgmDiagTestResult_v2(_PrintableStructure):
_fields_ = [
('result', c_uint),
('error', c_dcgmDiagErrorDetail_t),
('info', c_char * 1024)
]
class c_dcgmDiagResponsePerGpu_v4(_PrintableStructure):
_fields_ = [
('gpuId', c_uint),
('hwDiagnosticReturn', c_uint),
('results', c_dcgmDiagTestResult_v2 * DCGM_PER_GPU_TEST_COUNT_V8)
]
DCGM_SWTEST_COUNT = 10
LEVEL_ONE_MAX_RESULTS = 16
class c_dcgmDiagResponse_v8(_PrintableStructure):
_fields_ = [
('version', c_uint),
('gpuCount', c_uint),
('levelOneTestCount', c_uint),
('levelOneResults', c_dcgmDiagTestResult_v2 * LEVEL_ONE_MAX_RESULTS),
('perGpuResponses', c_dcgmDiagResponsePerGpu_v4 * DCGM_MAX_NUM_DEVICES),
('systemError', c_dcgmDiagErrorDetail_t),
('_unused', c_char * 1024)
]
dcgmDiagResponse_version8 = make_dcgm_version(c_dcgmDiagResponse_v8, 8)
DCGM_AFFINITY_BITMASK_ARRAY_SIZE = 8
class c_dcgmDeviceTopologyPath_t(_PrintableStructure):
_fields_ = [
('gpuId', c_uint32),
('path', c_uint32),
('localNvLinkIds', c_uint32)
]
class c_dcgmDeviceTopology_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('cpuAffinityMask', c_ulong * DCGM_AFFINITY_BITMASK_ARRAY_SIZE),
('numGpus', c_uint32),
('gpuPaths', c_dcgmDeviceTopologyPath_t * (DCGM_MAX_NUM_DEVICES - 1))
]
dcgmDeviceTopology_version1 = make_dcgm_version(c_dcgmDeviceTopology_v1, 1)
class c_dcgmGroupTopology_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('groupCpuAffinityMask', c_ulong * DCGM_AFFINITY_BITMASK_ARRAY_SIZE),
('numaOptimalFlag', c_uint32),
('slowestPath', c_uint32)
]
dcgmGroupTopology_version1 = make_dcgm_version(c_dcgmGroupTopology_v1, 1)
# Maximum number of field groups that can exist
DCGM_MAX_NUM_FIELD_GROUPS = 64
# Maximum number of field IDs that can be in a single field group
DCGM_MAX_FIELD_IDS_PER_FIELD_GROUP = 128
class c_dcgmFieldGroupInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('numFieldIds', c_uint32),
('fieldGroupId', c_void_p),
('fieldGroupName', c_char * DCGM_MAX_STR_LENGTH),
('fieldIds', c_uint16 * DCGM_MAX_FIELD_IDS_PER_FIELD_GROUP)
]
dcgmFieldGroupInfo_version1 = make_dcgm_version(c_dcgmFieldGroupInfo_v1, 1)
class c_dcgmAllFieldGroup_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('numFieldGroups', c_uint32),
('fieldGroups', c_dcgmFieldGroupInfo_v1 * DCGM_MAX_NUM_FIELD_GROUPS)
]
dcgmAllFieldGroup_version1 = make_dcgm_version(c_dcgmAllFieldGroup_v1, 1)
class c_dcgmIntrospectMemory_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('bytesUsed', c_longlong) # The total number of bytes being used to store all of the fields being watched
]
dcgmIntrospectMemory_version1 = make_dcgm_version(c_dcgmIntrospectMemory_v1, 1)
class c_dcgmIntrospectCpuUtil_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32), #!< version number (dcgmIntrospectCpuUtil_version)
('total', c_double), #!< fraction of device's CPU resources that were used
('kernel', c_double), #!< fraction of device's CPU resources that were used in kernel mode
('user', c_double), #!< fraction of device's CPU resources that were used in user mode
]
dcgmIntrospectCpuUtil_version1 = make_dcgm_version(c_dcgmIntrospectCpuUtil_v1, 1)
DCGM_MAX_CONFIG_FILE_LEN = 10000
DCGM_MAX_TEST_NAMES = 20
DCGM_MAX_TEST_NAMES_LEN = 50
DCGM_MAX_TEST_PARMS = 100
DCGM_MAX_TEST_PARMS_LEN = 100
DCGM_GPU_LIST_LEN = 50
DCGM_FILE_LEN = 30
DCGM_PATH_LEN = 128
DCGM_THROTTLE_MASK_LEN = 50
# Flags options for running the GPU diagnostic
DCGM_RUN_FLAGS_VERBOSE = 0x0001
DCGM_RUN_FLAGS_STATSONFAIL = 0x0002
# UNUSED
DCGM_RUN_FLAGS_TRAIN = 0x0004
# UNUSED
DCGM_RUN_FLAGS_FORCE_TRAIN = 0x0008
DCGM_RUN_FLAGS_FAIL_EARLY = 0x0010 # Enable fail early checks for the Targeted Stress, Targeted Power, SM Stress, and Diagnostic tests
class c_dcgmRunDiag_v7(_PrintableStructure):
_fields_ = [
('version', c_uint), # version of this message
('flags', c_uint), # flags specifying binary options for running it. Currently verbose and stats on fail
('debugLevel', c_uint), # 0-5 for the debug level the GPU diagnostic will use for logging
('groupId', c_void_p), # group of GPUs to verify. Cannot be specified together with gpuList.
('validate', c_uint), # 0-3 for which tests to run. Optional.
('testNames', c_char * DCGM_MAX_TEST_NAMES * DCGM_MAX_TEST_NAMES_LEN), # Specified list of test names. Optional.
('testParms', c_char * DCGM_MAX_TEST_PARMS * DCGM_MAX_TEST_PARMS_LEN), # Parameters to set for specified tests in the format: testName.parameterName=parameterValue. Optional.
('fakeGpuList', c_char * DCGM_GPU_LIST_LEN), # Comma-separated list of fake gpus. Cannot be specified with the groupId or gpuList.
('gpuList', c_char * DCGM_GPU_LIST_LEN), # Comma-separated list of gpus. Cannot be specified with the groupId.
('debugLogFile', c_char * DCGM_PATH_LEN), # Alternate name for the debug log file that should be used
('statsPath', c_char * DCGM_PATH_LEN), # Path that the plugin's statistics files should be written to
('configFileContents', c_char * DCGM_MAX_CONFIG_FILE_LEN), # Contents of nvvs config file (likely yaml)
('throttleMask', c_char * DCGM_THROTTLE_MASK_LEN), # Throttle reasons to ignore as either integer mask or csv list of reasons
('pluginPath', c_char * DCGM_PATH_LEN), # Custom path to the diagnostic plugins
('_unusedInt1', c_uint), # Unused
('_unusedInt2', c_uint), # Unused
('_unusedInt3', c_uint), # Unused
('_unusedBuf', c_char * DCGM_PATH_LEN), # Unused
('failCheckInterval', c_uint), # How often the fail early checks should occur when DCGM_RUN_FLAGS_FAIL_EARLY is set.
]
dcgmRunDiag_version7 = make_dcgm_version(c_dcgmRunDiag_v7, 7)
# Latest c_dcgmRunDiag class
c_dcgmRunDiag_t = c_dcgmRunDiag_v7
# Latest version for dcgmRunDiag_t
dcgmRunDiag_version = dcgmRunDiag_version7
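# Illustrative sketch (not part of the original bindings): a minimal c_dcgmRunDiag_t only needs the
# version, a validation level and a GPU list; the remaining fields can stay zeroed. The use of a
# DCGM_POLICY_VALID_SV_* value for 'validate' is an assumption based on the field comment above.
def _example_build_run_diag(gpu_ids="0"):
    rd = c_dcgmRunDiag_t()
    rd.version = dcgmRunDiag_version
    rd.validate = DCGM_POLICY_VALID_SV_SHORT  # short validation run
    rd.gpuList = gpu_ids                      # comma-separated GPU ids; str is encoded to bytes on assignment
    return rd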
#Flags for dcgmGetEntityGroupEntities's flags parameter
DCGM_GEGE_FLAG_ONLY_SUPPORTED = 0x00000001 #Only return entities that are supported by DCGM.
#Identifies a GPU NVLink error type returned by DCGM_FI_DEV_GPU_NVLINK_ERRORS
DCGM_GPU_NVLINK_ERROR_RECOVERY_REQUIRED = 1 # NVLink link recovery error occurred
DCGM_GPU_NVLINK_ERROR_FATAL = 2 # NVLink link fatal error occurred
# Topology hints for dcgmSelectGpusByTopology()
DCGM_TOPO_HINT_F_NONE = 0x00000000 # No hints specified
DCGM_TOPO_HINT_F_IGNOREHEALTH = 0x00000001 # Ignore the health of the GPUs when picking GPUs for job execution.
# By default, only healthy GPUs are considered.
class c_dcgmTopoSchedHint_v1(_PrintableStructure):
_fields_ = [
('version', c_uint), # version of this message
('inputGpuIds', c_uint64), # bitmask of the GPU ids to choose from
('numGpus', c_uint32), # the number of GPUs that DCGM should chooose
('hintFlags', c_uint64), # Hints to ignore certain factors for the scheduling hint
]
dcgmTopoSchedHint_version1 = make_dcgm_version(c_dcgmTopoSchedHint_v1, 1)
#DCGM NvLink link states used by c_dcgmNvLinkGpuLinkStatus_v1 & 2 and c_dcgmNvLinkNvSwitchLinkStatus_t's linkState field
DcgmNvLinkLinkStateNotSupported = 0 # NvLink is unsupported by this GPU (Default for GPUs)
DcgmNvLinkLinkStateDisabled = 1 # NvLink is supported for this link but this link is disabled (Default for NvSwitches)
DcgmNvLinkLinkStateDown = 2 # This NvLink link is down (inactive)
DcgmNvLinkLinkStateUp = 3 # This NvLink link is up (active)
# State of NvLink links for a GPU
class c_dcgmNvLinkGpuLinkStatus_v1(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the GPU (gpuId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_GPU_LEGACY1), #Link state of each link of this GPU
]
# State of NvLink links for a GPU
class c_dcgmNvLinkGpuLinkStatus_v2(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the GPU (gpuId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_GPU_LEGACY2), #Link state of each link of this GPU
]
class c_dcgmNvLinkGpuLinkStatus_v3(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the GPU (gpuId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_GPU), #Link state of each link of this GPU
]
#State of NvLink links for a NvSwitch
class c_dcgmNvLinkNvSwitchLinkStatus_v1(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the NvSwitch (physicalId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH_V1) #Link state of each link of this NvSwitch
]
class c_dcgmNvLinkStatus_v2(_PrintableStructure):
'''
NvSwitch link status for all GPUs and NvSwitches in the system
'''
_fields_ = [
        ('version', c_uint32), # version of this message. Should be dcgmNvLinkStatus_version2
('numGpus', c_uint32), # Number of GPUs populated in gpus[]
('gpus', c_dcgmNvLinkGpuLinkStatus_v2 * DCGM_MAX_NUM_DEVICES), #Per-GPU NvLink link statuses
('numNvSwitches', c_uint32), # Number of NvSwitches populated in nvSwitches[]
('nvSwitches', c_dcgmNvLinkNvSwitchLinkStatus_v1 * DCGM_MAX_NUM_SWITCHES) #Per-NvSwitch NvLink link statuses
]
dcgmNvLinkStatus_version2 = make_dcgm_version(c_dcgmNvLinkStatus_v2, 2)
#State of NvLink links for a NvSwitch
class c_dcgmNvLinkNvSwitchLinkStatus_v2(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the NvSwitch (physicalId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH) #Link state of each link of this NvSwitch
]
class c_dcgmNvLinkStatus_v3(_PrintableStructure):
'''
NvSwitch link status for all GPUs and NvSwitches in the system
'''
_fields_ = [
        ('version', c_uint32), # version of this message. Should be dcgmNvLinkStatus_version3
('numGpus', c_uint32), # Number of GPUs populated in gpus[]
('gpus', c_dcgmNvLinkGpuLinkStatus_v3 * DCGM_MAX_NUM_DEVICES), #Per-GPU NvLink link statuses
('numNvSwitches', c_uint32), # Number of NvSwitches populated in nvSwitches[]
('nvSwitches', c_dcgmNvLinkNvSwitchLinkStatus_v2 * DCGM_MAX_NUM_SWITCHES) #Per-NvSwitch NvLink link statuses
]
dcgmNvLinkStatus_version3 = make_dcgm_version(c_dcgmNvLinkStatus_v3, 3)
# Bitmask values for dcgmGetFieldIdSummary
DCGM_SUMMARY_MIN = 0x00000001
DCGM_SUMMARY_MAX = 0x00000002
DCGM_SUMMARY_AVG = 0x00000004
DCGM_SUMMARY_SUM = 0x00000008
DCGM_SUMMARY_COUNT = 0x00000010
DCGM_SUMMARY_INTEGRAL = 0x00000020
DCGM_SUMMARY_DIFF = 0x00000040
DCGM_SUMMARY_SIZE = 7
class c_dcgmSummaryResponse_t(_PrintableStructure):
class ResponseValue(DcgmUnion):
_fields_ = [
('i64', c_int64),
('dbl', c_double),
]
_fields_ = [
('fieldType', c_uint),
('summaryCount', c_uint),
('values', ResponseValue * DCGM_SUMMARY_SIZE),
]
class c_dcgmFieldSummaryRequest_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('fieldId', c_ushort),
('entityGroupType', c_uint32),
('entityId', c_uint),
('summaryTypeMask', c_uint32),
('startTime', c_uint64),
('endTime', c_uint64),
('response', c_dcgmSummaryResponse_t),
]
dcgmFieldSummaryRequest_version1 = make_dcgm_version(c_dcgmFieldSummaryRequest_v1, 1)
# Module IDs
DcgmModuleIdCore = 0 # Core DCGM
DcgmModuleIdNvSwitch = 1 # NvSwitch Module
DcgmModuleIdVGPU = 2 # VGPU Module
DcgmModuleIdIntrospect = 3 # Introspection Module
DcgmModuleIdHealth = 4 # Health Module
DcgmModuleIdPolicy = 5 # Policy Module
DcgmModuleIdConfig = 6 # Config Module
DcgmModuleIdDiag = 7 # GPU Diagnostic Module
DcgmModuleIdProfiling = 8 # Profiling Module
DcgmModuleIdCount = 9 # 1 greater than largest ID above
# Module Status
DcgmModuleStatusNotLoaded = 0 # Module has not been loaded yet
DcgmModuleStatusDenylisted = 1 # Module has been added to the denylist so it can't be loaded
DcgmModuleStatusFailed = 2 # Loading the module failed
DcgmModuleStatusLoaded = 3 # Module has been loaded
DCGM_MODULE_STATUSES_CAPACITY = 16
class c_dcgmModuleGetStatusesModule_t(_PrintableStructure):
_fields_ = [
('id', c_uint32), #One of DcgmModuleId*
('status', c_uint32), #One of DcgmModuleStatus*
]
class c_dcgmModuleGetStatuses_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('numStatuses', c_uint32),
('statuses', c_dcgmModuleGetStatusesModule_t * DCGM_MODULE_STATUSES_CAPACITY),
]
dcgmModuleGetStatuses_version1 = make_dcgm_version(c_dcgmModuleGetStatuses_v1, 1)
DCGM_PROF_MAX_NUM_GROUPS_V2 = 10 # Maximum number of metric ID groups that can exist in DCGM
DCGM_PROF_MAX_FIELD_IDS_PER_GROUP_V2 = 64 # Maximum number of field IDs that can be in a single DCGM profiling metric group
class c_dcgmProfMetricGroupInfo_v2(_PrintableStructure):
_fields_ = [
('majorId', c_ushort),
('minorId', c_ushort),
('numFieldIds', c_uint32),
('fieldIds', c_ushort * DCGM_PROF_MAX_FIELD_IDS_PER_GROUP_V2),
]
class c_dcgmProfGetMetricGroups_v3(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('unused', c_uint32),
('gpuId', c_uint32),
('numMetricGroups', c_uint32),
('metricGroups', c_dcgmProfMetricGroupInfo_v2 * DCGM_PROF_MAX_NUM_GROUPS_V2),
]
dcgmProfGetMetricGroups_version3 = make_dcgm_version(c_dcgmProfGetMetricGroups_v3, 3)
class c_dcgmVersionInfo_v2(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('rawBuildInfoString', c_char * (DCGM_MAX_STR_LENGTH * 2)),
]
dcgmVersionInfo_version2 = make_dcgm_version(c_dcgmVersionInfo_v2, 2)
dcgmVersionInfo_version = dcgmVersionInfo_version2
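# Usage sketch (not part of the original bindings): versioned request structs are
# normally zero-initialized, stamped with the matching make_dcgm_version() value, and
# then populated before being handed to the DCGM C API. The helper below is
# hypothetical and only illustrates that pattern using the run-diag struct above.
def _example_build_run_diag():
    diag = c_dcgmRunDiag_v7()
    diag.version = dcgmRunDiag_version7
    diag.flags = DCGM_RUN_FLAGS_VERBOSE | DCGM_RUN_FLAGS_FAIL_EARLY
    diag.validate = 1    # validation level; the exact semantics are defined by DCGM
    diag.gpuList = b"0"  # comma-separated GPU ids, as bytes for the fixed-size char array
    return diag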
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Base value for integer blank. Can be used as an unspecified blank
DCGM_INT32_BLANK = 0x7ffffff0
DCGM_INT64_BLANK = 0x7ffffffffffffff0
# Base value for double blank. 2 ** 47. FP64 has 52 bits of mantissa,
# so 47 bits can still increment by 1 and represent each value from 0-15
DCGM_FP64_BLANK = 140737488355328.0
DCGM_STR_BLANK = "<<<NULL>>>"
# Represents an error where data was not found
DCGM_INT32_NOT_FOUND = (DCGM_INT32_BLANK+1)
DCGM_INT64_NOT_FOUND = (DCGM_INT64_BLANK+1)
DCGM_FP64_NOT_FOUND = (DCGM_FP64_BLANK+1.0)
DCGM_STR_NOT_FOUND = "<<<NOT_FOUND>>>"
# Represents an error where fetching the value is not supported
DCGM_INT32_NOT_SUPPORTED = (DCGM_INT32_BLANK+2)
DCGM_INT64_NOT_SUPPORTED = (DCGM_INT64_BLANK+2)
DCGM_FP64_NOT_SUPPORTED = (DCGM_FP64_BLANK+2.0)
DCGM_STR_NOT_SUPPORTED = "<<<NOT_SUPPORTED>>>"
# Represents an error where fetching the value is not allowed with our current credentials
DCGM_INT32_NOT_PERMISSIONED = (DCGM_INT32_BLANK+3)
DCGM_INT64_NOT_PERMISSIONED = (DCGM_INT64_BLANK+3)
DCGM_FP64_NOT_PERMISSIONED = (DCGM_FP64_BLANK+3.0)
DCGM_STR_NOT_PERMISSIONED = "<<<NOT_PERM>>>"
###############################################################################
# Functions to check if a value is blank or not
def DCGM_INT32_IS_BLANK(val):
    return val >= DCGM_INT32_BLANK
def DCGM_INT64_IS_BLANK(val):
    return val >= DCGM_INT64_BLANK
def DCGM_FP64_IS_BLANK(val):
    return val >= DCGM_FP64_BLANK
# Looks for <<< at the first position and >>> somewhere inside the string
def DCGM_STR_IS_BLANK(val):
    return val.startswith("<<<") and ">>>" in val
###############################################################################
class DcgmValue:
def __init__(self, value):
self.value = value #Contains either an integer (int64), string, or double of the actual value
###########################################################################
def SetFromInt32(self, i32Value):
'''
Handle the special case where our source data was an int32 but is currently
stored in a python int (int64), dealing with blanks
'''
value = int(i32Value)
if not DCGM_INT32_IS_BLANK(i32Value):
self.value = value
return
if value == DCGM_INT32_NOT_FOUND:
self.value = DCGM_INT64_NOT_FOUND
elif value == DCGM_INT32_NOT_SUPPORTED:
self.value = DCGM_INT64_NOT_SUPPORTED
elif value == DCGM_INT32_NOT_PERMISSIONED:
self.value = DCGM_INT64_NOT_PERMISSIONED
else:
self.value = DCGM_INT64_BLANK
###########################################################################
def IsBlank(self):
'''
Returns True if the currently-stored value is a blank value. False if not
'''
if self.value is None:
return True
        elif type(self.value) == int:
return DCGM_INT64_IS_BLANK(self.value)
elif type(self.value) == float:
return DCGM_FP64_IS_BLANK(self.value)
elif type(self.value) == str:
return DCGM_STR_IS_BLANK(self.value)
else:
            raise Exception("Unknown type: %s" % str(type(self.value)))
###########################################################################
def __str__(self):
return str(self.value)
###########################################################################
###############################################################################
def self_test():
v = DcgmValue(1.0)
assert(not v.IsBlank())
assert(v.value == 1.0)
v = DcgmValue(100)
assert(not v.IsBlank())
assert(v.value == 100)
v = DcgmValue(DCGM_INT64_NOT_FOUND)
assert(v.IsBlank())
v = DcgmValue(DCGM_FP64_NOT_FOUND)
assert(v.IsBlank())
v.SetFromInt32(DCGM_INT32_NOT_SUPPORTED)
assert(v.IsBlank())
assert(v.value == DCGM_INT64_NOT_SUPPORTED)
print("Tests passed")
return
###############################################################################
if __name__ == "__main__":
self_test()
###############################################################################
|
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUTensorActive(GPURecord):
"""
GPU Tensor active record
"""
tag = "gpu_tensoractive"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, device_uuid, timestamp)
@staticmethod
def aggregation_function():
"""
The function that is used to aggregate
this type of record
"""
def average(seq):
return sum(seq[1:], start=seq[0]) / len(seq)
return average
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed
as part of the header indicating that
this record has been aggregated using
max, min or average etc.
Returns
-------
str
The full name of the
metric.
"""
        return ("Average " if aggregation_tag else "") + "GPU Tensor Active (%)"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() < other.value()
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return GPUTensorActive(device_uuid=None,
value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return GPUTensorActive(device_uuid=None,
value=(self.value() - other.value()))
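# Usage sketch (not part of the original module): the average defined above is computed
# with the record arithmetic itself (__add__ from this class, __truediv__ from the
# Record base class), so it yields a new GPUTensorActive rather than a bare float. The
# hypothetical demo below follows the repository's __main__ convention.
if __name__ == "__main__":
    samples = [GPUTensorActive(0.2), GPUTensorActive(0.4), GPUTensorActive(0.6)]
    avg = GPUTensorActive.aggregation_function()(samples)
    print(avg.value())  # approximately 0.4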
|
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUPCIERX(GPURecord):
"""
GPU PCIe RX Bytes record. The number of bytes of active PCIe rx (read) data including both header and payload.
Note that this is from the perspective of the GPU, so copying data from host to device (HtoD) would be reflected in this metric.
"""
tag = "gpu_picerx"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, device_uuid, timestamp)
@staticmethod
def aggregation_function():
"""
The function that is used to aggregate
this type of record
"""
def average(seq):
return sum(seq[1:], start=seq[0]) / len(seq)
return average
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed
as part of the header indicating that
this record has been aggregated using
max, min or average etc.
Returns
-------
str
The full name of the
metric.
"""
        return ("Average " if aggregation_tag else "") + "GPU PCIe RX Bytes"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() < other.value()
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return GPUPCIERX(device_uuid=None,
value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return GPUPCIERX(device_uuid=None,
value=(self.value() - other.value()))
|
import logging
LOGGER_NAME = 'TorchBenchLogger'
def set_logger(logger_level=logging.WARNING):
formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(logger_level)
logger.addHandler(handler)
return logger |
# default is 0.01 second
DEFAULT_MONITORING_INTERVAL = 0.01
class AnalayzerConfig:
def __init__(self):
self.monitoring_interval = DEFAULT_MONITORING_INTERVAL |
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUPCIETX(GPURecord):
"""
GPU PCIe TX Bytes record. The number of bytes of active PCIe tx (transmit) data including both header and payload.
Note that this is from the perspective of the GPU, so copying data from device to host (DtoH) would be reflected in this metric.
"""
tag = "gpu_picetx"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, device_uuid, timestamp)
@staticmethod
def aggregation_function():
"""
The function that is used to aggregate
this type of record
"""
def average(seq):
return sum(seq[1:], start=seq[0]) / len(seq)
return average
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed
as part of the header indicating that
this record has been aggregated using
max, min or average etc.
Returns
-------
str
The full name of the
metric.
"""
        return ("Average " if aggregation_tag else "") + "GPU PCIe TX Bytes"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() < other.value()
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return GPUPCIETX(device_uuid=None,
value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return GPUPCIETX(device_uuid=None,
value=(self.value() - other.value()))
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
# @Yueming Hao: TODO: Replace this with nvml API
from .da_exceptions import TorchBenchAnalyzerException
from numba import cuda
class Device:
"""
Generic device class representing devices being monitored
"""
def __init__(self):
pass
class GPUDevice(Device):
"""
Representing a GPU device
"""
def __init__(self, device_name, device_id, pci_bus_id, device_uuid):
"""
Parameters
----------
device_name: str
Human readable name of the device
device_id : int
Device id according to the `nvidia-smi` output
pci_bus_id : str
PCI bus id
device_uuid : str
Device UUID
"""
self._device_name = device_name
self._device_id = device_id
self._pci_bus_id = pci_bus_id
self._device_uuid = device_uuid
self._device = None
for gpu in cuda.gpus:
if gpu._device.uuid == device_uuid:
self._device = gpu
if self._device is None:
raise TorchBenchAnalyzerException('Failed to find GPU with UUID: {}'.format(device_uuid))
self._sm_count = self._device.MULTIPROCESSOR_COUNT
fma_count = ConvertSMVer2Cores(self._device.COMPUTE_CAPABILITY_MAJOR, self._device.COMPUTE_CAPABILITY_MINOR)
if fma_count == 0:
raise TorchBenchAnalyzerException('Unsupported GPU arch with CC%d.%d. Please check ConvertSMVer2Cores function.'
%(self._device.COMPUTE_CAPABILITY_MAJOR, self._device.COMPUTE_CAPABILITY_MINOR))
self._fma_count = fma_count
self._frequency = self._device.CLOCK_RATE
def device_name(self):
"""
Returns
-------
str
device name
"""
return self._device_name
def device_id(self):
"""
Returns
-------
int
device id of this GPU
"""
return self._device_id
def pci_bus_id(self):
"""
Returns
-------
bytes
PCI bus id of this GPU
"""
return self._pci_bus_id
def device_uuid(self):
"""
Returns
-------
str
UUID of this GPU
"""
return self._device_uuid
def sm_count(self):
"""
Returns
-------
int
number of SMs on this GPU
"""
return self._sm_count
def ConvertSMVer2Cores(major, minor):
# Returns the number of CUDA cores per multiprocessor for a given
# Compute Capability version. There is no way to retrieve that via
# the API, so it needs to be hard-coded.
# Refer to https://github.com/NVIDIA/cuda-samples/blob/master/Common/helper_cuda.h
return {(3, 0): 192, # Kepler
(3, 2): 192,
(3, 5): 192,
(3, 7): 192,
(5, 0): 128, # Maxwell
(5, 2): 128,
(5, 3): 128,
(6, 0): 64, # Pascal
(6, 1): 128,
(6, 2): 128,
(7, 0): 64, # Volta
(7, 2): 64,
(7, 5): 64, # Turing
(8, 0): 64, # Ampere
(8, 6): 128,
(8, 7): 128,
(8, 9): 128, # Ada
(9, 0): 128, # Hopper
}.get((major, minor), 0)
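# Usage sketch (not part of the original module): the attributes gathered in GPUDevice
# (SM count, FMA/CUDA cores per SM, clock rate) are the usual ingredients of a
# theoretical peak-FP32 estimate. The helper below is hypothetical and only illustrates
# the arithmetic: an FMA counts as two floating-point operations, and the CUDA driver
# reports CLOCK_RATE in kHz.
def _estimate_peak_fp32_gflops(sm_count, cores_per_sm, clock_rate_khz):
    return 2.0 * sm_count * cores_per_sm * (clock_rate_khz / 1e6)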
|
from .record import Record
class CPURecord(Record):
"""
This is a base class for any
CPU based record
"""
def __init__(self, value, timestamp=0):
"""
Parameters
----------
value : float
            The value of the CPU metric
timestamp : int
The timestamp for the record in nanoseconds
"""
        super().__init__(value, timestamp)
        # Dummy device uuid so CPU records can flow through the record
        # aggregator alongside GPU records.
        self._device_uuid = 0x1
def device_uuid(self):
return self._device_uuid
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import itertools
from .record import Record
from .da_exceptions import TorchBenchAnalyzerException
class RecordAggregator:
"""
Stores a collection of Record objects.
"""
def __init__(self):
self._records = defaultdict(list)
def insert(self, record):
"""
Insert a record into the RecordAggregator
Parameters
----------
record : Record
A record to be inserted
"""
if isinstance(record, Record):
record_type = type(record)
self._records[record_type].append(record)
else:
raise TorchBenchAnalyzerException(
"Can only add objects of type 'Record' to RecordAggregator")
def insert_all(self, record_list):
"""
Insert records from a list of records
into the RecordAggregator
Parameters
----------
record_list : List of Records
The records to insert
"""
for record in record_list:
self.insert(record)
def add_key(self, record_type, records):
"""
Adds or replaces all the records of a given record_type with the new
records
Parameters
----------
record_type : Record
record_type to add to the records.
records : list
List of new records to be added.
"""
self._records[record_type] = records
def filter_records(self, record_types=None, filters=None):
"""
Get records that satisfy the given list of criteria.
Parameters
----------
record_types : list of types of Records
the types of the records we are
imposing the filter criteria on.
filters : list of callables
conditions that determine whether
a given record should be returned.
If no filters specified, all records
of types specified by record_types will be
returned.
Note : This must be of the same length
as the list of record_types, or omitted.
Returns
-------
RecordAggregator
Returns a new RecordAggregator containing the filtered
records
"""
filtered_records = RecordAggregator()
if not record_types and not filters:
for record_type, records in self._records.items():
filtered_records.add_key(record_type, records)
return filtered_records
if record_types and not filters:
try:
for record_type in record_types:
filtered_records.add_key(record_type,
self._records[record_type])
return filtered_records
except KeyError as k:
                raise TorchBenchAnalyzerException(
                    f"Record type '{k}' not found in this RecordAggregator"
                )
if filters and not record_types:
raise TorchBenchAnalyzerException(
"Must specify the record types corresponding to each filter criterion."
)
if len(record_types) != len(filters):
raise TorchBenchAnalyzerException(
"Must specify the same number of record types as filter criteria."
)
# Remove records that do not satisfy criteria
for h, f in zip(record_types, filters):
for record in self._records[h]:
if f(record):
filtered_records.insert(record)
return filtered_records
def groupby(self, record_types, groupby_criterion):
"""
Group all the records of a certain type together if they have the
        same value for a given groupby criterion.
Parameters
----------
record_types : list
A list of record type
groupby_criterion : callable
This callable will receive a single record as the argument and
must return the value that will be used for groupby
Returns
-------
dict
A dictionary of dictionaries where the first level keys are the
record type and the second level keys are unique values returned
            by groupby_criterion and the values are the aggregated records.
"""
field_values = {
record_type: set([
groupby_criterion(record)
for record in self._records[record_type]
]) for record_type in record_types
}
groupby_result = defaultdict(list)
for record_type in record_types:
groupby_result[record_type] = defaultdict(list)
for field_value in field_values[record_type]:
aggregated_result = self.filter_records(
record_types=[record_type],
filters=[lambda r: groupby_criterion(r) == field_value
]).aggregate(record_types=[record_type])
groupby_result[record_type][field_value] = \
aggregated_result[record_type]
return groupby_result
def groupby_wo_aggregate(self, record_types, groupby_criterion):
"""
        Similar to groupby(), but this function returns the raw grouped records rather than aggregated records.
"""
field_values = {
record_type: set([
groupby_criterion(record)
for record in self._records[record_type]
]) for record_type in record_types
}
groupby_result = defaultdict(list)
for record_type in record_types:
groupby_result[record_type] = defaultdict(list)
for field_value in field_values[record_type]:
temp_records_aggregator = self.filter_records(
record_types=[record_type],
filters=[lambda r: groupby_criterion(r) == field_value ])
groupby_result[record_type][field_value] = temp_records_aggregator.get_records()
return groupby_result
def record_types(self):
"""
Returns
-------
        list of Record types
            a list of the types of records in this
            RecordAggregator
"""
return list(self._records)
def total(self, record_type=None):
"""
Get the total number of records in
the RecordAggregator
Parameters
----------
record_type : a class name of type Record
The type of records to count,
if None, count all types
Returns
-------
int
number of records in
the RecordAggregator
"""
if record_type:
if record_type not in self._records:
raise TorchBenchAnalyzerException(
f"Record type '{record_type.header()}' not found in this RecordAggregator"
)
return len(self._records[record_type])
return sum(len(self._records[k]) for k in self._records)
def aggregate(self, record_types=None):
"""
Parameters
----------
record_types : List of Record types
The type of records to aggregate.
If None, aggregates all records
Returns
-------
dict
keys are requested record types
and values are the aggregated values
"""
if not record_types:
record_types = self.record_types()
aggregated_records = {
record_type:
record_type.aggregation_function()(self._records[record_type])
for record_type in record_types
}
return aggregated_records
def get_records(self):
"""
Get all the records.
Returns
-------
dict
A dictionary where the keys are record types and the values are
an array of records with the specified type
"""
return self._records
def _flatten_records(self, records):
"""
Flatten the records array by joining all the arrays together.
"""
return list(itertools.chain.from_iterable(records))
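# Usage sketch (not part of the original module): a minimal, hypothetical record type is
# enough to exercise insert_all()/aggregate(); real callers use the GPU/CPU record
# classes shipped with this package. The demo follows the repository's __main__ convention.
if __name__ == "__main__":
    class _DemoLatency(Record):
        tag = "demo_latency"
        def __init__(self, value, timestamp=0):
            super().__init__(value, timestamp)
        @staticmethod
        def header(aggregation_tag=False):
            return "Demo Latency (ms)"
    agg = RecordAggregator()
    agg.insert_all([_DemoLatency(1.5), _DemoLatency(3.0), _DemoLatency(2.0)])
    # Record's default aggregation keeps the record with the maximum value.
    print(agg.aggregate(record_types=[_DemoLatency])[_DemoLatency].value())  # 3.0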
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Logging
LOGGER_NAME = "model_analyzer_logger"
from .gpu_device import GPUDevice
from ..dcgm import dcgm_agent as dcgm_agent
from ..dcgm import dcgm_structs as structs
from .da_exceptions import TorchBenchAnalyzerException
import pynvml
import numba.cuda
numba.cuda.config.CUDA_LOG_LEVEL = "ERROR"
import logging
logger = logging.getLogger(LOGGER_NAME)
def type_convert_for_pynvml(original_input):
    """For pynvml 11.5.0+, most arguments and return values are strings.
    This function converts the bytes returned by pynvml versions older than 11.5.0 to strings.
    """
if isinstance(original_input, bytes):
return original_input.decode('utf-8')
elif isinstance(original_input, str):
return original_input
elif isinstance(original_input, int):
return str(original_input)
raise TorchBenchAnalyzerException('Unsupported type for pynvml conversion: {}'.format(type(original_input)))
class GPUDeviceFactory:
"""
Factory class for creating GPUDevices
"""
def __init__(self, model_analyzer_backend='nvml'):
self._devices = []
self._devices_by_bus_id = {}
self._devices_by_uuid = {}
self._model_analyzer_backend = model_analyzer_backend
self._nvml = pynvml
self._nvml.nvmlInit()
self.init_all_devices()
def init_all_devices(self, dcgmPath=None):
"""
Create GPUDevice objects for all DCGM visible
devices.
Parameters
----------
dcgmPath : str
Absolute path to dcgm shared library
"""
if self._model_analyzer_backend == 'dcgm':
if numba.cuda.is_available():
                logger.debug("Initializing GPUDevice handles using DCGM")
structs._dcgmInit(dcgmPath)
dcgm_agent.dcgmInit()
# Start DCGM in the embedded mode to use the shared library
dcgm_handle = dcgm_agent.dcgmStartEmbedded(
structs.DCGM_OPERATION_MODE_MANUAL)
# Create a GPU device for every supported DCGM device
dcgm_device_ids = dcgm_agent.dcgmGetAllSupportedDevices(dcgm_handle)
for device_id in dcgm_device_ids:
                    device_attributes = dcgm_agent.dcgmGetDeviceAttributes(
                        dcgm_handle, device_id).identifiers
                    pci_bus_id = device_attributes.pciBusId.upper()
                    device_uuid = device_attributes.uuid
                    device_name = device_attributes.deviceName
                    try:
                        gpu_device = GPUDevice(device_name, device_id, pci_bus_id, device_uuid)
except TorchBenchAnalyzerException as e:
logger.debug("Skipping device %s due to %s", device_name, e)
continue
self._devices.append(gpu_device)
self._devices_by_bus_id[pci_bus_id] = gpu_device
self._devices_by_uuid[device_uuid] = gpu_device
dcgm_agent.dcgmShutdown()
else:
logger.debug("Initializing GPUDevice handles using NVML")
# Create a GPU device for every supported NVML device
nvml_device_count = self._nvml.nvmlDeviceGetCount()
for device_id in range(nvml_device_count):
handle = self._nvml.nvmlDeviceGetHandleByIndex(device_id)
device_name = type_convert_for_pynvml(self._nvml.nvmlDeviceGetName(handle))
pci_bus_id = type_convert_for_pynvml(self._nvml.nvmlDeviceGetPciInfo(handle).busId)
device_uuid = type_convert_for_pynvml(self._nvml.nvmlDeviceGetUUID(handle))
try:
gpu_device = GPUDevice(device_name, device_id, pci_bus_id, device_uuid)
except TorchBenchAnalyzerException as e:
logger.debug("Skipping device %s due to %s", device_name, e)
continue
self._devices.append(gpu_device)
self._devices_by_bus_id[pci_bus_id] = gpu_device
self._devices_by_uuid[device_uuid] = gpu_device
self._nvml.nvmlShutdown()
def get_device_by_bus_id(self, bus_id, dcgmPath=None):
"""
Get a GPU device by using its bus ID.
Parameters
----------
bus_id : bytes
Bus id corresponding to the GPU. The bus id should be created by
converting the colon separated hex notation into a bytes type
using ascii encoding. The bus id before conversion to bytes
should look like "00:65:00".
Returns
-------
Device
The device associated with this bus id.
"""
if bus_id in self._devices_by_bus_id:
return self._devices_by_bus_id[bus_id]
else:
raise TorchBenchAnalyzerException(
f'GPU with {bus_id} bus id is either not supported by DCGM or not present.'
)
def get_device_by_cuda_index(self, index):
"""
Get a GPU device using the CUDA index. This includes the index
provided by CUDA visible devices.
Parameters
----------
index : int
index of the device in the list of visible CUDA devices.
Returns
-------
Device
The device associated with the index provided.
Raises
------
IndexError
If the index is out of bound.
"""
devices = numba.cuda.list_devices()
if index > len(devices) - 1:
raise IndexError
cuda_device = devices[index]
device_identity = cuda_device.get_device_identity()
pci_domain_id = device_identity['pci_domain_id']
pci_device_id = device_identity['pci_device_id']
pci_bus_id = device_identity['pci_bus_id']
device_bus_id = \
f'{pci_domain_id:08X}:{pci_bus_id:02X}:{pci_device_id:02X}.0'
return self.get_device_by_bus_id(device_bus_id)
def get_device_by_uuid(self, uuid, dcgmPath=None):
"""
Get a GPU device using the GPU uuid.
Parameters
----------
uuid : str
            UUID of the GPU device.
Returns
-------
Device
The device associated with the uuid.
Raises
------
        TorchBenchAnalyzerException
If the uuid does not exist this exception will be raised.
"""
if uuid in self._devices_by_uuid:
return self._devices_by_uuid[uuid]
else:
raise TorchBenchAnalyzerException(
f'GPU UUID {uuid} was not found.')
def verify_requested_gpus(self, requested_gpus):
"""
Creates a list of GPU UUIDs corresponding to the GPUs visible to
numba.cuda among the requested gpus
Parameters
----------
requested_gpus : list of str or list of ints
Can either be GPU UUIDs or GPU device ids
Returns
-------
List of GPUDevices
list of GPUDevices corresponding to visible GPUs among requested
Raises
------
TorchBenchAnalyzerException
"""
if self._model_analyzer_backend == 'dcgm':
cuda_visible_gpus = self.get_cuda_visible_gpus()
else:
cuda_visible_gpus = self._devices
if len(requested_gpus) == 1:
if requested_gpus[0] == 'all':
self._log_gpus_used(cuda_visible_gpus)
return cuda_visible_gpus
elif requested_gpus[0] == '[]':
logger.debug("No GPUs requested")
return []
try:
# Check if each string in the list can be parsed as an int
requested_cuda_indices = list(map(int, requested_gpus))
requested_gpus = []
for idx in requested_cuda_indices:
try:
requested_gpus.append(self.get_device_by_cuda_index(idx))
except TorchBenchAnalyzerException:
raise TorchBenchAnalyzerException(
f"Requested GPU with device id : {idx}. This GPU is not supported by DCGM."
)
except ValueError:
# requested_gpus are assumed to be UUIDs
requested_gpus = [
self.get_device_by_uuid(uuid) for uuid in requested_gpus
]
pass
# Return the intersection of CUDA visible UUIDs and requested/supported UUIDs.
if self._model_analyzer_backend == 'dcgm':
available_gpus = list(set(cuda_visible_gpus) & set(requested_gpus))
else:
            available_gpus = list(set(requested_gpus))
self._log_gpus_used(available_gpus)
return available_gpus
def get_cuda_visible_gpus(self):
"""
Returns
-------
list of GPUDevice
            GPUDevice objects for the DCGM-supported devices visible to CUDA
"""
cuda_visible_gpus = []
if numba.cuda.is_available():
for cuda_device in numba.cuda.list_devices():
try:
cuda_visible_gpus.append(
self.get_device_by_cuda_index(cuda_device.id))
except TorchBenchAnalyzerException:
# Device not supported by DCGM, log warning
logger.debug(
f"Device '{str(cuda_device.name, encoding='ascii')}' with "
f"cuda device id {cuda_device.id} is not supported by DCGM."
)
return cuda_visible_gpus
def _log_gpus_used(self, gpus):
"""
Log the info for the GPUDevices in use
"""
for gpu in gpus:
logger.debug(
f"Using GPU {gpu.device_id()} {gpu.device_name()} with UUID {gpu.device_uuid()}"
)
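# Usage sketch (not part of the original module): constructing the factory immediately
# probes the local GPUs through NVML (or DCGM when model_analyzer_backend='dcgm'), so
# the hypothetical example below only works on a machine with NVIDIA drivers installed.
if __name__ == "__main__":
    factory = GPUDeviceFactory(model_analyzer_backend='nvml')
    for gpu in factory.verify_requested_gpus(['all']):
        print(gpu.device_id(), gpu.device_name(), gpu.device_uuid())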
|
from functools import total_ordering
from .cpu_record import CPURecord
@total_ordering
class CPUPeakMemory(CPURecord):
"""
The peak memory usage in the CPU.
"""
tag = "cpu_peak_memory"
def __init__(self, value, timestamp=0):
"""
Parameters
----------
value : float
            The value of the CPU metric
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, timestamp)
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed
as part of the header indicating that
this record has been aggregated using
max, min or average etc.
Returns
-------
str
The full name of the
metric.
"""
        return ("Max " if aggregation_tag else "") + "CPU Memory Usage (MB)"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() > other.value()
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return CPUPeakMemory(value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return CPUPeakMemory(value=(other.value() - self.value()))
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUPeakMemory(GPURecord):
"""
    The peak memory usage in the GPU. No aggregation function is specified here,
    so the default MAX aggregation inherited from the Record class is used.
"""
tag = "gpu_peak_memory"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, device_uuid, timestamp)
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed
as part of the header indicating that
this record has been aggregated using
max, min or average etc.
Returns
-------
str
The full name of the
metric.
"""
return ("Max " if aggregation_tag else "") + "GPU Memory Usage (MB)"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() > other.value()
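        # Note: the comparison above is intentionally inverted (and __sub__ below mirrors
        # it); for peak-memory records a smaller value appears to be treated as "better",
        # matching the upstream model-analyzer convention.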
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return GPUPeakMemory(device_uuid=None,
value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return GPUPeakMemory(device_uuid=None,
value=(other.value() - self.value()))
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from abc import ABCMeta, abstractmethod
from statistics import mean
import importlib
from .da_exceptions import TorchBenchAnalyzerException
class RecordType(ABCMeta):
"""
A metaclass that holds the instantiated Record types
"""
record_types = {}
def __new__(cls, name, base, namespace):
"""
This function is called upon declaration of any classes of type
RecordType
"""
record_type = super().__new__(cls, name, base, namespace)
# If record_type.tag is a string, register it here
if isinstance(record_type.tag, str):
cls.record_types[record_type.tag] = record_type
return record_type
@classmethod
def get(cls, tag):
"""
Parameters
----------
tag : str
tag that a record type has registered it classname with
Returns
-------
            The class of type RecordType corresponding to the tag
"""
if tag not in cls.record_types:
try:
importlib.import_module('model_analyzer.record.types.%s' % tag)
except ImportError as e:
print(e)
return cls.record_types[tag]
@classmethod
def get_all_record_types(cls):
"""
Returns
-------
dict
keys are tags and values are
all the types that have this as a
metaclass
"""
type_module_directory = \
os.path.join(
globals()['__spec__'].origin.rsplit('/', 1)[0], 'types')
for filename in os.listdir(type_module_directory):
if filename != '__init__.py' and filename.endswith('.py'):
try:
importlib.import_module(
f'model_analyzer.record.types.{filename[:-3]}')
except AttributeError:
raise TorchBenchAnalyzerException(
"Error retrieving all record types")
return cls.record_types
class Record(metaclass=RecordType):
"""
This class is used for representing
records
"""
def __init__(self, value, timestamp):
"""
Parameters
----------
value : float or int
            The value of the metric
timestamp : int
The timestamp for the record in nanoseconds
"""
assert type(value) is float or type(value) is int
assert type(timestamp) is int
self._value = value
self._timestamp = timestamp
@staticmethod
def aggregation_function():
"""
The function that is used to aggregate
this type of record
Returns
-------
callable()
[Records] -> Record
"""
return (lambda records: max(records, key=lambda r: r.value()))
@staticmethod
def value_function():
"""
Returns the average value from a list
Returns
-------
Average value of the list
"""
return (lambda values: mean(values))
@staticmethod
@abstractmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag : boolean
An optional tag that may be displayed as part of the header
indicating that this record has been aggregated using max, min or
average etc.
Returns
-------
str
The full name of the
metric.
"""
@property
@abstractmethod
def tag(self):
"""
Returns
-------
str
the name tag of the record type.
"""
def to_dict(self):
return (self.tag, self.__dict__)
@classmethod
def from_dict(cls, record_dict):
record = cls(0)
for key in ['_value', '_timestamp']:
if key in record_dict:
setattr(record, key, record_dict[key])
return record
def value(self):
"""
This method returns the value of recorded metric
Returns
-------
float
value of the metric
"""
return self._value
def timestamp(self):
"""
This method should return the time at which the record was created.
Returns
-------
        int
timestamp passed in during
record creation
"""
return self._timestamp
def __mul__(self, other):
"""
Defines left multiplication for records with floats or ints.
Returns
-------
Record
"""
if isinstance(other, int) or isinstance(other, float):
return type(self)(value=(self.value() * other))
else:
raise TypeError
def __rmul__(self, other):
"""
Defines right multiplication
"""
return self.__mul__(other)
def __truediv__(self, other):
"""
        Defines division of a record by a float or int
Returns
-------
Record
"""
if isinstance(other, int) or isinstance(other, float):
return type(self)(value=(self.value() / other))
else:
raise TypeError
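# Usage sketch (not part of the original module): RecordType is a metaclass, so any
# concrete subclass that defines a string `tag` registers itself automatically and can
# later be looked up with RecordType.get(). The subclass below is hypothetical and
# follows the repository's __main__ convention.
if __name__ == "__main__":
    class DemoThroughput(Record):
        tag = "demo_throughput"
        def __init__(self, value, timestamp=0):
            super().__init__(value, timestamp)
        @staticmethod
        def header(aggregation_tag=False):
            return "Demo Throughput (items/s)"
    assert RecordType.get("demo_throughput") is DemoThroughput
    r = DemoThroughput(10.0)
    # Scalar multiplication and division come from the Record base class.
    print((r * 2).value(), (r / 4).value())  # 20.0 2.5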
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUPowerUsage(GPURecord):
"""
GPU Power Usage
"""
tag = "gpu_power_usage"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, device_uuid, timestamp)
@staticmethod
def aggregation_function():
"""
The function that is used to aggregate
this type of record
"""
def average(seq):
return sum(seq[1:], start=seq[0]) / len(seq)
return average
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed as part of the header
indicating that this record has been aggregated using max, min or
average etc.
Returns
-------
str
The full name of the
metric.
"""
return ("Average " if aggregation_tag else "") + "GPU Power Usage (W)"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() < other.value()
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return GPUPowerUsage(device_uuid=None,
value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return GPUPowerUsage(device_uuid=None,
value=(self.value() - other.value()))
|
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUDRAMActive(GPURecord):
"""
GPU DRAM active record
"""
tag = "gpu_dramactive"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, device_uuid, timestamp)
@staticmethod
def aggregation_function():
"""
The function that is used to aggregate
this type of record
"""
def average(seq):
return sum(seq[1:], start=seq[0]) / len(seq)
return average
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed
as part of the header indicating that
this record has been aggregated using
max, min or average etc.
Returns
-------
str
The full name of the
metric.
"""
        return ("Average " if aggregation_tag else "") + "GPU DRAM Active (%)"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() < other.value()
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return GPUDRAMActive(device_uuid=None,
value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return GPUDRAMActive(device_uuid=None,
value=(self.value() - other.value()))
|
class TorchBenchAnalyzerException(Exception):
"""
A custom exception specific to the TorchBench Model Analyzer
"""
pass
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUUtilization(GPURecord):
"""
GPU utilization record
"""
tag = "gpu_utilization"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, device_uuid, timestamp)
@staticmethod
def aggregation_function():
"""
The function that is used to aggregate
this type of record
"""
def average(seq):
return sum(seq[1:], start=seq[0]) / len(seq)
return average
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed
as part of the header indicating that
this record has been aggregated using
max, min or average etc.
Returns
-------
str
The full name of the
metric.
"""
return ("Average " if aggregation_tag else "") + "GPU Utilization (%)"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() < other.value()
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return GPUUtilization(device_uuid=None,
value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return GPUUtilization(device_uuid=None,
value=(self.value() - other.value()))
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .record import Record
class GPURecord(Record):
"""
This is a base class for any
GPU based record
"""
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, timestamp)
self._device_uuid = device_uuid
def device_uuid(self):
"""
Returns
-------
str
uuid for the GPU that this metric was sampled on
"""
return self._device_uuid
@classmethod
def from_dict(cls, record_dict):
record = cls(0)
        for key in ['_value', '_timestamp', '_device_uuid']:
if key in record_dict:
setattr(record, key, record_dict[key])
return record
|
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUFreeMemory(GPURecord):
"""
The free memory in the GPU.
"""
tag = "gpu_free_memory"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, device_uuid, timestamp)
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed
as part of the header indicating that
this record has been aggregated using
max, min or average etc.
Returns
-------
str
The full name of the
metric.
"""
return ("Max " if aggregation_tag else "") + "GPU Memory Available (MB)"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() < other.value()
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return GPUFreeMemory(device_uuid=None,
value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return GPUFreeMemory(device_uuid=None,
value=(self.value() - other.value()))
|
from functools import total_ordering
from .gpu_record import GPURecord
@total_ordering
class GPUFP32Active(GPURecord):
"""
GPU FP32 active record
"""
tag = "gpu_fp32active"
def __init__(self, value, device_uuid=None, timestamp=0):
"""
Parameters
----------
value : float
            The value of the GPU metric
device_uuid : str
The GPU device uuid this metric is associated
with.
timestamp : int
The timestamp for the record in nanoseconds
"""
super().__init__(value, device_uuid, timestamp)
@staticmethod
def aggregation_function():
"""
The function that is used to aggregate
this type of record
"""
def average(seq):
return sum(seq[1:], start=seq[0]) / len(seq)
return average
@staticmethod
def header(aggregation_tag=False):
"""
Parameters
----------
aggregation_tag: bool
An optional tag that may be displayed
as part of the header indicating that
this record has been aggregated using
max, min or average etc.
Returns
-------
str
The full name of the
metric.
"""
return ("Average " if aggregation_tag else "") + "GPU FP32 Active (%)"
def __eq__(self, other):
"""
Allows checking for
equality between two records
"""
return self.value() == other.value()
def __lt__(self, other):
"""
Allows checking if
this record is less than
the other
"""
return self.value() < other.value()
def __add__(self, other):
"""
Allows adding two records together
to produce a brand new record.
"""
return GPUFP32Active(device_uuid=None,
value=(self.value() + other.value()))
def __sub__(self, other):
"""
Allows subtracting two records together
to produce a brand new record.
"""
return GPUFP32Active(device_uuid=None,
value=(self.value() - other.value()))
|
"""Scribe Uploader for Pytorch Benchmark Data
Currently supports data in pytest-benchmark format but can be extended.
New fields can be added just by modifying the schema in this file, schema
checking is only here to encourage reusing existing fields and avoiding typos.
"""
import argparse
import time
import multiprocessing
import json
import os
import requests
import subprocess
from collections import defaultdict
class ScribeUploader:
def __init__(self, category):
self.category = category
def format_message(self, field_dict):
assert 'time' in field_dict, "Missing required Scribe field 'time'"
message = defaultdict(dict)
for field, value in field_dict.items():
if field in self.schema['normal']:
message['normal'][field] = str(value)
elif field in self.schema['int']:
message['int'][field] = int(value)
elif field in self.schema['float']:
message['float'][field] = float(value)
else:
raise ValueError("Field {} is not currently used, "
"be intentional about adding new fields".format(field))
return message
def _upload_intern(self, messages: list):
for m in messages:
json_str = json.dumps(m)
cmd = ['scribe_cat', self.category, json_str]
subprocess.run(cmd)
def upload(self, messages: list):
if os.environ.get('SCRIBE_INTERN'):
return self._upload_intern(messages)
access_token = os.environ.get("SCRIBE_GRAPHQL_ACCESS_TOKEN")
if not access_token:
            raise ValueError("Can't find access token in environment variable SCRIBE_GRAPHQL_ACCESS_TOKEN")
url = "https://graph.facebook.com/scribe_logs"
r = requests.post(
url,
data={
"access_token": access_token,
"logs": json.dumps(
[
{
"category": self.category,
"message": json.dumps(message),
"line_escape": False,
}
for message in messages
]
),
},
)
print(r.text)
r.raise_for_status()
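# Note (illustrative): format_message() above buckets each field by its declared
# type, producing a message shaped like
#   {"normal": {...string fields...}, "int": {...}, "float": {...}}
# which is then JSON-encoded and sent to Scribe by upload().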
class PytorchBenchmarkUploader(ScribeUploader):
def __init__(self):
super().__init__('perfpipe_pytorch_benchmarks')
self.schema = {
'int': [
'time', 'rounds',
],
'normal': [
'benchmark_group', 'benchmark_name',
'benchmark_class', 'benchmark_time',
'git_repo', 'git_commit_id', 'git_branch',
'git_commit_time', 'git_dirty',
'pytorch_version', 'python_version',
'torchtext_version', 'torchvision_version',
'machine_kernel', 'machine_processor', 'machine_hostname',
'github_run_id', 'torchbench_score_version',
],
'float': [
'stddev', 'min', 'median', 'max', 'mean', 'runtime',
'torchbench_score',
'torchbench_score_jit_speedup',
'torchbench_subscore_cpu_train',
'torchbench_subscore_cpu_infer',
'torchbench_subscore_gpu_train',
'torchbench_subscore_gpu_infer',
]
}
def post_pytest_benchmarks(self, pytest_json, max_data_upload=100):
machine_info = pytest_json['machine_info']
commit_info = pytest_json['commit_info']
upload_time = int(time.time())
messages = []
for b in pytest_json['benchmarks']:
base_msg = {
"time": upload_time,
"benchmark_group": b['group'],
"benchmark_name": b['name'],
"benchmark_class": b['fullname'],
"benchmark_time": pytest_json['datetime'],
"git_repo": commit_info['project'],
"git_commit_id": commit_info['id'],
"git_branch": commit_info['branch'],
"git_commit_time": commit_info['time'],
"git_dirty": commit_info['dirty'],
"pytorch_version": machine_info.get('pytorch_version', None),
"torchtext_version": machine_info.get('torchtext_version', None),
"torchvision_version": machine_info.get('torchvision_version', None),
"python_version": machine_info['python_implementation_version'],
"machine_kernel": machine_info['release'],
"machine_processor": machine_info['processor'],
"machine_hostname": machine_info['node'],
"github_run_id": machine_info.get('github_run_id', None),
"torchbench_score_version": machine_info.get('torchbench_score_version', None),
}
stats_msg = {"stddev": b['stats']['stddev'],
"rounds": b['stats']['rounds'],
"min": b['stats']['min'],
"median": b['stats']['median'],
"max": b['stats']['max'],
"mean": b['stats']['mean'],
}
stats_msg.update(base_msg)
messages.append(self.format_message(stats_msg))
if 'data' in b['stats']:
for runtime in b['stats']['data'][:max_data_upload]:
runtime_msg = {"runtime": runtime}
runtime_msg.update(base_msg)
messages.append(self.format_message(runtime_msg))
self.upload(messages)
def post_torchbench_score(self, pytest_json, score):
machine_info = pytest_json['machine_info']
commit_info = pytest_json['commit_info']
upload_time = int(time.time())
scribe_message = {
"time": upload_time,
"benchmark_time": pytest_json['datetime'],
"git_repo": commit_info['project'],
"git_commit_id": commit_info['id'],
"git_branch": commit_info['branch'],
"git_commit_time": commit_info['time'],
"git_dirty": commit_info['dirty'],
"pytorch_version": machine_info.get('pytorch_version', None),
"torchtext_version": machine_info.get('torchtext_version', None),
"torchvision_version": machine_info.get('torchvision_version', None),
"python_version": machine_info['python_implementation_version'],
"machine_kernel": machine_info['release'],
"machine_processor": machine_info['processor'],
"machine_hostname": machine_info['node'],
"github_run_id": machine_info.get('github_run_id', None),
"torchbench_score_version": machine_info.get('torchbench_score_version', None),
"torchbench_score": score["score"]["total"],
"torchbench_score_jit_speedup": score["score"]["jit-speedup"],
"torchbench_subscore_cpu_train": score["score"]["subscore-cpu-train"],
"torchbench_subscore_cpu_infer": score["score"]["subscore-cpu-eval"],
"torchbench_subscore_gpu_train": score["score"]["subscore-cuda-train"],
"torchbench_subscore_gpu_infer": score["score"]["subscore-cuda-eval"],
}
m = self.format_message(scribe_message)
self.upload([m])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--pytest_bench_json", required=True,
type=argparse.FileType('r'),
help='Upload json data formatted by pytest-benchmark module')
parser.add_argument("--torchbench_score_file", required=True,
type=argparse.FileType('r'),
help="torchbench score file to include")
args = parser.parse_args()
# Result sanity check
json_name = os.path.basename(args.pytest_bench_json.name)
json_score = json.load(args.torchbench_score_file)
score_data = None
for data in json_score:
if os.path.basename(data["file"]) == json_name:
score_data = data
    assert score_data, f"Can't find {json_name} score in {args.torchbench_score_file.name}. Stop."
benchmark_uploader = PytorchBenchmarkUploader()
json_data = json.load(args.pytest_bench_json)
benchmark_uploader.post_pytest_benchmarks(json_data)
benchmark_uploader.post_torchbench_score(json_data, score_data)
|
import os
import re
import requests
from bs4 import BeautifulSoup
from collections import defaultdict
from datetime import datetime, date, timedelta
from pathlib import Path
torch_wheel_cuda_version = "cu113"
torch_wheel_python_version = "cp38"
torch_wheel_platform = "linux_x86_64"
torch_wheel_nightly_base = f"https://download.pytorch.org/whl/nightly/{torch_wheel_cuda_version}/"
torch_nightly_wheel_index = f"https://download.pytorch.org/whl/nightly/{torch_wheel_cuda_version}/torch_nightly.html"
def memoize(function):
"""
"""
call_cache = {}
def memoized_function(*f_args):
if f_args in call_cache:
return call_cache[f_args]
call_cache[f_args] = result = function(*f_args)
return result
return memoized_function
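# Illustrative sketch (hypothetical function, not used elsewhere): repeated calls
# with the same positional arguments are served from the cache instead of
# re-running the function body.
@memoize
def _example_square(x):
    return x * x
# _example_square(3) computes 9 once; a second _example_square(3) hits the cache.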
@memoize
def get_wheel_index_data(py_version, platform_version, url=torch_nightly_wheel_index):
"""
"""
r = requests.get(url)
r.raise_for_status()
soup = BeautifulSoup(r.text, 'html.parser')
    links = soup.find_all('a')
    data = defaultdict(dict)
    for link in links:
        pkg, version, py, py_m, platform = re.search(r"([a-z_]*)-(.*)-(.*)-(.*)-(.*)\.whl", link.text).groups()
if py == py_version and platform == platform_version:
full_url = os.path.join(torch_wheel_nightly_base, link.text)
data[pkg][version] = full_url
return data
def get_nightly_wheel_urls(packages:list, date:date,
py_version=torch_wheel_python_version, platform_version=torch_wheel_platform):
"""Gets urls to wheels for specified packages matching the date, py_version, platform_version
"""
date_str = f"{date.year}{date.month:02}{date.day:02}"
data = get_wheel_index_data(py_version, platform_version)
dbg_key = None
versions = []
for pkg in packages:
pkg_versions = data[pkg]
keys = [key for key in pkg_versions if date_str in key]
assert len(keys) <= 1, "Did not expect multiple versions matching a date"
if len(keys) == 0:
return None
if pkg == 'torch':
dbg_key = keys[0]
full_url = pkg_versions[keys[0]]
versions.append(full_url)
#print(f" \"{dbg_key}\" \\")
return tuple(versions)
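# Illustrative sketch (not invoked anywhere): fetch the nightly wheel URLs for a
# single day; get_nightly_wheel_urls() returns None when no matching nightly
# wheels were published on that date. The date below is arbitrary.
def _example_print_wheels_for_one_day():
    urls = get_nightly_wheel_urls(["torch", "torchvision"], date(2022, 6, 1))
    if urls:
        print("\n".join(urls))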
def get_nightly_wheels_in_range(packages:list, start_date:date, end_date:date,
py_version=torch_wheel_python_version, platform_version=torch_wheel_platform):
rc = []
curr_date = start_date
while curr_date < end_date:
curr_wheels = get_nightly_wheel_urls(packages, curr_date,
py_version=py_version,
platform_version=platform_version)
if curr_wheels is not None:
rc.append(curr_wheels)
curr_date += timedelta(days=1)
return rc
def get_n_prior_nightly_wheels(packages:list, n:int,
py_version=torch_wheel_python_version, platform_version=torch_wheel_platform):
end_date = date.today()
start_date = end_date - timedelta(days=n)
return get_nightly_wheels_in_range(packages, start_date, end_date,
py_version=py_version, platform_version=platform_version)
def create_requirements_files(root: Path, packages: list, start_date: date, end_date: date,
py_version=torch_wheel_python_version, platform_version=torch_wheel_platform):
root = Path(root)
curr_date = start_date
while curr_date < end_date:
curr_wheels = get_nightly_wheel_urls(packages, curr_date,
py_version=py_version,
platform_version=platform_version)
if curr_wheels is not None:
filename = root / f"requirements-{str(curr_date)}.txt"
with open(filename, 'w') as f:
for pkg in curr_wheels:
f.write(pkg + '\n')
curr_date += timedelta(days=1)
def parse_date_str(s: str):
return datetime.strptime(s, '%Y%m%d').date()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('action', choices=['create_requirements'])
parser.add_argument('--start_date', type=parse_date_str)
parser.add_argument('--end_date', default=date.today(),
type=parse_date_str)
parser.add_argument('--packages', nargs='+', default=['torch', 'torchvision', 'torchtext'])
parser.add_argument('--output_dir')
args = parser.parse_args()
if args.action == 'create_requirements':
assert args.start_date is not None
assert args.end_date is not None
assert args.output_dir is not None
assert not os.path.exists(args.output_dir), "provide non-existing output dir"
os.mkdir(args.output_dir)
create_requirements_files(args.output_dir, args.packages, args.start_date, args.end_date)
|
import argparse
import pathlib
import yaml
CORE_MODEL_PATH = pathlib.Path(__file__).parent.parent.absolute().joinpath("torchbenchmark", "models")
def get_model_list():
models = list(map(lambda x: x.name, filter(lambda x: x.is_dir(), CORE_MODEL_PATH.iterdir())))
return models
def check_csv_file(csv_file, known_models):
    optimal_bsizes = {}
with open(csv_file, "r") as cf:
opt_csv = cf.readlines()
for line in opt_csv:
model, bsize = line.split(",")
bsize = int(bsize)
        if bsize != 0:
            optimal_bsizes[model] = bsize
            assert model in known_models, f"Model {model} is not covered in the TorchBench core model list."
    return optimal_bsizes
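# Illustrative sketch (hypothetical file and model names): check_csv_file()
# expects "model,batch_size" rows and ignores rows whose batch size is 0.
def _example_check_csv(tmp_csv="example_bsizes.csv"):
    with open(tmp_csv, "w") as f:
        f.write("resnet50,32\nsome_skipped_model,0\n")
    return check_csv_file(tmp_csv, known_models=["resnet50", "some_skipped_model"])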
def update_model_optimal_bsizes(device, new_bsize, known_models):
    # get the metadata of the existing model
for model in new_bsize:
metadata_path = CORE_MODEL_PATH.joinpath(model).joinpath("metadata.yaml")
with open(metadata_path, "r") as mp:
metadata = yaml.safe_load(mp)
if not "devices" in metadata or device not in metadata["devices"]:
metadata["devices"] = {}
metadata["devices"][device] = {}
metadata["devices"][device]["eval_batch_size"] = new_bsize[model]
with open(metadata_path, "w") as mp:
yaml.safe_dump(metadata, mp)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--device", required=True, type=str, help="Name of the device")
parser.add_argument("--optimal-bsize-csv", required=True, type=str, help="Optimal Batchsize CSV file")
args = parser.parse_args()
known_models = get_model_list()
new_bsize = check_csv_file(args.optimal_bsize_csv, known_models)
update_model_optimal_bsizes(args.device, new_bsize, known_models)
|
"""
Dump the contents of a pytest benchmark .json file.
"""
import argparse
import json
from tabulate import tabulate
def print_benchmark_stats(data):
print_stats = ['min', 'max', 'mean', 'stddev', 'rounds', 'median']
headers = ['name'] + print_stats
rows = []
for benchmark in data['benchmarks']:
row = [benchmark['name']]
row += [benchmark['stats'][k] for k in print_stats]
rows.append(row)
print(tabulate(rows, headers=headers))
print()
def print_kv_table(table_name, data):
headers = [table_name, '']
rows = [(k, data[k]) for k in data]
print(tabulate(rows, headers=headers))
print()
def print_other_info(data):
print_kv_table('Machine Info', data['machine_info'])
print_kv_table('Commit Info', data['commit_info'])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("json_file")
parser.add_argument("--table", default="benchmarks",
choices=['benchmarks', 'other'],
help="which section of the json file to tablify")
args = parser.parse_args()
with open(args.json_file) as f:
data = json.load(f)
if args.table == 'benchmarks':
print_benchmark_stats(data)
elif args.table == 'other':
print_other_info(data) |
import pathlib
import torch
from typing import Optional, List, Tuple
from torchbenchmark import ModelTask
import os
import sys
import time
import numpy
from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
from run_sweep import WORKER_TIMEOUT, WARMUP_ROUNDS, ModelTestResult, NANOSECONDS_PER_MILLISECONDS
def run_one_step_flops(func, device: str, nwarmup=WARMUP_ROUNDS, num_iter=10, flops=True) -> Tuple[float, Optional[float]]:
    "Run one step of the model and return (median latency in milliseconds, measured TFLOPS or None when flops is disabled)."
# Warm-up `nwarmup` rounds
for _i in range(nwarmup):
func()
result_summary = []
if flops:
model_analyzer = ModelAnalyzer()
model_analyzer.start_monitor()
model_analyzer.set_monitoring_interval(0.01)
for _i in range(num_iter):
if device == "cuda":
torch.cuda.synchronize()
            # Use time_ns() instead of time(), which may not provide better precision
            # than 1 second, according to https://docs.python.org/3/library/time.html#time.time.
t0 = time.time_ns()
func()
torch.cuda.synchronize() # Wait for the events to be recorded!
t1 = time.time_ns()
else:
t0 = time.time_ns()
func()
t1 = time.time_ns()
result_summary.append((t1 - t0) / NANOSECONDS_PER_MILLISECONDS)
    tflops = None
    if flops:
        model_analyzer.stop_monitor()
        model_analyzer.aggregate()
        tflops = model_analyzer.calculate_flops()
    wall_latency = numpy.median(result_summary)
    return (wall_latency, tflops)
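# Illustrative sketch (hypothetical callable, not invoked anywhere): time an
# arbitrary function on CPU with FLOPS collection disabled; for a real model,
# func would be task.invoke as in the sweep below.
def _example_time_callable():
    latency_ms, tflops = run_one_step_flops(lambda: sum(range(10000)), device="cpu", flops=False)
    return latency_ms, tflops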
def _run_model_test_proper_bs(model_path: pathlib.Path, test: str, device: str, jit: bool, batch_size: Optional[int], extra_args: List[str]) -> ModelTestResult:
assert test == "train" or test == "eval", f"Test must be either 'train' or 'eval', but get {test}."
result = ModelTestResult(name=model_path.name, test=test, device=device, extra_args=extra_args, batch_size=None, precision="fp32",
status="OK", results={})
# Run the benchmark test in a separate process
print(f"Running model {model_path.name} ... ", flush=True)
status: str = "OK"
bs_name = "batch_size"
correctness_name = "correctness"
error_message: Optional[str] = None
result.results['details'] = []
task = ModelTask(os.path.basename(model_path), timeout=WORKER_TIMEOUT)
MAX_EXP = 30
for batch_size_exp in range(MAX_EXP):
batch_size = 2 ** batch_size_exp
try:
print(f"Batch Size {batch_size} ", end='')
latency_ms_cur = 0
if not task.model_details.exists:
status = "NotExist"
return
task.make_model_instance(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
if task.get_model_attribute("ALLOW_CUSTOMIZE_BSIZE") == False:
raise ValueError(f"Model does not support tuning batch size")
result.precision = task.get_model_attribute("dargs", "precision")
# Check the batch size in the model matches the specified value
if batch_size and (not task.get_model_attribute(bs_name) == batch_size):
raise ValueError(f"User specify batch size {batch_size}, but model {result.name} runs with batch size {task.get_model_attribute(bs_name)}. Please report a bug.")
latency_ms_cur, tflops_cur = run_one_step_flops(task.invoke, device)
latency_ms_cur = latency_ms_cur / batch_size
result.results['details'].append({'batch_size': batch_size, "latency_ms": latency_ms_cur, "tflops": tflops_cur})
# if the model provides eager eval result, save it for cosine similarity
correctness = task.get_model_attribute(correctness_name)
if correctness is not None:
result.results[correctness_name] = correctness
except NotImplementedError as e:
status = "NotImplemented"
error_message = str(e)
except TypeError as e: # TypeError is raised when the model doesn't support variable batch sizes
status = "TypeError"
error_message = str(e)
except KeyboardInterrupt as e:
status = "UserInterrupted"
error_message = str(e)
except ValueError as e:
status = "ValueError"
error_message = str(e)
except Exception as e:
status = f"{type(e).__name__}"
error_message = str(e)
finally:
print(f"[ {status} ]")
result.status = status
if error_message:
result.results["error_message"] = error_message
if status == "UserInterrupted":
sys.exit(1)
if status != 'OK':
if result.results['details']:
result.results['optimal_tflops_bs'] = max(result.results['details'], key=lambda x:x['tflops'])['batch_size']
return result
# find the best case
result.results['optimal_tflops_bs'] = max(result.results['details'], key=lambda x:x['tflops'])['batch_size']
return result |
"""Scribe Uploader for Pytorch Benchmark V2 Data
Currently supports data in pytest-benchmark format but can be extended.
New fields can be added just by modifying the schema in this file, schema
checking is only here to encourage reusing existing fields and avoiding typos.
"""
import argparse
import time
import multiprocessing
import json
import os
import requests
import subprocess
from collections import defaultdict
TORCHBENCH_V2_SCORE_SCHEMA = [
'total',
'delta',
'cuda-train-overall',
'cuda-train-nlp',
'cuda-train-classification',
'cuda-train-segmentation',
'cuda-train-speech',
'cuda-train-recommendation',
'cuda-eval-overall',
'cuda-eval-nlp',
'cuda-eval-classification',
'cuda-eval-segmentation',
'cuda-eval-speech',
'cuda-eval-recommendation',
'cpu-train-overall',
'cpu-train-nlp',
'cpu-train-classification',
'cpu-train-segmentation',
'cpu-train-speech',
'cpu-train-recommendation',
'cpu-eval-overall',
'cpu-eval-nlp',
'cpu-eval-classification',
'cpu-eval-segmentation',
'cpu-eval-speech',
'cpu-eval-recommendation',
]
def decorate_torchbench_score_schema(schema):
return f"torchbench_score_{schema}"
class ScribeUploader:
def __init__(self, category):
self.category = category
def format_message(self, field_dict):
assert 'time' in field_dict, "Missing required Scribe field 'time'"
message = defaultdict(dict)
for field, value in field_dict.items():
if field in self.schema['normal']:
message['normal'][field] = str(value)
elif field in self.schema['int']:
message['int'][field] = int(value)
elif field in self.schema['float']:
message['float'][field] = float(value)
else:
raise ValueError("Field {} is not currently used, "
"be intentional about adding new fields".format(field))
return message
def _upload_intern(self, messages: list):
for m in messages:
json_str = json.dumps(m)
cmd = ['scribe_cat', self.category, json_str]
subprocess.run(cmd)
def upload(self, messages: list):
if os.environ.get('SCRIBE_INTERN'):
return self._upload_intern(messages)
access_token = os.environ.get("SCRIBE_GRAPHQL_ACCESS_TOKEN")
if not access_token:
raise ValueError("Can't find access token from environment variable")
url = "https://graph.facebook.com/scribe_logs"
r = requests.post(
url,
data={
"access_token": access_token,
"logs": json.dumps(
[
{
"category": self.category,
"message": json.dumps(message),
"line_escape": False,
}
for message in messages
]
),
},
)
print(r.text)
r.raise_for_status()
class PytorchBenchmarkUploader(ScribeUploader):
def __init__(self):
super().__init__('perfpipe_pytorch_benchmarks')
self.schema = {
'int': [
'time', 'rounds',
],
'normal': [
'benchmark_group', 'benchmark_name',
'benchmark_class', 'benchmark_time',
'git_repo', 'git_commit_id', 'git_branch',
'git_commit_time', 'git_dirty',
'pytorch_version', 'python_version',
'torchtext_version', 'torchvision_version',
'machine_kernel', 'machine_processor', 'machine_hostname',
'github_run_id', 'torchbench_score_version',
],
'float': [
'stddev', 'min', 'median', 'max', 'mean', 'runtime',
]
}
# Append the TorchBench score schema
self.schema['float'].extend(list(map(decorate_torchbench_score_schema, TORCHBENCH_V2_SCORE_SCHEMA)))
def post_pytest_benchmarks(self, pytest_json, max_data_upload=100):
machine_info = pytest_json['machine_info']
commit_info = pytest_json['commit_info']
upload_time = int(time.time())
messages = []
for b in pytest_json['benchmarks']:
base_msg = {
"time": upload_time,
"benchmark_group": b['group'],
"benchmark_name": b['name'],
"benchmark_class": b['fullname'],
"benchmark_time": pytest_json['datetime'],
"git_repo": commit_info['project'],
"git_commit_id": commit_info['id'],
"git_branch": commit_info['branch'],
"git_commit_time": commit_info['time'],
"git_dirty": commit_info['dirty'],
"pytorch_version": machine_info.get('pytorch_version', None),
"torchtext_version": machine_info.get('torchtext_version', None),
"torchvision_version": machine_info.get('torchvision_version', None),
"python_version": machine_info['python_implementation_version'],
"machine_kernel": machine_info['release'],
"machine_processor": machine_info['processor'],
"machine_hostname": machine_info['node'],
"github_run_id": machine_info.get('github_run_id', None),
"torchbench_score_version": machine_info.get('torchbench_score_version', None),
}
stats_msg = {"stddev": b['stats']['stddev'],
"rounds": b['stats']['rounds'],
"min": b['stats']['min'],
"median": b['stats']['median'],
"max": b['stats']['max'],
"mean": b['stats']['mean'],
}
stats_msg.update(base_msg)
messages.append(self.format_message(stats_msg))
if 'data' in b['stats']:
for runtime in b['stats']['data'][:max_data_upload]:
runtime_msg = {"runtime": runtime}
runtime_msg.update(base_msg)
messages.append(self.format_message(runtime_msg))
self.upload(messages)
def post_torchbench_score(self, pytest_json, score):
machine_info = pytest_json['machine_info']
commit_info = pytest_json['commit_info']
upload_time = int(time.time())
scribe_message = {
"time": upload_time,
"benchmark_time": pytest_json['datetime'],
"git_repo": commit_info['project'],
"git_commit_id": commit_info['id'],
"git_branch": commit_info['branch'],
"git_commit_time": commit_info['time'],
"git_dirty": commit_info['dirty'],
"pytorch_version": machine_info.get('pytorch_version', None),
"torchtext_version": machine_info.get('torchtext_version', None),
"torchvision_version": machine_info.get('torchvision_version', None),
"python_version": machine_info['python_implementation_version'],
"machine_kernel": machine_info['release'],
"machine_processor": machine_info['processor'],
"machine_hostname": machine_info['node'],
"github_run_id": machine_info.get('github_run_id', None),
"torchbench_score_version": machine_info.get('torchbench_score_version', None),
}
for s in TORCHBENCH_V2_SCORE_SCHEMA:
decorated_schema = decorate_torchbench_score_schema(s)
if s == "total" or s == "delta":
scribe_message[decorated_schema] = score["score"][s]
else:
scribe_message[decorated_schema] = score["score"]["domain"][s]
m = self.format_message(scribe_message)
self.upload([m])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--pytest_bench_json", required=True,
type=argparse.FileType('r'),
help='Upload json data formatted by pytest-benchmark module')
parser.add_argument("--torchbench_score_file", required=True,
type=argparse.FileType('r'),
help="torchbench score file to include")
args = parser.parse_args()
# Result sanity check
json_name = os.path.basename(args.pytest_bench_json.name)
json_score = json.load(args.torchbench_score_file)
score_data = None
for data in json_score:
if os.path.basename(data["file"]) == json_name:
score_data = data
    assert score_data, f"Can't find {json_name} score in {args.torchbench_score_file.name}. Stop."
benchmark_uploader = PytorchBenchmarkUploader()
json_data = json.load(args.pytest_bench_json)
benchmark_uploader.post_pytest_benchmarks(json_data)
benchmark_uploader.post_torchbench_score(json_data, score_data)
|
"""Scribe Uploader for Pytorch Benchmark Data
Currently supports userbenchmark result json file.
"""
import argparse
import time
import json
import os
import requests
from collections import defaultdict
from datetime import datetime
def get_metrics_date_from_file(fname: str) -> str:
bname = os.path.basename(fname)
dt = datetime.strptime(bname, "metrics-%Y%m%d%H%M%S.json")
return dt.strftime("%Y-%m-%d")
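# Illustrative example: a file named "metrics-20220805192500.json" maps to the
# benchmark date string "2022-08-05".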
class ScribeUploader:
def __init__(self, category):
self.category = category
def format_message(self, field_dict):
assert 'time' in field_dict, "Missing required Scribe field 'time'"
message = defaultdict(dict)
for field, value in field_dict.items():
if field in self.schema['normal']:
message['normal'][field] = str(value)
elif field in self.schema['int']:
message['int'][field] = int(value)
elif field in self.schema['float']:
message['float'][field] = float(value)
else:
raise ValueError("Field {} is not currently used, "
"be intentional about adding new fields".format(field))
return message
def upload(self, messages: list):
access_token = os.environ.get("TORCHBENCH_USERBENCHMARK_SCRIBE_GRAPHQL_ACCESS_TOKEN")
if not access_token:
raise ValueError("Can't find access token from environment variable")
url = "https://graph.facebook.com/scribe_logs"
r = requests.post(
url,
data={
"access_token": access_token,
"logs": json.dumps(
[
{
"category": self.category,
"message": json.dumps(message),
"line_escape": False,
}
for message in messages
]
),
},
)
print(r.text)
r.raise_for_status()
class TorchBenchUserbenchmarkUploader(ScribeUploader):
CLIENT_NAME = 'torchbench_userbenchmark_upload_scribe.py'
# We use the UNIX_USER field to store the name of the benchmark platform
UNIX_USER = None
SUBMISSION_GROUP_GUID = 'oss-ci'
def __init__(self, platform_name):
super().__init__('perfpipe_pytorch_user_benchmarks')
        assert platform_name, "A non-empty platform_name is required."
self.UNIX_USER = f"torchbench_userbenchmark_{platform_name}_ci"
self.schema = {
'int': [
'time', # timestamp of upload
],
# string fields
'normal': [
'benchmark_date', # date of benchmark
'client_name', # name of upload client (logger)
'unix_user', # name of upload user
'submission_group_guid', # name of data batch (for debugging)
'pytorch_git_version', # pytorch version
'metric_id', # id of the metric (e.g., userbenchmark.nvfuser.nvfuser:autogen-42)
],
# float perf metrics go here
'float': [
'metric_value'
]
}
def get_metric_name(self, bm_name, metric_name):
return f"userbenchmark.{bm_name}.{metric_name}"
def post_userbenchmark_results(self, bm_time, bm_data):
messages = []
bm_name = bm_data["name"]
base_message = {
'time': int(time.time()),
'benchmark_date': bm_time,
'client_name': self.CLIENT_NAME,
'unix_user': self.UNIX_USER,
'submission_group_guid': self.SUBMISSION_GROUP_GUID,
'pytorch_git_version': bm_data["environ"]["pytorch_git_version"]
}
# construct message and upload
for metric in bm_data["metrics"]:
msg = base_message.copy()
metric_name = self.get_metric_name(bm_name, metric)
msg['metric_id'] = metric_name
msg['metric_value'] = bm_data['metrics'][metric]
formatted_msg = self.format_message(msg)
messages.append(formatted_msg)
# print(messages)
self.upload(messages)
def process_benchmark_json(userbenchmark_json):
# Result sanity check
json_name = os.path.basename(userbenchmark_json.name)
benchmark_time = get_metrics_date_from_file(json_name)
benchmark_data = json.load(userbenchmark_json)
return benchmark_time, benchmark_data
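# Illustrative sketch of the json shape this uploader consumes (values are
# hypothetical); only "name", "environ.pytorch_git_version" and "metrics" are
# read by post_userbenchmark_results():
#
#   {
#     "name": "nvfuser",
#     "environ": {"pytorch_git_version": "abc1234def"},
#     "metrics": {"nvfuser:autogen-42": 1.23}
#   }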
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--userbenchmark_platform", required=True,
help='Name of the userbenchmark platform')
parser.add_argument("--userbenchmark_json", required=True,
type=argparse.FileType('r'),
help='Upload userbenchmark json data')
args = parser.parse_args()
benchmark_time, benchmark_data = process_benchmark_json(args.userbenchmark_json)
# use uploader
uploader = TorchBenchUserbenchmarkUploader(args.userbenchmark_platform)
uploader.post_userbenchmark_results(benchmark_time, benchmark_data)
|
import argparse
import json
import sys
from pathlib import Path
from datetime import datetime
REPO_ROOT = Path(__file__).parent.parent.parent.resolve()
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
with add_path(str(REPO_ROOT)):
from utils.s3_utils import S3Client, USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT
from userbenchmark.utils import get_date_from_metrics, get_ub_name
def upload_s3(ub_name: str, platform_name: str, date_str: str, file_path: Path):
"""S3 path:
s3://ossci-metrics/torchbench_userbenchmark/<userbenchmark-name>/<platform-name>/<date>/metrics-<time>.json"""
s3client = S3Client(USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT)
prefix = f"{ub_name}/{platform_name}/{date_str}"
s3client.upload_file(prefix=prefix, file_path=file_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--userbenchmark_platform", required=True,
help='Name of the userbenchmark platform')
parser.add_argument("--userbenchmark_json", required=True,
help='Upload userbenchmark json data')
args = parser.parse_args()
json_path = Path(args.userbenchmark_json)
assert json_path.exists(), f"Specified result json path {args.userbenchmark_json} does not exist."
date_str = get_date_from_metrics(json_path.stem)
ub_name = get_ub_name(args.userbenchmark_json)
upload_s3(ub_name, args.userbenchmark_platform, date_str, json_path)
|
"""
This script reads from a PyTorch benchmarking directory and generates a yaml file
that drives the bisector to run tests on the specified PyTorch commits.
This only works on V1 and later benchmark, V0 is not supported.
"""
import os
import re
import git
import json
import yaml
import argparse
import dataclasses
from pathlib import Path
# We will generate bisection config for tests with performance change > 7%
PERF_CHANGE_THRESHOLD = 7
# Assume the nightly branch commit message is in the following format
# Hash in the parentheses links to the commit on the master branch
NIGHTLY_COMMIT_MSG = r"nightly release \((.*)\)"
# Timeout of the bisection job in hours
PERF_TEST_TIMEOUT_THRESHOLD = 120
GITHUB_ISSUE_TEMPLATE = """
TorchBench CI has detected a performance signal.
Base PyTorch version: {start_version}
Base PyTorch commit: {start}
Affected PyTorch version: {end_version}
Affected PyTorch commit: {end}
Affected Tests:
{test_details}
cc @xuzhao9
"""
@dataclasses.dataclass
class PyTorchVer:
version: str
commit: str
def exist_dir_path(string):
if os.path.isdir(string):
return string
else:
raise NotADirectoryError(string)
def get_pytorch_main_commit(pytorch_repo, nightly_commit):
repo = git.Repo(pytorch_repo)
msg = repo.commit(nightly_commit).message
# There are two possibilities of the hash `nightly_commit`:
# 1. The hash belongs to the nightly branch
# If so, the git commit message should match `NIGHTLY_COMMIT_MSG`
# 2. The hash belongs to the master/main branch
# We can directly use this hash in this case
nightly_commit_regex = re.compile(NIGHTLY_COMMIT_MSG)
search_result = nightly_commit_regex.search(msg)
if search_result:
return search_result.group(1)
    # Otherwise, assume the commit belongs to the master/main branch
# Unfortunately, there is no way to map a commit back to a branch with gitpython
return nightly_commit
def find_latest_nonempty_json(path):
json_files = list(filter(lambda x: x.endswith(".json"), os.listdir(path)))
json_files.sort(reverse=True)
for f in json_files:
# Return the first non-empty json file
json_path = os.path.join(path, f)
if os.path.exists(json_path) and os.stat(json_path).st_size:
return json_path
print(f"Can't find non-empty json files in path: {path}")
return None
def get_pytorch_version(pytorch_src_path, json_path):
with open(json_path, "r") as json_obj:
bm_result = json.load(json_obj)
nightly_git_version = bm_result["machine_info"]["pytorch_git_version"]
# get main git commit by git version
main_commit = get_pytorch_main_commit(pytorch_src_path, nightly_git_version)
pytorch_ver = PyTorchVer(version=bm_result["machine_info"]["pytorch_version"],
commit=main_commit)
return pytorch_ver
def get_workflow_id(workflow_dir):
prefix = "gh"
prefix_loc = workflow_dir.find(prefix)
return int(workflow_dir[prefix_loc + len(prefix):])
# Compare the tests and generate a list of tests whose perf change larger than threshold
def generate_bisection_tests(base, tip):
def get_test_stats(bm):
ret = {}
for benchmark in bm["benchmarks"]:
name = benchmark["name"]
ret[name] = benchmark["stats"]["mean"]
return ret
base_tests = get_test_stats(base)
tip_tests = get_test_stats(tip)
signals = []
signal_details = {}
for benchmark, tip_latency in tip_tests.items():
base_latency = base_tests.get(benchmark, None)
if base_latency is None:
# This benchmark is new or was failing, so there is no prior point
# of reference against which to compare.
continue
delta_percent = (tip_latency - base_latency) / base_latency * 100
if abs(delta_percent) >= PERF_CHANGE_THRESHOLD:
signals.append(benchmark)
signal_details[benchmark] = delta_percent
return (signals, signal_details)
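# Worked example (hypothetical numbers): base latency 100 ms and tip latency
# 110 ms give delta_percent = (110 - 100) / 100 * 100 = +10%, which exceeds
# PERF_CHANGE_THRESHOLD (7) and therefore flags the test for bisection.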
def generate_bisection_config(base_file, tip_file, base_commit, tip_commit):
result = {}
with open(base_file, "r") as bf:
base = json.load(bf)
with open(tip_file, "r") as tf:
tip = json.load(tf)
result["start_version"] = base["machine_info"]["pytorch_version"]
result["start_git_version"] = base["machine_info"]["pytorch_git_version"]
result["start"] = base_commit
result["end_version"] = tip["machine_info"]["pytorch_version"]
result["end_git_version"] = tip["machine_info"]["pytorch_git_version"]
result["end"] = tip_commit
result["threshold"] = PERF_CHANGE_THRESHOLD
result["direction"] = "both"
result["timeout"] = PERF_TEST_TIMEOUT_THRESHOLD
(result["tests"], result["details"]) = generate_bisection_tests(base, tip)
if not result["tests"]:
del result["tests"]
del result["details"]
return result
def generate_gh_issue(ghi_fpath, result):
ghi_config = result
ghi_config["test_details"] = ""
for test, delta in result["details"].items():
sign = "+" if delta > 0 else ""
ghi_config["test_details"] += f"- {test}: {sign}{delta:.5f}%\n"
ghi_body = GITHUB_ISSUE_TEMPLATE.format(**ghi_config)
with open(ghi_fpath, "w") as f:
f.write(ghi_body)
# Setup TORCHBENCH_PERF_SIGNAL to enable follow-up steps
def setup_gh_env(affected_pytorch_version):
fname = os.environ["GITHUB_ENV"]
content = f"TORCHBENCH_PERF_SIGNAL='{affected_pytorch_version}'\n"
with open(fname, 'a') as fo:
fo.write(content)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--pytorch-dir",
required=True,
help="PyTorch source directory",
type=exist_dir_path)
parser.add_argument("--benchmark-dir",
required=True,
help="PyTorch benchmark result directory",
type=exist_dir_path)
parser.add_argument("--out",
required=True,
help="Result output file")
parser.add_argument("--github-issue",
help="Setup environment variables and GitHub Issue file for GitHub Actions")
args = parser.parse_args()
# input directory
input_dir = Path(args.benchmark_dir)
tip_json_file = find_latest_nonempty_json(input_dir)
assert tip_json_file, "The input benchmark directory must contain a non-empty json file!"
tip_version = get_pytorch_version(args.pytorch_dir, tip_json_file)
parent_dir = input_dir.parent
base_benchmark_dirs = list(filter(lambda x: get_workflow_id(x) < get_workflow_id(os.path.basename(input_dir)),
os.listdir(parent_dir)))
# Search from the latest to the earliest
base_benchmark_dirs.sort(reverse=True)
base_benchmark_paths = [ os.path.join(parent_dir, name) for name in base_benchmark_dirs if os.path.isdir(os.path.join(parent_dir, name)) ]
result = {}
# Use the latest benchmark result with a different version than tip
for bm in base_benchmark_paths:
json_file = find_latest_nonempty_json(bm)
if json_file:
base_version = get_pytorch_version(args.pytorch_dir, json_file)
if base_version.commit != tip_version.commit:
result = generate_bisection_config(json_file, tip_json_file, base_version.commit, tip_version.commit)
break
with open(args.out, "w") as fo:
yaml.dump(result, fo)
# If there is at least one regressing test, setup the Bisection GitHub Action workflow
if args.github_issue and "tests" in result:
setup_gh_env(result["end_version"])
generate_gh_issue(args.github_issue, result)
|
import os
import re
import tabulate
import argparse
MAGIC_PREFIX = "STABLE_TEST_MODEL: "
THRESHOLD = 7
def _parse_pr_body(body):
magic_lines = list(filter(lambda x: MAGIC_PREFIX == x[:len(MAGIC_PREFIX)], body.splitlines()))
if len(magic_lines):
return magic_lines[-1][len(MAGIC_PREFIX):].strip()
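# Illustrative example (hypothetical PR body): a body containing the line
# "STABLE_TEST_MODEL: resnet50" makes _parse_pr_body() return "resnet50";
# bodies without the magic prefix return None.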
def _parse_repeated_test_log(log, csv):
repeated_test_result = []
regex_keys = ["gpu", "cpu_dispatch", "cpu_total"]
regex_dict = {
"gpu": re.compile('GPU Time:\s*([0-9.]+) milliseconds'),
"cpu_dispatch": re.compile('CPU Dispatch Time:\s*([0-9.]+) milliseconds'),
"cpu_total": re.compile('CPU Total Wall Time:\s*([0-9.]+) milliseconds')
}
for line in log.splitlines():
matches = list(map(lambda x: None if not regex_dict[x].search(line) else regex_dict[x].search(line).groups(), regex_keys))
for x in range(len(matches)):
if matches[x]:
if x == 0:
repeated_test_result.append({})
repeated_test_result[-1][regex_keys[x]] = float(matches[x][0])
print(_visualize_repeated_test_result(regex_keys, repeated_test_result, csv))
cpu_total_times = list(map(lambda x: x["cpu_total"], repeated_test_result))
return cpu_total_times
def _visualize_repeated_test_result(keys, result, csv):
output = [["Run Number", "GPU Time", "CPU Dispatch Time", "Wall Time"]]
for index, res in enumerate(result):
r = [index]
for k in keys:
r.append(str(res[k]))
output.append(r)
if not csv:
return tabulate.tabulate(output, headers='firstrow')
else:
return "\n".join(map(lambda x: ",".join(x), output))
def _is_stable(total_times):
return ((max(total_times) - min(total_times)) / min(total_times) * 100) <= THRESHOLD
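# Worked example (hypothetical timings): wall times [100.0, 105.0] give a spread
# of (105 - 100) / 100 * 100 = 5%, which is within THRESHOLD (7), so the run is
# considered stable; [100.0, 110.0] would fail the check.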
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pr-body", type=argparse.FileType("r"))
parser.add_argument("--log", type=argparse.FileType("r"))
parser.add_argument("--csv", action='store_true')
args = parser.parse_args()
if args.pr_body:
body = args.pr_body.read()
model = _parse_pr_body(body)
print(model)
if args.log:
log = args.log.read()
cpu_total_times = _parse_repeated_test_log(log, args.csv)
if not _is_stable(cpu_total_times):
print("GPU stability test failed. Please fix the model code and re-run the test.")
exit(1)
|
"""
This script runs userbenchmarks abtest upon two PyTorch versions.
"""
import argparse
import os
import subprocess
import shutil
import sys
import json
from pathlib import Path
from bmutils import REPO_ROOT, add_path
from typing import Dict, Optional
with add_path(REPO_ROOT):
import torchbenchmark.util.gitutils as gitutils
from userbenchmark import list_userbenchmarks
from utils.cuda_utils import prepare_cuda_env, DEFAULT_CUDA_VERSION
USERBENCHMARK_OUTPUT_PATH = os.path.join(REPO_ROOT, ".userbenchmark")
# only preserve the first 10 chars of the git hash
GIT_HASH_LEN = 10
def cleanup():
print("Cleaning up torch packages...", end="", flush=True)
CLEANUP_ROUND = 5
# Clean up multiple times to make sure the packages are all uninstalled
for _ in range(CLEANUP_ROUND):
command = ["pip", "uninstall", "-y", "torch"]
subprocess.check_call(command, shell=False)
print("done")
def run_commit(repo_path: str, env: os._Environ, commit: str, bm_name: str, skip_build: bool=False) -> Path:
"Run the userbenchmark on the commit. Return the metrics output file path."
# build the pytorch commit if required
if not skip_build:
cleanup()
build_pytorch_commit(repo_path, commit, cuda_env=env)
# run_benchmark
return run_benchmark(bm_name, cuda_env=env)
def validate_benchmark_output(bm_output: Path, bm_name: str):
with open(bm_output, "r") as bmobj:
output = json.load(bmobj)
assert output["name"] == bm_name, f"Expected benchmark name {bm_name}, getting {output['name']}."
assert "environ" in output and "pytorch_git_version" in output["environ"], \
f"Missing pytorch git version in {bm_output}."
assert "metrics" in output, f"Missing definition of metrics in {bm_output}."
def run_benchmark(bm_name: str, cuda_env: os._Environ) -> Path:
def find_latest_output(p: str) -> Optional[Path]:
if not os.path.exists(p) or not os.path.isdir(p):
return None
json_files = [ os.path.join(p, jf) for jf in sorted(os.listdir(p)) if jf.endswith(".json") ]
if len(json_files) == 0:
return None
return json_files[-1]
command = [sys.executable, "run_benchmark.py", bm_name]
try:
subprocess.check_call(command, env=cuda_env, cwd=REPO_ROOT, shell=False)
except subprocess.CalledProcessError as e:
print(f"Failed to call userbenchmark {command}. Error: {e}")
sys.exit(1)
output_path = os.path.join(USERBENCHMARK_OUTPUT_PATH, bm_name)
output_file = find_latest_output(output_path)
if not output_file:
print(f"Benchmark {bm_name} didn't print any output. Exit.")
sys.exit(1)
validate_benchmark_output(output_file, bm_name)
return output_file
def setup_build_env(env) -> Dict[str, str]:
env["USE_CUDA"] = "1"
env["BUILD_CAFFE2_OPS"] = "0"
# Do not build the test
env["BUILD_TEST"] = "0"
env["USE_MKLDNN"] = "1"
env["USE_MKL"] = "1"
env["USE_CUDNN"] = "1"
env["CMAKE_PREFIX_PATH"] = env["CONDA_PREFIX"]
return env
def build_pytorch_commit(repo_path: str, commit: str, cuda_env: os._Environ):
# checkout pytorch commit
print(f"Checking out pytorch commit {commit} ...", end="", flush=True)
if not gitutils.checkout_git_commit(repo_path, commit):
sys.exit(1)
print("done.")
# build pytorch
print(f"Building pytorch commit {commit} ...", end="", flush=True)
# Check if version.py exists, if it does, remove it.
# This is to force pytorch update the version.py file upon incremental compilation
version_py_path = os.path.join(repo_path, "torch/version.py")
if os.path.exists(version_py_path):
os.remove(version_py_path)
try:
# some packages are not included in the wheel, so use `develop`, not `install`
command = ["python", "setup.py", "develop"]
# setup environment variables
build_env = setup_build_env(cuda_env)
subprocess.check_call(command, cwd=repo_path, env=build_env, shell=False)
except subprocess.CalledProcessError:
# If failed, remove the build directory, then try again
build_path = os.path.join(repo_path, "build")
if os.path.exists(build_path):
shutil.rmtree(build_path)
subprocess.check_call(command, cwd=repo_path, env=build_env, shell=False)
finally:
        command_testbuild = ["python", "-c", "import torch"]
subprocess.check_call(command_testbuild, cwd=os.environ["HOME"], env=build_env, shell=False)
print("done")
def process_test_result(result_a: Path, result_b: Path, output_dir: str) -> str:
def validate_results(a, b) -> bool:
metrics = a["metrics"].keys()
return sorted(metrics) == sorted(b["metrics"])
# check two results are different files
assert not result_a == result_b, f"Path {result_a} and {result_b} are the same. Exit."
# validate results
with open(result_a, "r") as fa:
a = json.load(fa)
with open(result_b, "r") as fb:
b = json.load(fb)
assert validate_results(a, b), f"Result validation failed for {result_a} and {result_b}."
# print result in csv format
header = ["Metric", a["environ"]["pytorch_git_version"][:GIT_HASH_LEN], b["environ"]["pytorch_git_version"][:GIT_HASH_LEN]]
out = [header]
metrics = a["metrics"].keys()
for m in sorted(metrics):
val = [m]
val.append(a["metrics"][m])
val.append(b["metrics"][m])
out.append(val)
out = "\n".join([";".join(map(lambda y: str(y), x)) for x in out])
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, "control.json"), "w") as fcontrol:
json.dump(a, fcontrol, indent=4)
with open(os.path.join(output_dir, "treatment.json"), "w") as ftreatment:
json.dump(b, ftreatment, indent=4)
with open(os.path.join(output_dir, "result.csv"), "w") as fout:
fout.write(out + "\n")
return out
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch-repo", required=True, type=str, help="PyTorch repo path")
parser.add_argument("--base", required=True, type=str, help="PyTorch base commit")
parser.add_argument("--head", required=True, type=str, help="PyTorch head commit")
parser.add_argument("--userbenchmark", required=True, type=str, help="Name of the userbenchmark to run")
parser.add_argument("--output-dir", required=True, type=str, help="Output dir path")
parser.add_argument("--skip-build", action="store_true", help="Skip PyTorch build")
args = parser.parse_args()
# sanity checks
assert args.userbenchmark in list_userbenchmarks(), f"Available userbenchmark list: {list_userbenchmarks()}, " \
f"but you specified {args.userbenchmark}."
if not args.skip_build:
assert Path(args.pytorch_repo).is_dir(), f"Specified PyTorch repo dir {args.pytorch_repo} doesn't exist."
commits = gitutils.get_git_commits(args.pytorch_repo, args.base, args.head)
assert commits, f"Can't find git commit {args.base} or {args.head} in repo {args.pytorch_repo}"
# setup cuda environment
cuda_env = prepare_cuda_env(cuda_version=DEFAULT_CUDA_VERSION)
result_a = run_commit(args.pytorch_repo, cuda_env, args.base, args.userbenchmark, args.skip_build)
result_b = run_commit(args.pytorch_repo, cuda_env, args.head, args.userbenchmark, args.skip_build)
compare_result = process_test_result(result_a, result_b, args.output_dir)
print(compare_result)
|
"""
Script that runs torchbench with a benchmarking config.
The configs are located within the configs/ directory.
For example, the default config we use is `torchdynamo/eager-overhead`
"""
import re
import sys
import os
import yaml
import argparse
import subprocess
import itertools
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional
from bmutils import add_path
from bmutils.summarize import analyze_result
REPO_DIR = str(Path(__file__).parent.parent.parent.resolve())
with add_path(REPO_DIR):
from torchbenchmark import _list_model_paths
from utils.cuda_utils import prepare_cuda_env, install_pytorch_nightly
@dataclass
class BenchmarkModelConfig:
models: Optional[List[str]]
device: str
test: str
batch_size: Optional[int]
cuda_version: Optional[str]
args: List[str]
rewritten_option: str
def rewrite_option(option: List[str]) -> str:
out = []
for x in option:
out.append(x.replace("--", ""))
if option == ['']:
return "eager"
else:
return "-".join(out)
def get_models(config) -> Optional[str]:
# if the config doesn't specify the 'models' key,
# returns None (means running all models)
if not "models" in config:
return None
# get list of models
models = list(map(lambda x: os.path.basename(x), _list_model_paths()))
enabled_models = []
for model_pattern in config["models"]:
r = re.compile(model_pattern)
matched_models = list(filter(lambda x: r.match(x), models))
enabled_models.extend(matched_models)
    assert enabled_models, f"The model patterns you specified {config['models']} do not match any model. Please double-check."
return enabled_models
def get_subrun_key(subrun_key):
return "-".join(subrun_key)
def get_cuda_versions(config):
if not "cuda_version" in config:
return [None]
return config["cuda_version"]
def get_tests(config):
if not "test" in config:
return ["train", "eval"]
return config["test"]
def get_devices(config):
if not "device" in config:
return ["cpu", "cuda"]
return config["device"]
def get_batch_sizes(config):
if not "batch_size" in config:
return [None]
return config["batch_size"]
def get_subrun(device, test, batch_size, cuda_version):
if not batch_size and not cuda_version:
return (device, test)
if not batch_size:
return (device, test, f"cuda_{cuda_version}")
if not cuda_version:
return (device, test, f"bs_{batch_size}")
return (device, test, f"bs_{batch_size}", f"cuda_{cuda_version}")
def parse_bmconfigs(repo_path: Path, config_name: str) -> List[BenchmarkModelConfig]:
if not config_name.endswith(".yaml"):
config_name += ".yaml"
config_file = repo_path.joinpath("configs").joinpath(*config_name.split("/"))
if not config_file.exists():
raise RuntimeError(f"Benchmark model config {config_file} does not exist.")
with open(config_file, "r") as cf:
config = yaml.safe_load(cf)
out = {}
models = get_models(config)
devices = get_devices(config)
tests = get_tests(config)
batch_sizes = get_batch_sizes(config)
cuda_versions = get_cuda_versions(config)
bm_matrix = [devices, tests, batch_sizes, cuda_versions]
for device, test, batch_size, cuda_version in itertools.product(*bm_matrix):
subrun = get_subrun(device, test, batch_size, cuda_version)
out[subrun] = []
for args in config["args"]:
out[subrun].append(BenchmarkModelConfig(models=models, device=device, test=test, \
batch_size=batch_size, cuda_version=cuda_version, args=args.split(" "), \
rewritten_option=rewrite_option(args.split(" "))))
return out
def prepare_bmconfig_env(config: BenchmarkModelConfig, repo_path: Path, dryrun=False):
"""Prepare the correct cuda version environment for the benchmarking."""
if not config.cuda_version:
return os.environ.copy()
cuda_version = config.cuda_version
new_env = prepare_cuda_env(cuda_version=cuda_version)
install_pytorch_nightly(cuda_version=cuda_version, env=new_env, dryrun=dryrun)
return new_env
def run_bmconfig(config: BenchmarkModelConfig, repo_path: Path, output_path: Path, dryrun=False):
run_env = prepare_bmconfig_env(config, repo_path=repo_path, dryrun=dryrun)
cmd = [sys.executable, "run_sweep.py", "-d", config.device, "-t", config.test]
if config.batch_size:
cmd.append("-b")
cmd.append(str(config.batch_size))
if config.models:
cmd.append("-m")
cmd.extend(config.models)
if config.args != ['']:
cmd.extend(config.args)
output_dir = output_path.joinpath("json")
output_dir.mkdir(exist_ok=True, parents=True)
cmd.extend(["-o", os.path.join(output_dir.absolute(), f"{config.rewritten_option}.json")])
print(f"Now running benchmark command: {cmd}.", flush=True)
if dryrun:
return
subprocess.check_call(cmd, cwd=repo_path, env=run_env)
def gen_output_csv(output_path: Path, base_key: str):
result = analyze_result(output_path.joinpath("json").absolute(), base_key=base_key)
with open(output_path.joinpath("summary.csv"), "w") as sw:
sw.write(result)
def check_env(total_run):
    """Check that the machine has been properly set up to run the config."""
    for subrun in total_run:
        bmconfigs = total_run[subrun]
        for bmconfig in bmconfigs:
            if bmconfig.cuda_version:
                cuda_path = Path("/").joinpath("usr", "local", f"cuda-{bmconfig.cuda_version}")
                assert cuda_path.exists() and cuda_path.is_dir(), f"Expected CUDA path {str(cuda_path)} doesn't exist. Please report a bug."
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config", "-c", required=True, help="Specify benchmark config to run.")
parser.add_argument("--benchmark-repo", "-b", required=True, help="Specify the pytorch/benchmark repository location.")
parser.add_argument("--output-dir", "-o", required=True, help="Specify the directory to save the outputs.")
parser.add_argument("--dryrun", action="store_true", help="Dry run the script and don't run the benchmark.")
args = parser.parse_args()
repo_path = Path(args.benchmark_repo)
assert repo_path.exists(), f"Path {args.benchmark_repo} doesn't exist. Exit."
output_path = Path(args.output_dir)
output_path.mkdir(exist_ok=True, parents=True)
total_run = parse_bmconfigs(repo_path, args.config)
assert len(total_run), "Size of the BenchmarkModel list must be larger than zero."
check_env(total_run)
for subrun in total_run:
subrun_key = get_subrun_key(subrun)
bmconfigs = total_run[subrun]
assert len(bmconfigs), f"Size of subrun {subrun} must be larger than zero."
subrun_path = output_path.joinpath(subrun_key)
subrun_path.mkdir(exist_ok=True, parents=True)
for bm in bmconfigs:
run_bmconfig(bm, repo_path, subrun_path, args.dryrun)
if not args.dryrun:
gen_output_csv(subrun_path, base_key=bmconfigs[0].rewritten_option)
|
import argparse
import os
import json
import yaml
from pathlib import Path
WORKFLOW_LINK_TEMPLATE = "https://github.com/pytorch/benchmark/actions/runs/"
def check_env(bisection_root: str):
"Check `bisection_root` contains bisection config file, github issue file, and result json."
# gh-issue.md exists
# result.json exists
bisection_path = Path(bisection_root)
assert os.environ["GITHUB_ENV"], f"GITHUB_ENV environment variable doesn't exist."
assert bisection_path.is_dir(), f"Specified bisection root {bisection_path} is not a directory."
assert bisection_path.joinpath("gh-issue.md").exists(), \
f"Bisection directory {bisection_path} doesn't contain file gh-issue.md."
assert bisection_path.joinpath("result.json").exists(), \
f"Bisection directory {bisection_path} doesn't contain file result.json."
assert bisection_path.joinpath("config.yaml").exists(), \
f"Bisection directory {bisection_path} doesn't contain file config.yaml."
def setup_gh_issue(bisection_root: str, gh_workflow_id: str):
bisection_path = Path(bisection_root)
json_path = bisection_path.joinpath("result.json")
with open(json_path, "r") as jp:
result = jp.read()
result = f"\nResult json: \n```\n{result}\n```"
workflow_str = f"\nBisection workflow link: {WORKFLOW_LINK_TEMPLATE}{gh_workflow_id}\n"
gh_issue_path = bisection_path.joinpath("gh-issue.md")
with open(gh_issue_path, "a") as ghi:
ghi.write(result)
ghi.write(workflow_str)
def set_env_if_nonempty(bisection_root: str):
bisection_path = Path(bisection_root)
json_path = bisection_path.joinpath("result.json")
with open(json_path, "r") as jp:
result = json.load(jp)
# if result is empty, no need to setup the env
if result["result"] == []:
return
yaml_path = bisection_path.joinpath("config.yaml")
with open(yaml_path, "r") as config_file:
config = yaml.safe_load(config_file)
affected_pytorch_version = config["end_version"]
fname = os.environ["GITHUB_ENV"]
content = f"TORCHBENCH_PERF_BISECTION_NONEMPTY_SIGNAL='{affected_pytorch_version}'\n"
with open(fname, 'a') as fo:
fo.write(content)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--bisection-root", required=True, help="Root directory of the bisection directory")
parser.add_argument("--gh-workflow-id", required=True, help="GitHub workflow id")
args = parser.parse_args()
check_env(args.bisection_root)
setup_gh_issue(args.bisection_root, args.gh_workflow_id)
set_env_if_nonempty(args.bisection_root)
|
import sys
from pathlib import Path
CURRENT_DIR = Path(__file__).parent
REPO_ROOT = str(CURRENT_DIR.parent.parent.parent)
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass |
import json
import os
import re
from pathlib import Path
import argparse
ATTRIBUTES = ["batch_size", "precision"]
def get_nonempty_json(d):
r = []
for f in filter(lambda x: x.endswith(".json"), os.listdir(d)):
fullpath = os.path.join(d, f)
if os.stat(fullpath).st_size:
r.append(fullpath)
return r
def process_json(result, f, base_key):
with open(f, "r") as jf:
tbo = json.load(jf)
key = Path(f).stem
for test in tbo:
k = (test["name"], test["test"], test["device"])
status = test["status"]
if k not in result:
result[k] = {}
if key == base_key:
result[k]["precision"] = test["precision"]
result[k]["batch_size"] = test["batch_size"]
result[k][key] = {}
result[k][key]["status"] = status
result[k][key]["results"] = test["results"]
def insert_if_nonexist(arr, k, loc=None):
if k in arr:
return
    if loc is None:
arr.append(k)
return
arr.insert(loc, k)
# Result header
# Model (<test>, <device>); <base arg>; <arg1>; <arg2>; ...; <argn>
def generate_header(result, base_key):
header = []
args = []
test = list(result.keys())[0][1]
device = list(result.keys())[0][2]
base_arg = None
for t in result:
assert t[1] == test, f"Both {t[1]} and {test} exist in result, can't analyze."
assert t[2] == device, f"Both {t[2]} and {device} exist in result, can't analyze."
result_keys = result[t].keys()
        for k in filter(lambda x: x not in ATTRIBUTES, result_keys):
if k == base_key:
insert_if_nonexist(args, f"{k} (latency)", loc=0)
else:
insert_if_nonexist(args, f"{k} (correctness)")
insert_if_nonexist(args, f"{k} (latency)")
insert_if_nonexist(args, f"{k} (speedup)")
header.append(f"Model ({test}, {device})")
header.append(f"precision")
header.append(f"batch size")
header.extend(args)
return header
def split_header(header):
regex = "(.*) \(([a-z]+)\)"
g = re.match(regex, header).groups()
return (g[0], g[1])
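# Illustrative example: split_header("eager (latency)") returns ("eager", "latency"),
# splitting a column header back into its argument key and metric type.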
def is_ok(r):
return r["status"] == "OK"
def find_result_by_header(r, header, base_arg):
    # tp: correctness, latency, or speedup
args, tp = header
if tp == "correctness":
if is_ok(r[args]) and "correctness" in r[args]["results"]:
return r[args]["results"]["correctness"]
else:
return "N/A"
elif tp == "latency":
if is_ok(r[args]):
return round(r[args]["results"]["latency_ms"], 3)
else:
return r[args]["status"]
elif tp == "speedup":
if is_ok(r[base_arg]) and is_ok(r[args]):
return round(r[base_arg]["results"]["latency_ms"] / r[args]["results"]["latency_ms"], 3)
else:
return "N/A"
else:
assert False, f"Found unknown type {tp}"
# Dump the result to csv, so that can be used in Google Sheets
def dump_result(result, header, base_key):
s = [";".join(header) + "\n"]
# sort models by their names in lowercase
for k in sorted(result, key=lambda x: x[0].lower()):
rt = [str(k[0]), str(result[k]["precision"]), str(result[k]["batch_size"])]
for h in header[3:]:
rt.append(str(find_result_by_header(result[k], split_header(h), base_key)))
s.append(";".join(rt) + "\n")
return "".join(s)
def analyze_result(result_dir: str, base_key: str) -> str:
files = get_nonempty_json(result_dir)
# make sure the baseline file exists
file_keys = list(map(lambda x: Path(x).stem, files))
assert base_key in file_keys, f"Baseline key {base_key} is not found in all files: {file_keys}."
result = {}
for f in files:
process_json(result, f, base_key)
header = generate_header(result, base_key)
s = dump_result(result, header, base_key)
return s
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--result-dir", required=True, help="Specify the result directory")
parser.add_argument("-b", "--base-key", default="eager", help="Specify the baseline key")
args = parser.parse_args()
s = analyze_result(args.result_dir, args.base_key)
print(s)
|
import argparse
import sys
import subprocess
from pathlib import Path
from aicluster import run_aicluster_benchmark
REPO_ROOT = Path(__file__).parent.parent.parent.parent.resolve()
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
with add_path(str(REPO_ROOT)):
from userbenchmark import get_userbenchmarks_by_platform
def run_userbenchmark(ub_name, dryrun=True):
workdir = REPO_ROOT
command = [sys.executable, "run_benchmark.py", ub_name]
print(f"Running user benchmark command: {command}")
if not dryrun:
subprocess.check_call(command, cwd=workdir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--platform", choices=["gcp_a100", "aws_t4_metal", "ai_cluster"], required=True, help="specify the benchmark platform.")
parser.add_argument("--dryrun", action="store_true", help="only dry run the command.")
args = parser.parse_args()
benchmarks = get_userbenchmarks_by_platform(args.platform)
if args.platform == "ai_cluster":
assert not args.dryrun, "AICluster workflow doesn't support dryrun."
for ub in benchmarks:
run_aicluster_benchmark(ub, check_success=True, upload_scribe=True)
else:
for ub in benchmarks:
run_userbenchmark(ub_name=ub, dryrun=args.dryrun)
|
"""
The script to upload TorchBench CI result from S3 to Scribe (Internal).
To run this script, users need to set two environment variables:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
It assumes the following hierarchy of the result directory:
torchbench-aicluster-metrics/
|-distributed
|-metrics-20220805192500.json
"""
import argparse
import boto3
import datetime
import os
import subprocess
import sys
import yaml
from pathlib import Path
AICLUSTER_S3_BUCKET = "ossci-metrics"
AICLUSTER_S3_OBJECT = "torchbench-aicluster-metrics"
INDEX_FILE_NAME = "index.yaml"
REPO_ROOT = Path(__file__).parent.parent.parent.parent.resolve()
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
with add_path(str(REPO_ROOT)):
from scripts.userbenchmark.upload_scribe import TorchBenchUserbenchmarkUploader, process_benchmark_json
class S3Client:
def __init__(self, bucket=AICLUSTER_S3_BUCKET, object=AICLUSTER_S3_OBJECT):
self.s3 = boto3.client('s3')
self.bucket = bucket
self.object = object
def download_file(self, key, dest_dir):
filename = S3Client.get_filename_from_key(key)
assert filename, f"Expected non-empty filename from key {key}."
with open(os.path.join(dest_dir, filename), 'wb') as f:
self.s3.download_fileobj(self.bucket, key, f)
def upload_file(self, prefix, file_path):
file_name = file_path.name
s3_key = f"{self.object}/{prefix}/{file_name}" if prefix else f"{self.object}/{file_name}"
response = self.s3.upload_file(str(file_path), self.bucket, s3_key)
print(f"S3 client response: {response}")
def exists(self, prefix, file_name):
"""Test if the key object/prefix/file_name exists in the S3 bucket.
If True, return the S3 object key. Return None otherwise. """
s3_key = f"{self.object}/{prefix}/{file_name}" if prefix else f"{self.object}/{file_name}"
result = self.s3.list_objects_v2(Bucket=self.bucket, Prefix=s3_key)
if 'Contents' in result:
return s3_key
return None
def list_directory(self, directory=None):
"""List the directory files in the S3 bucket path.
If the directory doesn't exist, report an error. """
prefix = f"{self.object}/{directory}/" if directory else f"{self.object}/"
pages = self.s3.get_paginator("list_objects").paginate(Bucket=self.bucket, Prefix=prefix)
keys = filter(lambda x: not x == prefix, [e['Key'] for p in pages for e in p['Contents']])
return list(keys)
    @staticmethod
    def get_filename_from_key(object_key):
filename = object_key.split('/')[-1]
return filename
def determine_success_today(index, allow_yesterday=True):
"""
    Determine whether today's or yesterday's run was successful.
"""
# get today's date in UTC
today = datetime.datetime.utcnow().date()
today_str = f"metrics-{today.strftime('%Y%m%d')}"
yesterday = (today - datetime.timedelta(days=1))
yesterday_str = f"metrics-{yesterday.strftime('%Y%m%d')}"
for index_key in index:
# check if today or yesterday's date exists in the index
if today_str in index_key:
print(f"Found today run log: {index_key} ")
return True
if allow_yesterday and yesterday_str in index_key:
print(f"Found yesterday run log: {index_key} ")
return True
# not found, the last run probably failed
return False
def get_metrics_index(s3, benchmark_name, work_dir):
"""
    1. Try to download the index file from S3.
    2. If it is not found, create an initial index from the metrics files in the S3 directory.
    3. Otherwise, compare the downloaded index with the metrics file list, update the index, and return it.
"""
def gen_index_obj(index_key):
"download and load the index file if exists, otherwise, return empty object."
if not index_key:
return {}
filename = S3Client.get_filename_from_key(index_key)
s3.download_file(index_key, work_dir)
with open(work_dir.joinpath(filename), "r") as index_f:
index = yaml.safe_load(index_f)
return index
    def filter_metric_files(metric_files):
        # filter the passed-in S3 keys down to "metrics-*.json" files
        filtered_metrics = list(filter(lambda x: S3Client.get_filename_from_key(x)
                                       and S3Client.get_filename_from_key(x).startswith("metrics-")
                                       and x.endswith(".json"),
                                       metric_files))
        return filtered_metrics
def update_index_from_metrics(index, metric_files):
metric_filenames = list(map(lambda x: S3Client.get_filename_from_key(x), metric_files))
for metric_filename in metric_filenames:
if not metric_filename in index:
index[metric_filename] = {}
index[metric_filename]["uploaded-scribe"] = False
return index
index_key = s3.exists(prefix=benchmark_name, file_name=INDEX_FILE_NAME)
index = gen_index_obj(index_key)
metric_files = filter_metric_files(s3.list_directory(directory=None))
updated_index = update_index_from_metrics(index, metric_files)
return updated_index
def upload_metrics_to_scribe(s3, benchmark_name, index, work_dir):
"""
    for each 'uploaded-scribe: False' file in the index:
1. download it from S3
2. upload it to scribe
3. if success, update the index file with 'uploaded-scribe: True'
upload the updated index file to S3 after processing all files
"""
try:
for index_key in index:
assert "uploaded-scribe" in index[index_key], \
f"Index key {index_key} missing field uploaded-scribe!"
index_file_path = work_dir.joinpath(INDEX_FILE_NAME)
with open(index_file_path, "w") as index_file:
yaml.safe_dump(index, index_file)
need_upload_metrics = filter(lambda x: not index[x]["uploaded-scribe"], index.keys())
for upload_metrics in need_upload_metrics:
# download the metrics file from S3 to work_dir
print(f"Downloading metrics file {upload_metrics} to local.")
metrics_key = s3.exists(prefix=None, file_name=upload_metrics)
assert metrics_key, f"Expected metrics file {upload_metrics} does not exist."
s3.download_file(metrics_key, work_dir)
# upload it to scribe
print(f"Uploading metrics file {upload_metrics} to scribe.")
metrics_path = str(work_dir.joinpath(upload_metrics).resolve())
with open(metrics_path, "r") as mp:
benchmark_time, benchmark_data = process_benchmark_json(mp)
uploader = TorchBenchUserbenchmarkUploader(platform_name="ai_cluster")
# user who run the benchmark on ai cluster
uploader.UNIX_USER = "diegosarina"
uploader.SUBMISSION_GROUP_GUID = "ai_cluster"
uploader.post_userbenchmark_results(benchmark_time, benchmark_data)
# update the index file
index[upload_metrics]["uploaded-scribe"] = True
with open(index_file_path, "w") as index_file:
yaml.safe_dump(index, index_file)
except subprocess.SubprocessError:
print(f"Failed to upload the file to scribe.")
finally:
# upload the result index file to S3
s3.upload_file(prefix=benchmark_name, file_path=index_file_path)
def get_work_dir(benchmark_name):
workdir = Path(REPO_ROOT).joinpath(".userbenchmark").joinpath(benchmark_name).joinpath("logs")
workdir.mkdir(parents=True, exist_ok=True)
return workdir
def run_aicluster_benchmark(benchmark_name: str, check_success=True, upload_scribe=True):
work_dir = get_work_dir(benchmark_name)
print(f"Running benchmark {benchmark_name} on aicluster, work directory: {work_dir}")
s3 = S3Client()
# get the benchmark metrics index or create a new one
index = get_metrics_index(s3, benchmark_name, work_dir)
# if the previous run is not successful, exit immediately
if check_success and not determine_success_today(index):
assert False, f"Don't find the last successful run in index: { index }. Please report a bug."
# upload to scribe by the index
if upload_scribe:
upload_metrics_to_scribe(s3, benchmark_name, index, work_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--benchmark", required=True, help="Name of the benchmark to run.")
parser.add_argument("--check-success", action="store_true", help="Determine whether checking the run is successful in the last two days.")
parser.add_argument("--upload-scribe", action="store_true", help="Update the result to Scribe.")
args = parser.parse_args()
run_aicluster_benchmark(benchmark_name=args.benchmark, check_success=args.check_success, upload_scribe=args.upload_scribe)
|
import yaml
from pathlib import Path
CURRENT_DIR = Path(__file__).parent
def list_userbenchmarks():
ub_dirs = [x for x in CURRENT_DIR.iterdir() if x.is_dir() and x.joinpath('__init__.py').exists() ]
ub_names = list(map(lambda x: x.name, ub_dirs))
return ub_names
def get_ci_from_ub(ub_name):
ci_file = CURRENT_DIR.joinpath(ub_name).joinpath("ci.yaml")
if not ci_file.exists():
return None
with open(ci_file, "r") as ciobj:
cicfg = yaml.safe_load(ciobj)
ret = {}
ret["name"] = ub_name
ret["ci_cfg"] = cicfg
return ret
def get_userbenchmarks_by_platform(platform):
ub_names = list_userbenchmarks()
cfgs = list(map(lambda x: x["name"], filter(lambda x: x and x["ci_cfg"]["platform"] == platform, map(get_ci_from_ub, ub_names))))
return cfgs
|
import os
import sys
from datetime import datetime, timedelta
import time
import json
from pathlib import Path
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
REPO_PATH = Path(os.path.abspath(__file__)).parent.parent
USERBENCHMARK_OUTPUT_PREFIX = ".userbenchmark"
PLATFORMS = [
"gcp_a100",
"aws_t4_metal",
]
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
with add_path(str(REPO_PATH)):
from utils.s3_utils import S3Client, USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT
@dataclass
class TorchBenchABTestMetric:
control: float
treatment: float
delta: float
@dataclass
class TorchBenchABTestResult:
control_env: Dict[str, str]
treatment_env: Dict[str, str]
bisection: Optional[str]
details: Dict[str, TorchBenchABTestMetric]
def get_output_json(bm_name, metrics) -> Dict[str, Any]:
import torch
return {
"name": bm_name,
"environ": {"pytorch_git_version": torch.version.git_version},
"metrics": metrics,
}
def dump_output(bm_name, output, target_dir: Path=None) -> None:
if target_dir is None:
target_dir = get_output_dir(bm_name)
fname = "metrics-{}.json".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S"))
full_fname = os.path.join(target_dir, fname)
with open(full_fname, 'w') as f:
json.dump(output, f, indent=4)
def get_date_from_metrics(metrics_file: str) -> str:
datetime_obj = datetime.strptime(metrics_file, "metrics-%Y%m%d%H%M%S")
return datetime.strftime(datetime_obj, "%Y-%m-%d")
def get_ub_name(metrics_file_path: str) -> str:
with open(metrics_file_path, "r") as mf:
metrics = json.load(mf)
return metrics["name"]
def get_output_dir(bm_name) -> Path:
current_dir = Path(os.path.dirname(os.path.abspath(__file__)))
target_dir = current_dir.parent.joinpath(USERBENCHMARK_OUTPUT_PREFIX, bm_name)
target_dir.mkdir(exist_ok=True, parents=True)
return target_dir
def get_date_from_metrics_s3_key(metrics_s3_key: str) -> datetime:
metrics_s3_json_filename = metrics_s3_key.split('/')[-1]
return datetime.strptime(metrics_s3_json_filename, 'metrics-%Y%m%d%H%M%S.json')
def get_latest_jsons_in_s3_from_last_n_days(bm_name: str, platform_name: str, date: datetime, ndays: int=7, limit: int=100) -> List[str]:
"""Retrieves the most recent n day metrics json filenames from S3 before the given date, inclusive of that date.
If fewer than n days are found, returns all found items without erroring, even if there were no items.
Returns maximum 100 results by default. """
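    # Keys under the userbenchmark S3 object are laid out as
    # "<bm_name>/<platform_name>/<YYYY-MM-DD>/metrics-<YYYYmmddHHMMSS>.json", e.g.
    # (illustrative) "release-test/gcp_a100/2022-08-05/metrics-20220805192500.json".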
s3 = S3Client(USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT)
directory = f'{bm_name}/{platform_name}'
if not s3.exists(None, directory):
return []
previous_json_files = []
current_date = date
while len(previous_json_files) < limit and current_date >= date - timedelta(days=ndays):
current_date_str = current_date.strftime('%Y-%m-%d')
current_directory = f'{directory}/{current_date_str}'
if s3.exists(None, current_directory):
files = s3.list_directory(current_directory)
metric_jsons = [f for f in files if f.endswith('.json') and 'metrics' in f]
metric_jsons.sort(key=lambda x: get_date_from_metrics_s3_key(x), reverse=True)
previous_json_files.extend(metric_jsons[:limit - len(previous_json_files)])
# Move on to the previous date.
current_date -= timedelta(days=1)
return previous_json_files
|
import itertools
import time
from datetime import datetime
from typing import List
import yaml
import json
import numpy as np
import argparse
from ..utils import REPO_PATH, add_path, get_output_dir, get_output_json, dump_output
with add_path(REPO_PATH):
from components._impl.workers.subprocess_rpc import UnserializableException, ChildTraceException
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
BM_NAME = "model-stableness"
# By default, use 7 percent as the threshold for stableness checking
STABLE_THRESHOLD = 0.07
# By default, run 15 iterations
DEFAULT_ITERATIONS = 15
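# The stableness metric reported below is the relative spread of the per-round median latencies:
#   max_delta = (max(medians) - min(medians)) / min(medians)
# Illustrative example with hypothetical numbers: medians of [10.0, 10.3, 10.5] ms give
# max_delta = (10.5 - 10.0) / 10.0 = 0.05, which is under the 0.07 threshold, so the
# config would be reported as stable.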
def generate_model_config(model_name: str) -> List[TorchBenchModelConfig]:
devices = ["cpu", "cuda"]
tests = ["train", "eval"]
cfgs = itertools.product(*[devices, tests])
result = [TorchBenchModelConfig(
name=model_name,
device=device,
test=test,
batch_size=None,
jit=False,
extra_args=[],
extra_env=None,
) for device, test in cfgs]
return result
def parse_args(args: List[str]):
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--rounds", default=DEFAULT_ITERATIONS, type=int, help="Number of rounds to run to simulate measuring max delta in workflow.")
parser.add_argument("-m", "--models", default="", help="Specify the models to run, default (empty) runs all models.")
parser.add_argument("-d", "--device", default="cpu", help="Specify the device.")
parser.add_argument("-t", "--test", default="eval", help="Specify the test.")
parser.add_argument("-o", "--output", type=str, help="The default output json file.")
args = parser.parse_args(args)
return args
def _get_median_latencies(raw_metrics):
    # raw_metrics entries may be error strings; only count dict entries that contain latencies
    num_with_latencies = len(list(filter(lambda x: isinstance(x, dict) and 'latencies' in x, raw_metrics)))
    if num_with_latencies != len(raw_metrics):
return None
median_latencies = list(map(lambda x: np.median(x['latencies']), raw_metrics))
return median_latencies
def reduce_results(full_results):
ub_metrics = {}
latencies_by_cfg = {}
for round_result in full_results:
for result in round_result:
cfg = result['cfg']
cfg_name = f"{cfg['name']}_{cfg['device']}_{cfg['test']}_ootb_latencies"
if not cfg_name in latencies_by_cfg:
latencies_by_cfg[cfg_name] = []
latencies_by_cfg[cfg_name].append(result['raw_metrics'])
for cfg_name in latencies_by_cfg:
raw_metrics = latencies_by_cfg[cfg_name]
latencies = _get_median_latencies(raw_metrics)
if latencies:
ub_metrics[f"{cfg_name}_maxdelta"] = (max(latencies) - min(latencies)) / min(latencies)
else:
ub_metrics[f"{cfg_name}_maxdelta"] = -1.0
return ub_metrics
def reduce_results_by_device(full_results):
def _cfg_to_key(cfg):
key = {}
key["model"] = cfg["name"]
key["test"] = cfg["test"]
return frozenset(key.items())
result_by_device = {}
result_yaml_obj = {}
for round_result in full_results:
for result in round_result:
cfg = result['cfg']
device = cfg['device']
raw_metrics = result['raw_metrics']
result_by_device[device] = {} if not device in result_by_device else result_by_device[device]
key = _cfg_to_key(cfg)
result_by_device[device][key] = [] if not key in result_by_device[device] else result_by_device[device][key]
result_by_device[device][key].append(raw_metrics)
for device in result_by_device:
result_yaml_obj[device] = []
for key in result_by_device[device]:
latencies = _get_median_latencies(result_by_device[device][key])
if not latencies:
continue
max_delta = (max(latencies) - min(latencies)) / min(latencies)
stable_obj = dict(key)
stable_obj["max_delta"] = str(max_delta)
if max_delta < STABLE_THRESHOLD:
stable_obj["stable"] = True
else:
stable_obj["stable"] = False
result_yaml_obj[device].append(stable_obj)
return result_yaml_obj
def generate_filter(args: argparse.Namespace):
allowed_models = args.models
if allowed_models:
allowed_models = allowed_models.split(",") if "," in allowed_models else [allowed_models]
allowed_devices = args.device
allowed_devices = allowed_devices.split(",") if "," in allowed_devices else [allowed_devices]
allowed_tests = args.test
allowed_tests = allowed_tests.split(",") if "," in allowed_tests else [allowed_tests]
def cfg_filter(cfg: TorchBenchModelConfig) -> bool:
if cfg.device in allowed_devices and cfg.test in allowed_tests:
if not allowed_models:
return True
else:
return cfg.name in allowed_models
return False
return cfg_filter
def run(args: List[str]):
args = parse_args(args)
output_dir = get_output_dir(BM_NAME)
models = list_models()
cfgs = list(itertools.chain(*map(generate_model_config, models)))
cfg_filter = generate_filter(args)
# run a model cfg and get latencies
full_results = []
for _round in range(args.rounds):
single_round_result = []
for cfg in filter(cfg_filter, cfgs):
print(f"[Round {_round}/{args.rounds}] Running {cfg}")
try:
task = load_model_isolated(cfg)
# get the model test metrics
metrics: TorchBenchModelMetrics = get_model_test_metrics(task, metrics=["latencies"])
single_round_result.append({
'cfg': cfg.__dict__,
'raw_metrics': metrics.__dict__,
})
except NotImplementedError:
# some models don't implement the test specified
single_round_result.append({
'cfg': cfg.__dict__,
'raw_metrics': "NotImplemented",
})
except ChildTraceException as exception:
single_round_result.append({
'cfg': cfg.__dict__,
'raw_metrics': str(exception),
})
except UnserializableException as exception:
single_round_result.append({
'cfg': cfg.__dict__,
'raw_metrics': exception.args_repr,
})
finally:
# Remove task reference to trigger deletion in gc
task = None
full_results.append(single_round_result)
print(full_results)
    # reduce full results to metrics
    ub_metrics = reduce_results(full_results)
    output_json = get_output_json(BM_NAME, ub_metrics)
    # log detailed results in the .userbenchmark/model-stableness/logs/ directory
    log_dir = output_dir.joinpath("logs")
log_dir.mkdir(exist_ok=True, parents=True)
fname = "logs-{}.json".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S"))
full_fname = log_dir.joinpath(fname)
with open(full_fname, 'w') as f:
json.dump(full_results, f, indent=4)
# output userbenchmark metrics in the .userbenchmark/model-stableness directory
print(output_json)
dump_output(BM_NAME, output_json)
# output the stableness result yaml
yaml_dicts = reduce_results_by_device(full_results)
for device in yaml_dicts:
fname = f"summary-{device}.yaml"
full_fname = log_dir.joinpath(fname)
with open(full_fname, "w") as f:
f.write(yaml.safe_dump(yaml_dicts[device]))
|
from pathlib import Path
import json
import re
def get_run(test_dir):
run = {}
testdir_name = test_dir.name
regex = "cuda-(.*)-(.*)"
g = re.match(regex, testdir_name).groups()
run["test"] = g[0]
run["cuda_version"] = g[1]
eager_json = test_dir.joinpath("json", "eager.json")
assert eager_json.exists(), f"Expected json path {str(eager_json)} doesn't exist."
with open(eager_json, "r") as ej:
run["result"] = json.load(ej)
return run
def get_runs(work_dir: Path):
runs = []
for subdir in filter(lambda x: x.is_dir(), work_dir.iterdir()):
run = get_run(subdir)
runs.append(run)
return runs
def add_test_results(runs, result_metrics, base_cuda_version):
    assert len(runs) >= 2, f"Expected at least 2 runs per group, got {len(runs)}."
base_run = list(filter(lambda x: x['cuda_version'] == base_cuda_version, runs))[0]
for run in runs:
if run["cuda_version"] == base_cuda_version:
continue
for test in run["result"]:
test_name = f"{test['name']}-{test['test']}-{run['cuda_version']}-speedup"
if test['status'] == 'OK':
base_test = list(filter(lambda x: x['name'] == test['name'] and x['test'] == test['test'], base_run['result']))[0]
result_metrics[test_name] = base_test['results']['latency_ms'] / test['results']['latency_ms']
else:
# status has error
result_metrics[test_name] = "-1.0"
return result_metrics
def analyze(result_dir):
result_dir = Path(result_dir)
assert result_dir.is_dir(), f"Expected directory {str(result_dir)} doesn't exist."
result_metrics = { }
runs = get_runs(result_dir)
cuda_versions = sorted(map(lambda x: x["cuda_version"], runs))
base_cuda_version = cuda_versions[0]
cuda_train = list(filter(lambda x: x["test"] == "train", runs))
add_test_results(cuda_train, result_metrics, base_cuda_version=base_cuda_version)
cuda_eval = list(filter(lambda x: x["test"] == "eval", runs))
add_test_results(cuda_eval, result_metrics, base_cuda_version=base_cuda_version)
return result_metrics
|
import argparse
import time
import sys
import subprocess
from datetime import datetime
from .result_analyzer import analyze
from typing import List
from ..utils import dump_output, get_output_dir, get_output_json, add_path, REPO_PATH
with add_path(REPO_PATH):
from utils.cuda_utils import DEFAULT_CUDA_VERSION, CUDA_VERSION_MAP
BM_NAME = "cuda-compare"
def install_nightlies(dryrun):
default_cuda_version = CUDA_VERSION_MAP[DEFAULT_CUDA_VERSION]["pytorch_url"]
install_cmd = ["pip", "install", "--pre", "torch", "torchvision", "torchtext", "torchaudio",
"-f", f"https://download.pytorch.org/whl/nightly/{default_cuda_version}/torch_nightly.html"]
print(f"Installing pytorch packages: {install_cmd}")
if not dryrun:
subprocess.check_call(install_cmd, cwd=REPO_PATH)
def install_torchbench(dryrun):
install_cmd = [sys.executable, "install.py"]
print(f"Installing torchbench: {install_cmd}")
if not dryrun:
subprocess.check_call(install_cmd, cwd=REPO_PATH)
def run_benchmark(output_path, config, dryrun=False):
benchmark_script = REPO_PATH.joinpath(".github", "scripts", "run-config.py")
benchmark_cmd = [sys.executable, str(benchmark_script), "-c", config, "-b", str(REPO_PATH), "-o", str(output_path)]
print(f"Running benchmark: {benchmark_cmd}")
if not dryrun:
subprocess.check_call(benchmark_cmd, cwd=REPO_PATH)
def dump_result_to_json(metrics):
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result)
def get_timestamp():
return datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
def get_work_dir(output_dir):
work_dir = output_dir.joinpath(f"run-{get_timestamp()}")
work_dir.mkdir(exist_ok=True, parents=True)
return work_dir
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--dryrun", action='store_true', help="Only generate the test scripts. Do not run the benchmark.")
parser.add_argument("--config", "-c", type=str, default="devinfra/cuda-113-116-compare", help="Specify the config file")
parser.add_argument("--analyze", type=str, help="Only analyze the result of the specified work directory.")
args = parser.parse_args(args)
return args
def run(args: List[str]):
args = parse_args(args)
if args.analyze:
metrics = analyze(args.analyze)
dump_result_to_json(metrics)
return
work_dir = get_work_dir(get_output_dir(BM_NAME))
install_nightlies(args.dryrun)
install_torchbench(args.dryrun)
run_benchmark(work_dir, args.config, dryrun=args.dryrun)
if not args.dryrun:
metrics = analyze(work_dir)
dump_result_to_json(metrics) |
from pathlib import Path
import re
import functools
def is_userbenchmark_runscript(run_script_file):
MAGIC_LINE = "# GENERATED BY userbenchmark/release-test/__init__.py. DO NOT EDIT!"
with open(run_script_file, "r") as rsf:
script = rsf.read()
if MAGIC_LINE in script:
return True
return False
def get_run_keys(work_dir: Path):
run_keys = []
for subdir in filter(lambda x: x.is_dir(), work_dir.iterdir()):
run_script_file = subdir.joinpath("run.sh")
if run_script_file.is_file() and is_userbenchmark_runscript(run_script_file):
run_keys.append(subdir.name)
return run_keys
def get_workloads(run_dir: Path):
return list(map(lambda x: x.name, filter(lambda x: x.is_dir(), run_dir.iterdir())))
def dump_result_csv(work_dir, result):
csv_object = [["Benchmark"]]
DELIMITER = ";"
# generate header
run_keys = sorted(result.keys())
workloads = sorted(result[run_keys[0]])
metrics = sorted(result[run_keys[0]][workloads[0]])
for run_key in run_keys:
csv_object[0].append(f"{run_key}")
# generate data
for run_key in run_keys:
for wl_id, workload in enumerate(workloads):
for mid, metric in enumerate(metrics):
if len(csv_object) <= len(workloads) * len(metrics):
csv_object.append([f"{workload}-{metric}"])
csv_object[wl_id*len(metrics)+mid+1].append(str(result[run_key][workload][metric]))
csv_text = []
for csv_line in csv_object:
csv_text.append(DELIMITER.join(csv_line))
csv_text = "\n".join(csv_text) + "\n"
print(csv_text)
summary_file = work_dir.joinpath("summary.csv")
# write result file to summary
with open(summary_file, "w") as sf:
sf.write(csv_text)
def get_peak_mem(mem_log):
# example log:
# Max GPU Mem. Max RSS Mem. Max PSS Mem.
# 697 1971.07 1438.21
max_gpu_mem = 0.0
max_cpu_mem = 0.0
for line in mem_log:
        numbers = re.split(r'\s+', line.strip())
if len(numbers) == 3:
gpu_mem = float(numbers[0])
cpu_mem = float(numbers[1])
max_gpu_mem = gpu_mem if gpu_mem > max_gpu_mem else max_gpu_mem
max_cpu_mem = cpu_mem if cpu_mem > max_cpu_mem else max_cpu_mem
return max_gpu_mem, max_cpu_mem
def analyze_workload(run_dir: Path, workload_name: str, res):
workload_dir = run_dir.joinpath(workload_name)
assert workload_dir.joinpath("result.log").exists() and workload_dir.joinpath("result_mem.log").exists(), \
f"Error: missing benchmark result file result.log or result_mem.log in {workload_dir}."
LATENCY_REGEX = "Total time elapsed: (.*) seconds."
with open(workload_dir.joinpath("result.log"), "r") as lf:
latency_log = lf.readlines()[-1].strip()
with open(workload_dir.joinpath("result_mem.log"), "r") as mf:
mem_log = mf.readlines()
latency = re.search(LATENCY_REGEX, latency_log).groups()[0]
res[workload_name] = {}
res[workload_name]["latency"] = latency
res[workload_name]["gpu_memory"], res[workload_name]["cpu_memory"] = get_peak_mem(mem_log)
return res
def dump_userbenchmark_result(results):
metrics = {}
for run_key in results:
for workload in results[run_key]:
for metric in results[run_key][workload]:
metric_name = f"{run_key}-{workload}-{metric}"
metrics[metric_name] = results[run_key][workload][metric]
return metrics
def analyze_run_key(work_dir, run_key, r):
run_dir = work_dir.joinpath(run_key)
workloads = get_workloads(run_dir)
workload_results = functools.reduce(lambda r, w: analyze_workload(run_dir, w, r), workloads, {})
r[run_key] = workload_results
return r
def analyze(work_dir: Path):
    # get run keys (subdirectories containing a run.sh generated by this userbenchmark)
work_dir = Path(work_dir)
run_keys = get_run_keys(work_dir)
    assert run_keys, f"Expected non-empty run keys, got {run_keys}"
results = functools.reduce(lambda r, k: analyze_run_key(work_dir, k, r), run_keys, {})
# dump result to csv file
dump_result_csv(work_dir, results)
# dump results to userbenchmark object
results = dump_userbenchmark_result(results)
return results |
import argparse
import os
import yaml
import time
import shutil
import itertools
import subprocess
from datetime import datetime
from git import Repo
from pathlib import Path
from typing import List
from ..utils import dump_output, get_output_dir, get_output_json
from .result_analyzer import analyze
# Expected WORK_DIR structure
# WORK_DIR/
# |---examples/
# |---pytorch-<pytorch_ver1>-cuda-<cuda_ver1>/
# |---run.sh
# |---mnist/
# |---mnist-hogwild/
# |---<other-benchmarks>
# |---pytorch-<pytorch_ver2>-cuda-<cuda_ver2>/
# |---summary.csv
BM_NAME = "release-test"
EXAMPLE_URL = "https://github.com/pytorch/examples.git"
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "configs")
RUN_TEMPLATE = """
# GENERATED BY userbenchmark/release-test/__init__.py. DO NOT EDIT!
bash {RELEASE_TEST_ROOT}/setup_env.sh '{CUDA_VERSION}' '{MAGMA_VERSION}' '{PYTORCH_VERSION}' '{PYTORCH_CHANNEL}' '{WORK_DIR}'
bash {RELEASE_TEST_ROOT}/run_release_test.sh '{CUDA_VERSION}' '{RESULT_DIR}'
"""
def get_timestamp():
return datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
def get_work_dir(output_dir):
work_dir = output_dir.joinpath(f"run-{get_timestamp()}")
work_dir.mkdir(exist_ok=True, parents=True)
return work_dir
def generate_test_scripts(config, work_dir):
assert "cuda" in config and isinstance(config["cuda"], list), f"Expected CUDA config list, but not found."
assert "pytorch" in config and isinstance(config["pytorch"], list), f"Exptected pytorch version list, but not found."
bm_matrix = [config["cuda"], config["pytorch"]]
run_scripts = {}
for cuda, pytorch in itertools.product(*bm_matrix):
run_key = f"pytorch-{pytorch['version']}-cuda-{cuda['version']}"
run_script = RUN_TEMPLATE.format(RELEASE_TEST_ROOT=CURRENT_DIR,
CUDA_VERSION=cuda["version"],
MAGMA_VERSION=cuda["magma_version"],
PYTORCH_VERSION=pytorch["version"],
PYTORCH_CHANNEL=pytorch["conda_channel"],
WORK_DIR=work_dir,
RESULT_DIR=work_dir.joinpath(run_key))
run_scripts[run_key] = run_script
return run_scripts
def dump_test_scripts(run_scripts, work_dir):
for run_key, run_script in run_scripts.items():
run_script_loc = work_dir.joinpath(run_key)
run_script_loc.mkdir(exist_ok=True)
with open(run_script_loc.joinpath("run.sh"), "w") as rs:
rs.write(run_script)
def dump_result_to_json(metrics):
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result)
def run_benchmark(run_scripts, work_dir):
for run_key, _rscript in run_scripts.items():
run_script_path = work_dir.joinpath(run_key, "run.sh")
# run the benchmark
print(f"Running benchmark {run_key} ...")
subprocess.check_call(["bash", str(run_script_path)])
def get_config(config_name: str):
if os.path.exists(os.path.join(DEFAULT_CONFIG_PATH, config_name)):
config_name = os.path.join(DEFAULT_CONFIG_PATH, config_name)
elif os.path.exists(os.path.join(DEFAULT_CONFIG_PATH, f"{config_name}.yaml")):
config_name = os.path.join(DEFAULT_CONFIG_PATH, f"{config_name}.yaml")
else:
raise ValueError(f"Can't find config name {config_name} in config path {DEFAULT_CONFIG_PATH}.")
with open(config_name, "r") as yfile:
config = yaml.safe_load(yfile)
return config
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--config", "-c", default="1.12.1", type=str, help="Config for release testing")
parser.add_argument("--dry-run", action='store_true', help="Only generate the test scripts. Do not run the benchmark.")
parser.add_argument("--analyze", type=str, help="Only analyze the result of the specified work directory.")
args = parser.parse_args(args)
return args
def prepare_release_tests(args: argparse.Namespace, work_dir: Path):
config = get_config(args.config)
run_scripts = generate_test_scripts(config, work_dir)
dump_test_scripts(run_scripts, work_dir)
# clone the examples repo
Repo.clone_from(EXAMPLE_URL, work_dir.joinpath("examples"))
return run_scripts
def cleanup_release_tests(work_dir):
examples_path = work_dir.joinpath("examples")
if examples_path.exists():
shutil.rmtree(examples_path)
def run(args: List[str]):
args = parse_args(args)
if args.analyze:
analyze(args.analyze)
return
work_dir = get_work_dir(get_output_dir(BM_NAME))
run_scripts = prepare_release_tests(args=args, work_dir=work_dir)
if not args.dry_run:
run_benchmark(run_scripts, work_dir)
metrics = analyze(work_dir)
dump_result_to_json(metrics)
cleanup_release_tests(work_dir)
|
"""
Run PyTorch cpu benchmarking.
"""
import json
import os
import re
import sys
import time
from datetime import datetime
from pathlib import Path
REPO_PATH = Path(__file__).absolute().parent.parent.parent
USERBENCHMARK_OUTPUT_PREFIX = ".userbenchmark"
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
def get_output_dir(bm_name, test_date=None):
current_dir = Path(__file__).parent.absolute()
bm_out_dir = current_dir.parent.parent.joinpath(USERBENCHMARK_OUTPUT_PREFIX, bm_name)
test_date = test_date if test_date else datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
output_dir = bm_out_dir.joinpath("cpu-" + test_date)
output_dir.mkdir(exist_ok=True, parents=True)
return output_dir
def get_output_json(bm_name, metrics):
import torch
return {
"name": bm_name,
"environ": {"pytorch_git_version": torch.version.git_version},
"metrics": metrics,
}
def dump_output(bm_name, output, output_dir=None, fname=None):
output_dir = output_dir if output_dir else get_output_dir(bm_name)
fname = fname if fname else "metrics-{}.json".format(os.getpid())
full_fname = os.path.join(output_dir, fname)
with open(full_fname, "w") as f:
json.dump(output, f, indent=4)
def get_run(test_dir: Path):
run = {}
testdir_name = test_dir.name
regex = "(.*)_(.*)_(.*)"
g = re.match(regex, testdir_name).groups()
run["test"] = g[0]
run["model"] = g[1]
run["mode"] = g[2]
run["results"] = []
ins_jsons = filter(lambda x: x.is_file(), test_dir.iterdir())
for ins_json in ins_jsons:
with open(ins_json, "r") as ij:
run["results"].append(json.load(ij))
return run
def get_runs(work_dir: Path):
runs = []
for subdir in filter(lambda x: x.is_dir(), work_dir.iterdir()):
run = get_run(subdir)
runs.append(run)
return runs
def add_test_results(runs, result_metrics):
# metrics name examples:
# timm_regnet-eval-eager_latency
# timm_regnet-eval-eager_cmem
for run in runs:
run_base_name = f"{run['model']}-{run['test']}-{run['mode']}"
ins_number = len(run["results"])
assert ins_number
latency_metric = "latency" in run["results"][0]["metrics"]
cmem_metric = "cpu_peak_mem" in run["results"][0]["metrics"]
latency_sum = 0
cmem_sum = 0
for ins_res in run["results"]:
if latency_metric:
latency_sum += ins_res["metrics"]["latency"]
if cmem_metric:
cmem_sum += ins_res["metrics"]["cpu_peak_mem"]
if latency_metric:
result_metrics[f"{run_base_name}_latency"] = latency_sum / ins_number
if cmem_metric:
result_metrics[f"{run_base_name}_cmem"] = cmem_sum / ins_number
return result_metrics
def analyze(result_dir):
result_dir = Path(result_dir)
assert result_dir.is_dir(), f"Expected directory {str(result_dir)} doesn't exist."
result_metrics = {}
runs = get_runs(result_dir)
cpu_train = list(filter(lambda x: x["test"] == "train", runs))
if len(cpu_train):
add_test_results(cpu_train, result_metrics)
cpu_eval = list(filter(lambda x: x["test"] == "eval", runs))
if len(cpu_eval):
add_test_results(cpu_eval, result_metrics)
return result_metrics
|
"""
Run PyTorch cpu benchmarking.
"""
import argparse
import itertools
import os
import subprocess
import sys
import time
import yaml
from datetime import datetime
from pathlib import Path
from typing import List
from .cpu_utils import REPO_PATH, get_output_dir, get_output_json, dump_output, analyze
from ..utils import add_path
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import (list_models, TorchBenchModelConfig,
list_devices, list_tests)
BM_NAME = "cpu"
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def generate_model_configs(devices: List[str], tests: List[str], model_names: List[str], batch_size: int, jit: bool, extra_args: List[str]) -> List[TorchBenchModelConfig]:
"""Use the default batch size and default mode."""
if not model_names:
model_names = list_models()
cfgs = itertools.product(*[devices, tests, model_names])
result = [TorchBenchModelConfig(
name=model_name,
device=device,
test=test,
batch_size=batch_size,
jit=jit,
extra_args=extra_args,
extra_env=None,
) for device, test, model_name in cfgs]
return result
def dump_result_to_json(metrics, output_dir, fname):
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result, output_dir, fname)
def validate(candidates: List[str], choices: List[str]) -> List[str]:
"""Validate the candidates provided by the user is valid"""
for candidate in candidates:
assert candidate in choices, f"Specified {candidate}, but not in available list: {choices}."
return candidates
def generate_model_configs_from_yaml(yaml_file: str) -> List[TorchBenchModelConfig]:
yaml_file_path = os.path.join(CURRENT_DIR, yaml_file)
with open(yaml_file_path, "r") as yf:
config_obj = yaml.safe_load(yf)
models = config_obj["model"] if "model" in config_obj else None
models = validate(parse_str_to_list(models), list_models()) if models else list_models()
    extra_args = config_obj["extra_args"].split(' ') if config_obj.get("extra_args") else []
configs = []
for model in models:
config = TorchBenchModelConfig(
name=model,
device="cpu",
test=config_obj["test"],
batch_size=config_obj["batch_size"] if "batch_size" in config_obj else None,
jit=config_obj["jit"] if "jit" in config_obj else False,
extra_args=extra_args,
extra_env=None,
)
configs.append(config)
return configs
def parse_str_to_list(candidates):
if isinstance(candidates, list):
return candidates
candidates = list(map(lambda x: x.strip(), candidates.split(",")))
return candidates
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cpu", help="Devices to run, splited by comma.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run, splited by comma.")
parser.add_argument("--model", "-m", default=None, help="Only run the specifice models, splited by comma.")
parser.add_argument("--batch-size", "-b", default=None, help="Run the specifice batch size.")
parser.add_argument("--jit", action="store_true", help="Convert the models to jit mode.")
parser.add_argument("--config", "-c", default=None, help="YAML config to specify tests to run.")
parser.add_argument("--output", "-o", default=None, help="Output dir.")
parser.add_argument("--launcher", action="store_true", help="Use torch.backends.xeon.run_cpu to get the peak performance on Intel(R) Xeon(R) Scalable Processors.")
parser.add_argument("--launcher-args", default=None, help="Provide the args of torch.backends.xeon.run_cpu. See `python -m torch.backends.xeon.run_cpu --help`")
parser.add_argument("--dryrun", action="store_true", help="Dryrun the command.")
return parser.parse_known_args(args)
def run(args: List[str]):
args, extra_args = parse_args(args)
test_date = datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
if args.config:
configs = generate_model_configs_from_yaml(args.config)
else:
# If not specified, use the entire model set
if not args.model:
args.model = list_models()
devices = validate(parse_str_to_list(args.device), list_devices())
tests = validate(parse_str_to_list(args.test), list_tests())
models = validate(parse_str_to_list(args.model), list_models())
configs = generate_model_configs(devices, tests, model_names=models, batch_size=args.batch_size, jit=args.jit, extra_args=extra_args)
args.output = args.output if args.output else get_output_dir(BM_NAME, test_date)
try:
for config in configs:
run_benchmark(config, args)
except KeyboardInterrupt:
print("User keyboard interrupted!")
result_metrics = analyze(args.output)
dump_result_to_json(result_metrics, Path(args.output).parent, f"metrics-{test_date}.json")
def run_benchmark(config, args):
benchmark_script = REPO_PATH.joinpath("userbenchmark", "cpu", "run_config.py")
cmd = [sys.executable]
if args.launcher:
cmd.extend(["-m", "torch.backends.xeon.run_cpu"])
if args.launcher_args:
cmd.extend(args.launcher_args.split(" "))
cmd.append(str(benchmark_script))
if config.name:
cmd.append("-m")
cmd.append(config.name)
if config.device:
cmd.append("--device")
cmd.append(config.device)
if config.batch_size:
cmd.append("-b")
cmd.append(str(config.batch_size))
if config.test:
cmd.append("-t")
cmd.append(config.test)
if config.jit:
cmd.append("--jit")
cmd.extend(config.extra_args)
cmd.append("-o")
cmd.append(args.output)
print(f"Running benchmark: {cmd}")
if not args.dryrun:
subprocess.check_call(cmd, cwd=REPO_PATH)
|
"""
Run PyTorch cpu benchmarking.
"""
import argparse
import os
import numpy
from typing import List, Dict, Optional
from pathlib import Path
from cpu_utils import add_path, REPO_PATH, get_output_dir, get_output_json, dump_output
with add_path(str(REPO_PATH)):
from torchbenchmark.util.experiment.instantiator import (list_models, load_model_isolated, TorchBenchModelConfig,
list_devices, list_tests)
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
BM_NAME = 'cpu'
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def get_metrics(_config: TorchBenchModelConfig) -> List[str]:
return ["latencies", "cpu_peak_mem"]
def get_output_subdir(config: TorchBenchModelConfig) -> str:
mode = "jit" if config.jit else "eager"
subdir = f"{config.test}_{config.name}_{mode}"
return subdir
def result_to_output_metrics(metrics: TorchBenchModelMetrics) -> Dict[str, float]:
result_metrics = {}
if metrics.latencies:
latency_metric = "latency"
median_latency = numpy.median(metrics.latencies)
assert median_latency, f"Run failed for metric {latency_metric}"
result_metrics[latency_metric] = median_latency
if metrics.cpu_peak_mem:
cpu_peak_mem = "cpu_peak_mem"
result_metrics[cpu_peak_mem] = metrics.cpu_peak_mem
return result_metrics
def dump_result_to_json(metrics, output_dir):
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result, output_dir)
def validate(candidate: str, choices: List[str]) -> str:
"""Validate the candidates provided by the user is valid"""
assert candidate in choices, f"Specified {candidate}, but not in available list: {choices}."
return candidate
def run_config(config: TorchBenchModelConfig, dryrun: bool=False) -> Optional[TorchBenchModelMetrics]:
"""This function only handles NotImplementedError, all other errors will fail."""
metrics = get_metrics(config)
print(f"Running {config} ...", end='')
if dryrun:
return None
# We do not allow RuntimeError in this test
try:
# load the model instance within separate subprocess
model = load_model_isolated(config)
# get the model test metrics
result: TorchBenchModelMetrics = get_model_test_metrics(model, metrics=metrics)
    except NotImplementedError:
print(" [NotImplemented]")
return None
print(" [Done]")
return result
def run(args: List[str], extra_args: List[str]):
device = validate(args.device, list_devices())
test = validate(args.test, list_tests())
model = validate(args.model, list_models())
config = TorchBenchModelConfig(
name=model,
device=device,
test=test,
batch_size=args.batch_size,
jit=args.jit,
extra_args=extra_args,
extra_env=None)
    metrics = None
    try:
        metrics = run_config(config, dryrun=args.dryrun)
    except KeyboardInterrupt:
        print("User keyboard interrupted!")
    # metrics is None when the run was a dryrun, was interrupted, or hit NotImplementedError
    if not args.dryrun and metrics:
args.output = args.output if args.output else get_output_dir(BM_NAME)
target_dir = Path(args.output).joinpath(get_output_subdir(config))
target_dir.mkdir(exist_ok=True, parents=True)
metrics_dict = result_to_output_metrics(metrics)
dump_result_to_json(metrics_dict, target_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cpu", help="Devices to run.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run.")
parser.add_argument("--model", "-m", default=None, type=str, help="Only run the specifice model.")
parser.add_argument("--batch-size", "-b", default=None, type=int, help="Run the specifice batch size.")
parser.add_argument("--jit", action="store_true", help="Convert the models to jit mode.")
parser.add_argument("--output", "-o", default=None, help="Output dir.")
parser.add_argument("--dryrun", action="store_true", help="Dryrun the command.")
args, extra_args = parser.parse_known_args()
run(args, extra_args)
|
from typing import List
import submitit
import torch
from torchbenchmark.util.distributed.submit import parse_args, get_init_file, TrainerWrapper
from ..utils import dump_output
BM_NAME = "distributed"
def gen_metrics_from_result(result):
assert isinstance(result, List), "The result of submitit should be a list."
metrics = {}
for result_id, r in enumerate(result):
for metric_name in r:
metrics[f"{result_id}-{metric_name}"] = r[metric_name]
return metrics
def run(args: List[str]):
args, model_args = parse_args(args)
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, cluster=args.cluster, slurm_max_num_timeout=3000)
executor.update_parameters(
gpus_per_node=args.ngpus,
# one task per GPU
tasks_per_node=args.ngpus,
cpus_per_task=10,
nodes=args.nodes,
timeout_min=args.timeout,
# Below are cluster dependent parameters
slurm_partition=args.partition,
slurm_signal_delay_s=120,
slurm_exclude=args.exclude,
)
executor.update_parameters(name="distbench", slurm_array_parallelism=1, timeout_min=1000)
args.dist_url = get_init_file(args).as_uri()
args.output_dir = args.job_dir
args.extra_args = []
if model_args:
args.extra_args = model_args
job = executor.submit(TrainerWrapper(args, model_args))
# waits for completion and returns output
result = job.results()
# dump the output file
output = {
"name": BM_NAME,
"environ": {"pytorch_git_version": torch.version.git_version},
"args": vars(args),
"metrics": gen_metrics_from_result(result),
}
dump_output(BM_NAME, output)
|
import csv
import json
import copy
import argparse
from collections import OrderedDict
from dataclasses import dataclass
import os
import pickle
from collections import defaultdict
import tabulate
import sys
def parse_partial(args):
"""
Schema:
model_data["model"]["backend"][#nodes] = result
where "result" can be a list of results, or "error"
"""
model_data = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(list))))
rank_id = 0
log_path = os.path.join(args.results_dir, f"{args.job_id}_{rank_id}_log.out")
with open(log_path, "r") as f:
content = f.read()
pieces = content.split("<RESULT>")
pieces = [x.split("</RESULT>") for x in pieces]
pieces = [x[0] for x in pieces if len(x) == 2]
pieces = [json.loads(x) for x in pieces]
for row in pieces:
model = row["model_name"]
backend = row["backend"]
nodes = row["nodes"]
has_breaks = str(row["has_breaks"] if "has_breaks" in row else "False")
if isinstance(row["result"], dict):
latency = float(row["result"]["latency_median"])
if isinstance(model_data[model][backend][nodes][has_breaks], list):
model_data[model][backend][nodes][has_breaks].append(latency)
else:
model_data[model][backend][nodes][has_breaks] = "error"
return model_data
def model_name(model):
if "torchbenchmark.models." in model:
model = model[len("torchbenchmark.models."):]
if ".Model" in model:
model = model[:model.find(".Model")]
return model
def median(x):
if len(x) == 0:
return 0
x = copy.copy(x)
x = sorted(x)
idx = int(len(x)/2)
if len(x) % 2 == 0:
return (x[idx - 1] + x[idx]) / 2
else:
return x[idx]
def print_model_table(args, model, model_data):
node_counts = OrderedDict()
for backend in model_data:
for node in model_data[backend]:
node_counts[node] = node # hack orderedset
node_counts = list(node_counts)
node_counts = sorted(node_counts)
rows = []
for has_breaks in [False, True]:
for backend in model_data:
row = [f"{backend} {'w/' if has_breaks else 'wo/'}breaks", ]
for node in node_counts:
if node in model_data[backend]:
res = model_data[backend][node][str(has_breaks)]
if isinstance(res, list):
if len(res) > 0:
res = f"{median(res):.3f}"
else:
res = 0.0
row.append(res)
else:
row.append("-")
rows.append(row)
hdr = ("backend", ) + tuple(f"{node}_latency" for node in node_counts)
print(f"{model_name(model)}:")
print(tabulate.tabulate(rows, headers=hdr))
print()
def print_csv(args, data):
csv_data = []
node_counts = OrderedDict()
for model in data:
for backend in data[model]:
for node in data[model][backend]:
node_counts[node] = node # hack orderedset
node_counts = list(node_counts)
node_counts = sorted(node_counts)
labels = ["model", "has_ddp_breaks", "backend"]
for node in node_counts:
labels.append(f"{node}-node median")
# labels.append(f"{node}-node min")
# labels.append(f"{node}-node max")
for has_breaks in [False, True]:
for model in data:
for backend in data[model]:
row = {
"model": model,
"has_ddp_breaks": str(has_breaks),
"backend": backend,
}
for node in node_counts:
if node in data[model][backend]:
latency = data[model][backend][node][str(has_breaks)]
else:
latency = 0.
if isinstance(latency, list) and len(latency) == 0:
latency = 0.
node_label_median = f"{node}-node median"
node_label_min = f"{node}-node min"
node_label_max = f"{node}-node max"
latency_list = latency if isinstance(latency, list) else [latency]
row[node_label_median] = median(latency_list)
# row[node_label_min] = min(latency_list)
# row[node_label_max] = max(latency_list)
csv_data.append(row)
csv_writer = csv.DictWriter(sys.stdout, fieldnames=labels)
csv_writer.writeheader()
for row in csv_data:
csv_writer.writerow(row)
def print_results(args, data):
for model in data:
print_model_table(args, model, data[model])
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--job_id", required=True)
parser.add_argument("--results_dir", required=True)
parser.add_argument("--csv_out", action="store_true")
args = parser.parse_args()
data = parse_partial(args)
if args.csv_out:
print_csv(args, data)
else:
print_results(args, data)
if __name__ == "__main__":
main()
|
import argparse
import importlib
import os
import copy
import csv
import dataclasses
import functools
import io
import json
import multiprocessing
import queue
import submitit
import time
from datetime import datetime, timedelta
import sys
import torch
import uuid
import warnings
from pathlib import Path
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from typing import Any, Dict, List, Optional, Tuple
MODEL_PATH_TEMPLATE = "torchbenchmark.models.{}.Model"
def output_csv(filename, headers, row):
assert filename
existed = os.path.exists(filename)
output = csv.writer(
io.TextIOWrapper(
open(filename, "ab", buffering=0),
"utf-8",
write_through=True,
),
lineterminator="\n",
)
if not existed:
output.writerow(headers)
output.writerow([(f"{x:.4f}" if isinstance(x, float) else x) for x in row])
def parse_args(args: List[str]=None):
parser = argparse.ArgumentParser(description='Submitit for PyTorch Distributed Benchmark', add_help=False)
parser.add_argument(
"--ngpus",
default=8,
type=int,
help="Number of gpus to request on each node"
)
parser.add_argument(
"--nodes",
default=None,
type=int,
action="extend",
nargs="+",
help="Number of nodes to request. Provide a list of nodes to test, e.g. `--nodes 8 4 2 1 --next_arg..."
)
parser.add_argument(
"--filter_models",
default=None,
type=str,
action="extend",
nargs="+",
help="List of models to test, e.g. --filter hf_T5 hf_T5_large resnet50"
)
parser.add_argument(
"--timeout",
default=120,
type=int,
help="Duration of the job"
)
parser.add_argument(
"--profiler",
default=False,
type=bool,
help="Measure with PyTorch Profiler. Disabled by default, as it crashes on AWS"
)
parser.add_argument(
"--partition",
default="train",
type=str,
help="The Slurm partition to submit to"
)
parser.add_argument(
"--cluster",
default=None,
type=str,
help="Which slurm cluster to target. Use 'local' to run jobs locally, 'debug' to run jobs in process",
)
parser.add_argument(
"--distributed",
default="ddp_no_static_graph",
type=str,
help="the distributed runner to use"
)
parser.add_argument(
"--job_dir",
default=os.getcwd(),
type=str,
help="A shared folder across all worker processes"
)
parser.add_argument(
"--trainer",
type=str,
default="torchbenchmark.util.distributed.core_model.trainer.Trainer",
help="training paradigm, by default using DDP"
)
parser.add_argument(
"--index_file",
type=str,
default=f"ddp_experiments_{datetime.now().strftime('%Y%m%d-%H%M%S')}.csv",
help="training paradigm, by default using DDP"
)
parser.add_argument(
"--exclude",
type=str,
default="",
help="comma-separated list of nodes to exclude from the slurm allocation",
)
parser.add_argument(
"--repeat",
type=int,
default=1,
help="number of times to repeat the experiments",
)
parser.add_argument(
"--check_correctness_distributed",
action='store_true',
help="Do distributed correctness checks. Don't expect to use the same results for performance tests."
)
parser.add_argument(
"--precision",
type=str,
default=None,
help="Precision (e.g. amp, fp32, fp16)",
)
parser.add_argument(
"--nccl-socket-ifname",
type=str,
default="ens",
help="Value to use for NCCL_SOCKET_IFNAME environment variable",
)
try:
if args:
return parser.parse_args(args)
else:
return parser.parse_args()
    except SystemExit:
        # argparse raises SystemExit on parse errors and on --help; show the full help text
parser.print_help()
sys.exit(0)
def get_init_file(args):
    # The init file must not exist, but its parent dir must exist.
os.makedirs(args.job_dir, exist_ok=True)
init_file = Path(args.job_dir) / f"{uuid.uuid4().hex}_init"
print(init_file)
if init_file.exists():
os.remove(str(init_file))
return init_file
# This implements a barrier: all processes wait until every rank has reached
# the barrier() call.
# rank: the rank of the calling process; there should be one unique rank per process.
class FileBarrier:
def __init__(self, rank, world_size, sync_file, timeout: Optional[timedelta] = None):
self.rank = rank
self.world_size = world_size
self.sync_file = sync_file
self.store = torch.distributed.FileStore(sync_file, world_size)
if timeout is None:
timeout = timedelta(minutes=30)
self.store.set_timeout(timeout)
self.call_idx = 0
self.barrier()
def barrier(self):
self.call_idx += 1
my_key = f"barrier{self.call_idx}.{self.rank}"
self.store.add(my_key, 1)
wait_for = []
for i in range(self.world_size):
key = f"barrier{self.call_idx}.{i}"
wait_for.append(key)
self.store.wait(wait_for)
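# Illustrative use of FileBarrier (the sync file path is hypothetical): every rank constructs
# the barrier with the same sync_file, and each barrier() call only returns once all
# world_size ranks have incremented their key for that call index in the shared FileStore.
#   barrier = FileBarrier(rank=job_env.global_rank, world_size=job_env.num_tasks,
#                         sync_file="/shared/ddp_sync_file")
#   barrier.barrier()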
@dataclasses.dataclass
class ExperimentParams:
config: Dict
args: Any # arguments to the distributed trainer
model_args: Any # arguments to the model
is_reference: bool # should this experiment be treated as a reference for correctness?
# used for labeling filenames for correctness checks
def serialize_config(config: Dict):
keys = ["nodes", "model_name", "backend", "has_breaks"]
return "-".join([f"{k}_{config[k]}" for k in keys if k in config])
@dataclasses.dataclass
class JobConfig:
outer_sync_path: str
class TrainerWrapper(object):
    # per_experiment_args is a list of experiments.
# Each experiment should be a tuple of (config dict, args, model_args).
# config: configuration data to attach to the result dict.
# args & model_args: arguments for core_model.Trainer.
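    # Illustrative element (hypothetical values):
    #   ExperimentParams(
    #       config={"nodes": 2, "model_name": "hf_T5", "backend": "ddp", "has_breaks": False},
    #       args=trainer_args, model_args=[], is_reference=True)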
def __init__(self, job_config: JobConfig, per_experiment_args: List[ExperimentParams]):
self.job_config = job_config
self.per_experiment_args = per_experiment_args
        # wall-clock budget for one experiment, also used as the barrier timeout
        self.timeout = timedelta(minutes=45)
# this is called within a multiprocessing.Process.
def run_once(self, args, model_args, q):
print("run_once")
self._setup_gpu_args(args)
pos = args.model.rfind(".")
module = importlib.import_module(args.model[:pos])
model_class = getattr(module, args.model[(pos+1):])
pos = args.trainer.rfind(".")
module = importlib.import_module(args.trainer[:pos])
trainer_class = getattr(module, args.trainer[(pos+1):])
trainer = trainer_class(args, model_class, model_args=model_args)
result = trainer.measure()
print(f"result {result}")
q.put(result)
trainer.teardown()
def __call__(self):
results = []
job_env = submitit.JobEnvironment()
barrier = self._get_barrier()
print(f"This is node {job_env.node}")
# maps all configs that are expected to have the same output/gradients to the same value.
# i.e. we should expect that for a given model_name & number of nodes, we should get the same
# outputs and gradients, regardless of the backend/has_breaks/etc.
def reference_key(config):
return f"{config['model_name']}-{config['nodes']}"
latest_reference_file = {}
output_dir = self.per_experiment_args[0].args.output_dir
base_ref_name = Path(output_dir) / uuid.uuid4().hex
for experiment_args in self.per_experiment_args:
config = experiment_args.config
args = experiment_args.args
model_args = experiment_args.model_args
is_reference = experiment_args.is_reference
try:
key = reference_key(config)
if args.check_correctness_distributed:
# if this is a reference, dump the gradients into a file for later use.
# if this is not a reference, read the dumped gradients and compare.
if is_reference:
args.check_correctness_distributed = "reference"
args.reference_data_path = f"{base_ref_name}-{serialize_config(config)}"
latest_reference_file[key] = args.reference_data_path
else:
args.check_correctness_distributed = "test"
args.reference_data_path = latest_reference_file[key] if key in latest_reference_file else None
else:
args.check_correctness_distributed = None
if job_env.node >= args.nodes:
continue
result_dict = {**config}
q = multiprocessing.Queue()
proc = multiprocessing.Process(target=self.run_once, args=(args, model_args, q))
proc.start()
# wait for 3 minutes less than timeout, to give some buffer time so that
# the barrier doesn't time out.
# 3 minutes chosen based on 3x the 60s timeout for killing & joining jobs
# that are timing out.
timeout_seconds = (self.timeout - timedelta(minutes=3)).total_seconds()
# Wait in a loop because:
# - the queue has a limited buffer size, so we need to call q.get() before proc.join()
# in case the queue blocks when the worker process tries to put into the queue
# - if the worker process errors out, nothing will get put into the queue when it
# exits early and then we end up waiting until the timeout finishes
# So we wait in a loop and wait until either finishes
got_result = False
got_exit = False
exit_code = None
result = None
start_time = time.time()
while time.time() < start_time + timeout_seconds and not got_exit:
proc.join(timeout=1)
if proc.exitcode is not None:
got_exit = True
exit_code = proc.exitcode
if not got_result:
try:
result = q.get(timeout=1)
got_result = True
except queue.Empty:
pass
if not got_exit:
proc.kill()
proc.join(timeout=60)
proc.close()
if isinstance(result, dict) and 'latency_median' in result:
result_dict['result'] = result
else:
result_dict['result'] = None
print(f"exit code: {exit_code} and result: {result_dict}")
assert 'result' in result_dict
# wrap in <RESULT></RESULT> so we can parse partial results in the stdout logs
print(f"<RESULT>{json.dumps(result_dict)}</RESULT>")
results.append(result_dict)
finally:
barrier.barrier()
return results
def checkpoint(self):
self.args.dist_url = get_init_file(self.args).as_uri()
checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
if os.path.exists(checkpoint_file):
self.args.resume = checkpoint_file
print("Requeuing ", self.args, self.model_args)
empty_trainer = type(self)(self.args, self.model_args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _get_barrier(self):
job_env = submitit.JobEnvironment()
rank = job_env.global_rank
world_size = job_env.num_tasks
return FileBarrier(
rank=rank,
world_size=world_size,
sync_file=self.job_config.outer_sync_path,
timeout=self.timeout
)
def _global_rank(self):
job_env = submitit.JobEnvironment()
return job_env.global_rank
def _setup_gpu_args(self, args):
job_env = submitit.JobEnvironment()
args.output_dir = Path(str(args.output_dir).replace("%j", str(job_env.job_id)))
args.gpu = job_env.local_rank
args.rank = job_env.global_rank
args.world_size = args.ngpus * args.nodes
print(f"Process group: {args.world_size} tasks, rank: {args.rank}")
os.environ["LOCAL_RANK"] = str(job_env.local_rank)
os.environ["RANK"] = str(job_env.global_rank)
os.environ["WORLD_SIZE"] = str(args.world_size)
os.environ["GPUS_PER_NODE"] = str(job_env.num_tasks//job_env.num_nodes)
# os.environ["NCCL_IB_DISABLE"] = str(1)
os.environ["NCCL_DEBUG"] = 'INFO'
os.environ["NCCL_DEBUG_SUBSYS"] = 'INIT,ENV,NET'
os.environ['NCCL_SOCKET_IFNAME'] = args.nccl_socket_ifname
# os.environ["NCCL_ALGO"] = 'ring'
os.environ["FI_PROVIDER"] = 'efa'
os.environ["FI_EFA_USE_DEVICE_RDMA"]= str(1)
os.environ["NET_TYPE"] = 'efa'
os.environ["ADAM_CAPTURABLE"] = str(1)
def parse_precision(args, copied_model_args):
if args.precision is not None:
copied_model_args.extend(["--precision", args.precision])
def get_node_list(args):
node_list = args.nodes
if node_list is None:
# run the 8-node version first so that all the caches get warmed up at the same time.
node_list = [8, 4, 2, 1]
return node_list
# takes `models` as a list of models in shortened form (i.e. not containing MODEL_PATH_TEMPLATE).
def filter_models(args, models: List[str]):
if args.filter_models is None:
return models
final_models = []
for m in args.filter_models:
if m in models:
final_models.append(m)
else:
warnings.warn(f"Model {m} was specified but is unsupported.")
return final_models
def benchmark_ddp(args, executor):
available_models = [
'hf_Bert',
'hf_GPT2_large',
'hf_T5_large',
'timm_vision_transformer_large',
'hf_T5',
'resnet50',
]
models = [MODEL_PATH_TEMPLATE.format(m) for m in filter_models(args, available_models)]
model_batch_size = {
'hf_Bert': 32,
'hf_GPT2_large': 4,
'hf_T5_large': 4,
'timm_vision_transformer_large': 16,
'hf_T5': 12,
'resnet50': 128,
}
model_batch_size = {MODEL_PATH_TEMPLATE.format(k): v for k, v in model_batch_size.items()}
# put eager first to ensure it can be used for reference values.
# try --torchdynamo eager or --torchdynamo aot_eager for debugging
model_args_configs = [
[], # no args = pure eager baseline
["--torchdynamo", "inductor"],
]
node_list = get_node_list(args)
def get_backend_name(model_args):
if "--torchdynamo" in model_args:
return "torchdynamo_" + model_args[model_args.index("--torchdynamo") + 1]
return "eager"
experiments = []
for i in range(args.repeat):
for nodes in node_list:
for model_name in models:
for model_args in model_args_configs:
for has_breaks in [True, False]:
backend_name = get_backend_name(model_args)
if backend_name == "eager" and has_breaks:
continue
is_reference = (backend_name == "eager")
# copy the model args so we can add more arguments without modifying
# the original model_args list.
copied_model_args = copy.copy(model_args)
breakname = "withbreaks" if has_breaks else "nobreaks"
if has_breaks:
copied_model_args.append("--optimize_dynamo_ddp")
if "inductor" in backend_name:
copied_model_args.extend(["--torchinductor_cudagraph", "False"])
if backend_name != "eager":
copied_model_args.extend(["--dynamo_disable_optimizer_step", "True"])
parse_precision(args, copied_model_args)
# skip non-distributed correctness checks to avoid extra iterations which can
# interfere with distributed correctness checks.
copied_model_args.append("--skip_correctness")
if args.check_correctness_distributed and "inductor" in backend_name:
copied_model_args.extend(["--torchinductor_fallback_random", "True"])
batch_size = model_batch_size[model_name]
args_copy = copy.deepcopy(args)
args_copy.model = model_name
args_copy.batch_size = batch_size
args_copy.nodes = nodes
args_copy.dist_url = get_init_file(args).as_uri()
args_copy.output_dir = args.job_dir
config = {
"nodes": nodes,
"model_name": model_name,
"backend": backend_name,
"has_breaks": has_breaks,
}
experiments.append(ExperimentParams(config, args_copy, copied_model_args, is_reference))
allocation_nodes = max(node_list)
executor.update_parameters(
nodes=allocation_nodes,
)
job_config = JobConfig(
outer_sync_path=str(get_init_file(args))
)
job = executor.submit(TrainerWrapper(job_config, experiments))
# print ID of the Slurm job
print(f"{allocation_nodes} nodes: {job.job_id}")
output_csv(
args.index_file,
("job_id",),
(job.job_id,),
)
# waits for completion and returns output
print(job.results())
def apply_fsdp(model, trainer, auto_wrap_policy):
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
assert trainer == "fsdp"
fsdp_model = FSDP(
model,
auto_wrap_policy=auto_wrap_policy,
device_id=torch.cuda.current_device(),
use_orig_params=True,
)
return fsdp_model
def apply_fsdp_hf_T5_large(model, trainer):
from transformers.models.t5.modeling_t5 import T5Block
return apply_fsdp(
model,
trainer,
functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=(T5Block,)),
)
def apply_fsdp_hf_GPT2_large(model, trainer):
from transformers.models.gpt2.modeling_gpt2 import GPT2Block
return apply_fsdp(
model,
trainer,
functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=(GPT2Block,)),
)
def apply_fsdp_hf_Bert_large(model, trainer):
from transformers.models.bert.modeling_bert import BertLayer
return apply_fsdp(
model,
trainer,
functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=(BertLayer,)),
)
def apply_fsdp_timm_VIT_large(model, trainer):
from timm.models.vision_transformer import Block
return apply_fsdp(
model,
trainer,
functools.partial(transformer_auto_wrap_policy, transformer_layer_cls=(Block,)),
)
def benchmark_fsdp(args, executor):
def get_backend_name(model_args):
if "--torchdynamo" in model_args:
return "torchdynamo_" + model_args[model_args.index("--torchdynamo") + 1]
return "eager"
def generic_setup(nodes, model_args):
backend_name = get_backend_name(model_args)
copied_model_args = copy.copy(model_args)
if "inductor" in backend_name:
copied_model_args.extend(["--torchinductor_cudagraph", "False"])
if backend_name != "eager":
copied_model_args.extend(["--dynamo_disable_optimizer_step", "True"])
copied_model_args.append("--skip_correctness")
if args.check_correctness_distributed and "inductor" in backend_name:
copied_model_args.extend(["--torchinductor_fallback_random", "True"])
args_copy = copy.deepcopy(args)
args_copy.nodes = nodes
args_copy.dist_url = get_init_file(args).as_uri()
args_copy.output_dir = args.job_dir
return args_copy, copied_model_args
def fsdp_is_reference(backend_name):
return backend_name == "eager"
def get_model_config(
nodes,
model_args,
model_name,
wrap_fn,
batch_size_per_nodes,
):
model_path = MODEL_PATH_TEMPLATE.format(model_name)
args_copy, copied_model_args = generic_setup(nodes, model_args)
copied_model_args.extend(["--distributed_wrap_fn", wrap_fn])
parse_precision(args, copied_model_args)
assert nodes in batch_size_per_nodes
args_copy.batch_size = batch_size_per_nodes[nodes]
args_copy.model = model_path
backend_name = get_backend_name(model_args)
config = {
"nodes": nodes,
"model_name": model_name,
"backend": backend_name,
}
return ExperimentParams(config, args_copy, copied_model_args, is_reference=fsdp_is_reference(backend_name))
is_amp = args.precision == "amp"
model_configs = {
"timm_vision_transformer_large": functools.partial(
get_model_config,
model_name="timm_vision_transformer_large",
wrap_fn="userbenchmark.ddp_experiments.apply_fsdp_timm_VIT_large",
batch_size_per_nodes={1: 16, 2: 16, 4: 16, 8: 16} if is_amp else {1: 6, 2: 6, 4: 6, 8: 6},
),
"hf_GPT2_large": functools.partial(
get_model_config,
model_name="hf_GPT2_large",
wrap_fn="userbenchmark.ddp_experiments.apply_fsdp_hf_GPT2_large",
batch_size_per_nodes={1: 8, 2: 8, 4: 8, 8: 8} if is_amp else {1: 6, 2: 6, 4: 6, 8: 6},
),
"hf_Bert_large": functools.partial(
get_model_config,
model_name="hf_Bert_large",
wrap_fn="userbenchmark.ddp_experiments.apply_fsdp_hf_Bert_large",
batch_size_per_nodes={1: 20, 2: 20, 4: 20, 8: 20} if is_amp else {1: 16, 2: 16, 4: 16, 8: 16},
),
"hf_T5_large": functools.partial(
get_model_config,
model_name="hf_T5_large",
wrap_fn="userbenchmark.ddp_experiments.apply_fsdp_hf_T5_large",
batch_size_per_nodes={1: 6, 2: 6, 4: 6, 8: 6},
),
}
selected_models = filter_models(args, [k for k, _ in model_configs.items()])
model_configs = {k: v for k, v in model_configs.items() if k in selected_models}
model_args_configs = [
[], # no args = pure eager baseline
["--torchdynamo", "inductor"],
]
node_list = get_node_list(args)
experiments = []
for i in range(args.repeat):
for nodes in node_list:
for model_name, config_generator in model_configs.items():
for model_args in model_args_configs:
experiments.append(config_generator(nodes, model_args))
allocation_nodes = max(node_list)
executor.update_parameters(
nodes=allocation_nodes,
)
job_config = JobConfig(
outer_sync_path=str(get_init_file(args))
)
job = executor.submit(TrainerWrapper(job_config, experiments))
# print ID of the Slurm job
print(f"{allocation_nodes} nodes: {job.job_id}")
output_csv(
args.index_file,
("job_id",),
(job.job_id,),
)
# waits for completion and returns output
print(job.results())
def main():
args = parse_args()
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, cluster=args.cluster, slurm_max_num_timeout=3000)
executor.update_parameters(
gpus_per_node=args.ngpus,
# one task per GPU
tasks_per_node=args.ngpus,
cpus_per_task=12,
timeout_min=args.timeout,
# Below are cluster dependent parameters
slurm_partition=args.partition,
slurm_signal_delay_s=120,
slurm_exclude=args.exclude,
)
executor.update_parameters(name="distbench", slurm_array_parallelism=1, timeout_min=args.timeout)
if "ddp" in args.distributed:
benchmark_ddp(args, executor)
elif "fsdp" in args.distributed:
benchmark_fsdp(args, executor)
if __name__=="__main__":
import torch
if torch.version.debug:
raise RuntimeError("torch.version.debug == True, which is disallowed because " \
"NCCL performance is drastically worse when debug is on. Build with " \
"DEBUG=0 python setup.py [develop|install|bdist_wheel] instead."
)
main()
|
"""
Run PyTorch nightly benchmarking.
"""
import argparse
import itertools
import json
import math
import os
import yaml
import numpy
from typing import List, Tuple, Dict, Optional, Any
from ..utils import REPO_PATH, add_path, get_output_json, dump_output
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig, \
list_devices, list_tests
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
BM_NAME = "torch-nightly"
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
DEFAULT_DELTA_THRESHOLD = 0.07
DEFAULT_TARGET_SCORE = 1000.0
def generate_model_configs(devices: List[str], tests: List[str], model_names: List[str]) -> List[TorchBenchModelConfig]:
"""Use the default batch size and default mode."""
if not model_names:
model_names = list_models()
cfgs = itertools.product(*[devices, tests, model_names])
result = [TorchBenchModelConfig(
name=model_name,
device=device,
test=test,
batch_size=None,
jit=False,
extra_args=[],
extra_env=None,
) for device, test, model_name in cfgs]
return result
def get_metrics(_config: TorchBenchModelConfig) -> List[str]:
return ["latencies", "cpu_peak_mem", "gpu_peak_mem"]
def compute_score(results, reference_latencies: Dict[str, float]) -> float:
# sanity checks
latency_results = {k: v for k, v in results.items() if k.endswith("_latency")}
test_set = set(latency_results.keys())
reference_set = set(reference_latencies.keys())
test_only_set = test_set.difference(reference_set)
    assert not test_only_set, f"Tests {test_only_set} only appear in the result JSON, not in the reference YAML."
    reference_only_set = reference_set.difference(test_set)
    assert not reference_only_set, f"Tests {reference_only_set} only appear in the reference YAML, not in the result JSON."
# check that for every test in reference_latencies, we can find the corresponding tests in latency_results
total_score = 0.0
weight = 1.0 / len(reference_latencies)
for key, ref_latency in reference_latencies.items():
test_latency = latency_results[key]
ref_latency = float(ref_latency)
delta = (test_latency - ref_latency) / test_latency
# If less than threshold, treat it as noise
if abs(delta) <= DEFAULT_DELTA_THRESHOLD:
test_latency = ref_latency
total_score += weight * math.log(ref_latency / test_latency)
score = math.exp(total_score) * DEFAULT_TARGET_SCORE
return score
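# Worked example (added for exposition, with made-up numbers): suppose the reference has two
# tests with latencies 10.0 and 20.0 and the current run measured 10.5 and 10.0 for them.
# The first delta (~4.8%) is within DEFAULT_DELTA_THRESHOLD, so it is treated as noise and
# contributes log(1) = 0; the second contributes 0.5 * log(20/10). The score is therefore
# exp(0.5 * log(2)) * 1000 = sqrt(2) * 1000 ~= 1414, i.e. a weighted geometric mean of
# per-test speedups scaled by DEFAULT_TARGET_SCORE.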
def result_to_output_metrics(results: List[Tuple[TorchBenchModelConfig, TorchBenchModelMetrics]]) -> Dict[str, float]:
# metrics name examples:
# test_eval[timm_regnet-cuda-eager]_latency
# test_eval[timm_regnet-cuda-eager]_cmem
# test_eval[timm_regnet-cuda-eager]_gmem
result_metrics = {}
for config_id, (config, metrics) in enumerate(results):
metrics_base = f"test_{config.test}[{config.name}-{config.device}-eager]"
latency_metric = f"{metrics_base}_latency"
median_latency = numpy.median(metrics.latencies)
assert median_latency, f"Run failed for metric {latency_metric}"
result_metrics[latency_metric] = median_latency
if metrics.cpu_peak_mem:
cpu_peak_mem = f"{metrics_base}_cmem"
result_metrics[cpu_peak_mem] = metrics.cpu_peak_mem
if metrics.gpu_peak_mem:
gpu_peak_mem = f"{metrics_base}_gmem"
result_metrics[gpu_peak_mem] = metrics.gpu_peak_mem
return result_metrics
def dump_result_to_json(metrics):
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result)
def validate(candidates: List[str], choices: List[str]) -> List[str]:
"""Validate the candidates provided by the user is valid"""
for candidate in candidates:
assert candidate in choices, f"Specified {candidate}, but not in available list: {choices}."
return candidates
def generate_model_configs_from_yaml(yaml_file: str) -> Tuple[TorchBenchModelConfig, List[float], Any]:
yaml_file_path = os.path.join(CURRENT_DIR, yaml_file)
with open(yaml_file_path, "r") as yf:
config_obj = yaml.safe_load(yf)
devices = config_obj["metadata"]["devices"]
configs = []
reference_latencies = {}
for device in devices:
for c in config_obj[device]:
if not c["stable"]:
continue
config = TorchBenchModelConfig(
name=c["model"],
device=device,
test=c["test"],
batch_size=c["batch_size"] if "batch_size" in c else None,
jit=c["jit"] if "jit" in c else False,
extra_args=[],
extra_env=None,
)
configs.append(config)
metrics_base = f"test_{config.test}[{config.name}-{config.device}-eager]"
latency_metric_key = f"{metrics_base}_latency"
reference_latencies[latency_metric_key] = c["median_latency"]
return configs, reference_latencies, config_obj
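# Illustrative sketch (added for exposition) of the YAML layout this parser assumes; the
# field names come from the code above, the concrete values are invented:
#   metadata:
#     score_version: v2
#     devices: ["cuda"]
#   cuda:
#     - model: hf_Bert
#       test: eval
#       batch_size: 32
#       stable: true
#       median_latency: 0.0123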
def parse_str_to_list(candidates):
if isinstance(candidates, list):
return candidates
candidates = list(map(lambda x: x.strip(), candidates.split(",")))
return candidates
def run_config(config: TorchBenchModelConfig, dryrun: bool=False) -> Optional[TorchBenchModelMetrics]:
"""This function only handles NotImplementedError, all other errors will fail."""
metrics = get_metrics(config)
print(f"Running {config} ...", end='')
if dryrun:
return None
# We do not allow RuntimeError in this test
try:
# load the model instance within the same process
model = load_model_isolated(config)
# get the model test metrics
result: TorchBenchModelMetrics = get_model_test_metrics(model, metrics=metrics)
except NotImplementedError as e:
print(" [NotImplemented]")
return None
print(" [Done]")
return result
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cuda", help="Devices to run, splited by comma.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run, splited by comma.")
parser.add_argument("--model", "-m", default=None, type=str, help="Only run the specifice models, splited by comma.")
parser.add_argument("--config", "-c", default=None, help="YAML config to specify tests to run.")
parser.add_argument("--dryrun", action="store_true", help="Dryrun the command.")
parser.add_argument("--score", default=None, help="Generate score from the past run json.")
return parser.parse_args(args)
def run(args: List[str]):
args = parse_args(args)
if args.score:
assert args.config, f"To compute score, you must specify the config YAML using --config."
configs, reference_latencies, config_obj = generate_model_configs_from_yaml(args.config)
with open(args.score, "r") as sp:
run_result = json.load(sp)
input_metrics = run_result["metrics"]
score = compute_score(input_metrics, reference_latencies)
score_version = config_obj["metadata"]["score_version"]
score_name = f"{score_version}_score"
print(f"TorchBench {score_name}: {score}.")
exit(0)
elif args.config:
configs, reference_latencies, config_obj = generate_model_configs_from_yaml(args.config)
else:
# If not specified, use the entire model set
if not args.model:
args.model = list_models()
devices = validate(parse_str_to_list(args.device), list_devices())
tests = validate(parse_str_to_list(args.test), list_tests())
models = validate(parse_str_to_list(args.model), list_models())
configs = generate_model_configs(devices, tests, model_names=models)
reference_latencies = None
results = []
try:
for config in configs:
metrics = run_config(config, dryrun=args.dryrun)
if metrics:
results.append([config, metrics])
except KeyboardInterrupt:
print("User keyboard interrupted!")
if not args.dryrun:
metrics = result_to_output_metrics(results)
if reference_latencies:
score = compute_score(metrics, reference_latencies)
score_version = config_obj["metadata"]["score_version"]
score_name = f"{score_version}_score"
metrics[score_name] = score
dump_result_to_json(metrics)
|
from typing import Optional
from ..utils import TorchBenchABTestResult, TorchBenchABTestMetric
DEFAULT_REGRESSION_DELTA_THRESHOLD = 0.07
def run(control, treatment) -> Optional[TorchBenchABTestResult]:
control_env = control["environ"]
treatment_env = treatment["environ"]
control_metrics = control["metrics"]
treatment_metrics = treatment["metrics"]
details = {}
    for metric_name in control_metrics.keys():
        control_metric = control_metrics[metric_name]
        treatment_metric = treatment_metrics[metric_name]
        delta = (treatment_metric - control_metric) / control_metric
        if delta > DEFAULT_REGRESSION_DELTA_THRESHOLD:
            details[metric_name] = TorchBenchABTestMetric(control=control_metric, treatment=treatment_metric, delta=delta)
return TorchBenchABTestResult(control_env=control_env, \
treatment_env=treatment_env, \
details=details, \
bisection=None)
|
from pathlib import Path
from typing import Any, Dict, List, Tuple
from torchbenchmark import load_model_by_name
import torch
from torch import _dynamo as torchdynamo
from torch.optim import Adadelta, Adagrad, Adam, AdamW, Adamax, ASGD, SGD, RAdam, Rprop, RMSprop, NAdam, SparseAdam, LBFGS
import torch.utils.benchmark as benchmark
from userbenchmark.utils import REPO_PATH, add_path, dump_output, get_output_json
import argparse
import gc
import sys
import itertools
import datetime
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models
BM_NAME: str = 'optim'
continue_on_error: bool = False
run_on_subset: bool = False
ignore_skips: bool = False
MODEL_NAMES: List[str] = list_models()
SUBSET_OF_MODEL_NAMES: List[str] = [
'BERT_pytorch', 'DALLE2_pytorch', 'hf_GPT2_large', 'hf_T5_large', 'resnet50', 'timm_vision_transformer', 'yolov3'
]
DEVICES: List[str] = ['cuda', 'cpu']
OPTIM_NAMES = [o.__name__ for o in [Adadelta, Adagrad, Adam, AdamW, Adamax, ASGD, SGD, RAdam, Rprop, RMSprop, NAdam, SparseAdam]]
FUNC_STRS = ['pt2_', '']
OPTIMIZERS = [
(Adadelta, {}),
(Adadelta, {'maximize': True}),
(Adadelta, {'foreach': False}),
(Adadelta, {'differentiable': True}),
(Adadelta, {'foreach': True}),
(Adagrad, {}),
(Adagrad, {'maximize': True}),
(Adagrad, {'foreach': False}),
(Adagrad, {'differentiable': True}),
(Adagrad, {'foreach': True,}),
(Adam, {}),
(Adam, {'amsgrad': True, 'maximize': True}),
(Adam, {'foreach': False}),
(Adam, {'differentiable': True}),
(Adam, {'foreach': True}),
(Adam, {'foreach': True, 'maximize': True, 'capturable': True}),
(Adam, {'foreach': True, 'maximize': True, 'capturable': True, 'amsgrad': True}),
(Adam, {'fused': True}),
(Adam, {'fused': True, 'amsgrad': True, 'maximize': True}),
(Adam, {'fused': True, 'capturable': True}),
(Adam, {'fused': True, 'capturable': True, 'amsgrad': True}),
(AdamW, {}),
(AdamW, {'amsgrad': True, 'maximize': True}),
(AdamW, {'foreach': False}),
(AdamW, {'differentiable': True}),
(AdamW, {'foreach': True}),
(AdamW, {'foreach': True, 'maximize': True, 'capturable': True}),
(AdamW, {'foreach': True, 'maximize': True, 'capturable': True, 'amsgrad': True}),
(AdamW, {'fused': True}),
(AdamW, {'fused': True, 'amsgrad': True, 'maximize': True}),
(AdamW, {'fused': True, 'capturable': True}),
(AdamW, {'fused': True, 'capturable': True, 'amsgrad': True}),
(Adamax, {}),
(Adamax, {'maximize': True}),
(Adamax, {'foreach': False}),
(Adamax, {'differentiable': True}),
(Adamax, {'foreach': True,}),
(ASGD, {}),
(ASGD, {'maximize': True}),
(ASGD, {'foreach': False}),
(ASGD, {'differentiable': True}),
(ASGD, {'foreach': True}),
(SGD, {}),
(SGD, {'maximize': True}),
(SGD, {'foreach': False}),
(SGD, {'differentiable': True}),
(SGD, {'foreach': True,}),
(SGD, {'foreach': True, 'momentum': 0.9, 'nesterov': True}),
(SGD, {'foreach': True, 'momentum': 0.9, }),
(RAdam, {}),
(RAdam, {'foreach': False}),
(RAdam, {'differentiable': True}),
(RAdam, {'foreach': True,}),
(Rprop, {}),
(Rprop, {'maximize': True}),
(Rprop, {'foreach': False}),
(Rprop, {'differentiable': True}),
(Rprop, {'foreach': True}),
(RMSprop, {}),
(RMSprop, {'maximize': True}),
(RMSprop, {'foreach': False}),
(RMSprop, {'differentiable': True}),
(RMSprop, {'foreach': True}),
(NAdam, {}),
(NAdam, {'foreach': False}),
(NAdam, {'differentiable': True}),
(NAdam, {'foreach': True}),
(SparseAdam, {}),
# LBFGS requires a closure
# (LBFGS, {}),
]
DENSE_MODELS = [
'BERT_pytorch',
'Background_Matting',
'DALLE2_pytorch',
'LearningToPaint',
'Super_SloMo',
'alexnet',
'attention_is_all_you_need_pytorch',
'dcgan',
'demucs',
'densenet121',
'detectron2_fasterrcnn_r_101_c4',
'detectron2_fasterrcnn_r_101_dc5',
'detectron2_fasterrcnn_r_101_fpn',
'detectron2_fasterrcnn_r_50_c4',
'detectron2_fasterrcnn_r_50_dc5',
'detectron2_fasterrcnn_r_50_fpn',
'detectron2_maskrcnn',
'detectron2_maskrcnn_r_101_c4',
'detectron2_maskrcnn_r_101_fpn',
'detectron2_maskrcnn_r_50_c4',
'detectron2_maskrcnn_r_50_fpn',
'dlrm',
'doctr_det_predictor',
'doctr_reco_predictor',
'drq',
'fambench_xlmr',
'fastNLP_Bert',
'functorch_dp_cifar10',
'functorch_maml_omniglot',
'gat',
'gcn',
'hf_Albert',
'hf_Bart',
'hf_Bert',
'hf_Bert_large',
'hf_BigBird',
'hf_DistilBert',
'hf_GPT2',
'hf_GPT2_large',
'hf_Longformer',
'hf_Reformer',
'hf_T5',
'hf_T5_base',
'hf_T5_large',
'lennard_jones',
'llama',
'maml',
'maml_omniglot',
'mnasnet1_0',
'mobilenet_v2',
'mobilenet_v2_quantized_qat',
'mobilenet_v3_large',
'moco',
'nvidia_deeprecommender',
'opacus_cifar10',
'phlippe_densenet',
'phlippe_resnet',
'pytorch_CycleGAN_and_pix2pix',
'pytorch_stargan',
'pytorch_struct',
'pytorch_unet',
'resnet152',
'resnet18',
'resnet50',
'resnet50_quantized_qat',
'resnext50_32x4d',
'sage',
'shufflenet_v2_x1_0',
'soft_actor_critic',
'speech_transformer',
'squeezenet1_1',
'tacotron2',
'timm_efficientdet',
'timm_efficientnet',
'timm_nfnet',
'timm_regnet',
'timm_resnest',
'timm_vision_transformer',
'timm_vision_transformer_large',
'timm_vovnet',
'torchrec_dlrm',
'tts_angular',
'vgg16',
'vision_maskrcnn',
'yolov3'
]
# Skips! Exclusions are represented by a dictionary of incompatible configs, where
# optim => optimizer name
# model => model name
# func_str => func string (e.g., pt2_)
# device => device name
# defaults => list of flag descriptions (strings) to exclude, e.g. no_foreach
# if empty list, will exclude all configurations
# Exclusions are general and will try to match on everything. For an exclusion
# {'optim': 'SparseAdam', 'model': 'BERT_pytorch'}, any configuration with
# SparseAdam on BERT_pytorch will be skipped.
EXCLUSIONS: List[Dict[str, Any]] = [
# SparseAdam does not support dense gradients
{'optim': 'SparseAdam', 'model': m} for m in DENSE_MODELS
] + [
# DALL-E 2, timm_efficientdet, tacotron2 Not Supported on CPU
{'model': 'DALLE2_pytorch', 'device': 'cpu'},
{'model': 'tacotron2', 'device': 'cpu'},
{'model': 'timm_efficientdet', 'device': 'cpu'},
# FCOS train is not supported by upstream detectron2.
# See GH issue: https://github.com/facebookresearch/detectron2/issues/4369.
{'model': 'detectron2_fcos_r_50_fpn'},
# moco uses DDP and DistributedDataParallel/allgather requires cuda
{'model': 'moco', 'device': 'cpu'},
# pyhpc_equation_of_state and pyhpc_isoneutral_mixing have no parameters
{'model': 'pyhpc_equation_of_state'},
{'model': 'pyhpc_isoneutral_mixing'},
{'model': 'pyhpc_turbulent_kinetic_energy'},
# fused/capturable requires params to be floats on CUDA
{'defaults': ['fused'], 'device': 'cpu'},
{'defaults': ['capturable'], 'device': 'cpu'},
] + [
# PT2 dynamo tracing for the for-loop implementation takes over 30s.
# This is known + not going to be improved anytime soon, see
# https://github.com/pytorch/torchdynamo/issues/1803#issuecomment-1336688894
# Run PT2 on for-loop implementations for only the subset of models. Skip everything else.
{'model': m, 'device': d, 'func_str': 'pt2_', 'defaults': [df]}
for d in DEVICES
for m in set(MODEL_NAMES) - set(SUBSET_OF_MODEL_NAMES)
for df in ['no_foreach', 'differentiable'] + ([] if d == 'cuda' else ['default', 'maximize', 'amsgrad, maximize'])
] + [
# torch.compile()'d optimizer.step() has too many arguments in C++
# See GH issue: https://github.com/pytorch/pytorch/issues/97361
{'model': m, 'device': 'cpu', 'func_str': 'pt2_', 'defaults': []} for m in [
'BERT_pytorch', 'Background_Matting', 'Super_SloMo', 'attention_is_all_you_need_pytorch',
'densenet121', 'detectron2_fasterrcnn_r_101_c4', 'detectron2_fasterrcnn_r_101_dc5',
'detectron2_fasterrcnn_r_101_fpn', 'detectron2_fasterrcnn_r_50_fpn', 'detectron2_maskrcnn',
'detectron2_maskrcnn_r_101_c4', 'detectron2_maskrcnn_r_101_fpn',
'detectron2_maskrcnn_r_50_fpn', 'doctr_det_predictor', 'doctr_reco_predictor', 'fambench_xlmr',
'fastNLP_Bert', 'hf_Bart', 'hf_Bert', 'hf_Bert_large', 'hf_BigBird', 'hf_DistilBert', 'hf_GPT2',
'hf_GPT2_large', 'hf_Longformer', 'hf_Reformer', 'hf_T5', 'hf_T5_base', 'hf_T5_large', 'llama',
'mnasnet1_0', 'mobilenet_v2', 'mobilenet_v2_quantized_qat', 'mobilenet_v3_large',
'phlippe_densenet', 'pytorch_unet', 'resnet152', 'resnet50', 'resnet50_quantized_qat', 'resnext50_32x4d',
'shufflenet_v2_x1_0', 'timm_efficientnet', 'timm_nfnet', 'timm_regnet',
'timm_vision_transformer', 'yolov3']
] + [
# torch.compile()'d optimizer.step() has too many arguments in the generated
# C++ kernel for both CUDA and CPU for single tensor implementations.
# See GH issue: https://github.com/pytorch/pytorch/issues/97361
{'model': m, 'func_str': 'pt2_', 'defaults': [df]} for m in [
'DALLE2_pytorch', 'fambench_xlmr'] for df in ['no_foreach', 'differentiable']
] + [
# torch.compile()'d optimizer.step() has too many arguments in the generated
# C++ kernel even when params are on CUDA for single tensor implementations on NAdam.
# See GH issue: https://github.com/pytorch/pytorch/issues/97361
{'model': m, 'device': 'cuda', 'func_str': 'pt2_', 'defaults': [df], 'optim': 'NAdam'} for m in [
'densenet121', 'doctr_reco_predictor', 'fambench_xlmr', 'hf_Bart', 'hf_Bert_large', 'hf_GPT2_large','hf_Longformer',
'hf_T5_base', 'hf_T5_large', 'moco', 'resnet152', 'yolov3'
] for df in ['no_foreach', 'differentiable']
] + [
# torch.compile()'d optimizer.step() has too many arguments in the generated
# C++ kernel even when params are on CUDA for single tensor implementations on ASGD.
# See GH issue: https://github.com/pytorch/pytorch/issues/97361
{'model': m, 'device': 'cuda', 'func_str': 'pt2_', 'defaults': [df], 'optim': 'ASGD'} for m in [
'densenet121', 'fambench_xlmr', 'hf_Bart', 'hf_Bert_large', 'hf_GPT2_large', 'hf_Longformer',
'hf_T5_base', 'hf_T5_large', 'moco'
] for df in ['no_foreach', 'differentiable']
]
# Returns clones of params and not a generator.
def _get_model_params(m) -> List[torch.nn.Parameter]:
model, _ = m.get_module()
params_clone = []
for p in model.parameters():
params_clone.append(p.clone().detach())
return params_clone
lil_cache: Tuple[str, str, List[torch.nn.Parameter]] = ('', '', [])
# Returns clones of params given a model name
def get_model_params(modelName: str, device: str) -> List[torch.nn.Parameter]:
global lil_cache
cached_mn, cached_d, cached_params = lil_cache
if modelName == cached_mn and device == cached_d:
return cached_params
# free the old params before initializing a model to conserve memory
lil_cache = ('', '', [])
torch.cuda.empty_cache()
Model = load_model_by_name(modelName)
# some (usually quantized) models do not support eval on CPU, but since we
# only care about params + randomly generate grads, eval vs train doesn't matter
try:
params = _get_model_params(Model(device=device, test='train', batch_size=1))
except:
try:
params = _get_model_params(Model(device=device, test='eval', batch_size=1))
except:
try:
params = _get_model_params(Model(device=device, test='train'))
except:
params = _get_model_params(Model(device=device, test='eval'))
finally:
del Model
lil_cache = (modelName, device, params)
return params
# This fakes a model forward & backward--we are not concerned about
# accuracy here, but about the perf of optim on particular shapes and
# dtypes of commonly used models!
def generate_random_gradients(parameters):
for p in parameters:
p.grad = torch.rand_like(p)
def optimizer_step(optimizer):
optimizer.step()
def pt2_optimizer_step(optimizer):
@torchdynamo.optimize('inductor')
def f():
optimizer.step()
f()
def defaults_to_str(defaults: Dict[str, Any]) -> str:
# We define lr for SGD, but we don't currently vary lr so it is effectively the default.
defaults.pop('lr', None)
if len(defaults) == 0:
return 'default'
def entry_to_str(k, v) -> str:
if isinstance(v, bool):
return 'no_' + k if not v else k
return f'{k}={v}'
return ', '.join([entry_to_str(k, v) for k, v in defaults.items()])
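# Worked examples (added for exposition) of the serialization above:
#   defaults_to_str({}) -> 'default'
#   defaults_to_str({'foreach': False}) -> 'no_foreach'
#   defaults_to_str({'foreach': True, 'maximize': True, 'capturable': True}) -> 'foreach, maximize, capturable'
#   defaults_to_str({'momentum': 0.9, 'nesterov': True}) -> 'momentum=0.9, nesterov'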
def is_excluded(mn: str, d: str, on: str, func_str: str, defaults: Dict[str, Any]) -> bool:
return any([('model' not in e or e['model'] == mn) and
('device' not in e or e['device'] == d) and
('optim' not in e or e['optim'] == on) and
                ('func_str' not in e or e['func_str'] == func_str) and
('defaults' not in e or all(f in defaults_to_str(defaults) for f in e['defaults'])) for e in EXCLUSIONS])
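# Worked example (added for exposition): an exclusion entry only constrains the fields it
# names, so the hypothetical entry {'optim': 'SparseAdam', 'model': 'BERT_pytorch'} makes
# is_excluded('BERT_pytorch', d, 'SparseAdam', func_str, defaults) True for every device,
# func_str, and defaults, while SparseAdam runs on other models are unaffected by that
# entry. The 'defaults' field matches by substring inclusion in defaults_to_str(defaults).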
def run_model(modelName, device, Optim, defaults, maybe_pt2_):
try:
params = get_model_params(modelName, device)
print('getting params: ', params[0].size(), params[0].dtype, len(params), params[0].device)
if Optim.__name__ == 'SGD':
defaults['lr'] = 1e-2
optim = Optim(params, **defaults)
generate_random_gradients(params)
pt2_description = '' if maybe_pt2_ == '' else '(pt2) '
print(f'{datetime.datetime.now()} {modelName}, {device}, {Optim}, {defaults_to_str(defaults)}, {maybe_pt2_}')
r = benchmark.Timer(
stmt=f'{maybe_pt2_}optimizer_step(optim)',
globals={'optim': optim, 'optimizer_step': optimizer_step, 'pt2_optimizer_step': pt2_optimizer_step},
sub_label=f'{modelName}, {optim.__class__.__name__}, {device}',
description=pt2_description + defaults_to_str(defaults),
).blocked_autorange()
if maybe_pt2_:
# Clears the cache that dynamo had accumulated to prevent OOMs
# See https://github.com/pytorch/pytorch/issues/100264
torchdynamo.reset()
gc.collect()
return r
except Exception as e:
if not continue_on_error:
raise e
print(e)
with open('errors.txt', 'a') as f:
f.write(f'{datetime.datetime.now().timestamp()} {modelName}, {device}, {Optim}, {defaults_to_str(defaults)}, {maybe_pt2_}, {str(e)}\n')
return None
def run_benchmarks(optims: List[str], func_strs: List[str], models: List[str], devices: List[str],
flags: List[str]) -> List[torch.utils.benchmark.utils.common.Measurement]:
results = []
optim_cfgs = [(O, defaults) for (O, defaults) in OPTIMIZERS if O.__name__ in optims and all(f in defaults_to_str(defaults) for f in flags)]
if run_on_subset:
models = [m for m in SUBSET_OF_MODEL_NAMES if m in models]
optim_cfgs = [(O, defaults) for (O, defaults) in optim_cfgs if (all([x in ['foreach', 'fused', 'lr'] for x in defaults]))]
for mn, d, (O, defaults), func_str in itertools.product(models, devices, optim_cfgs, func_strs):
if (not ignore_skips and is_excluded(mn, d, O.__name__, func_str, defaults)):
continue
bm = run_model(mn, d, O, defaults, func_str)
if bm is not None:
results.append(bm)
return results
def parse_args(args: List[str]):
parser = argparse.ArgumentParser()
parser.add_argument(
'--optims', '-o',
nargs='*',
default=OPTIM_NAMES,
choices=OPTIM_NAMES,
help='List of optimizers to run tests on')
parser.add_argument(
'--funcs', '-f',
nargs='*',
default=FUNC_STRS,
choices=FUNC_STRS,
help='What optimizer.step() function variations to benchmark. NOTE: there is an underscore ' +
'for "pt2_"!'
)
parser.add_argument(
'--models', '-m',
nargs='*',
default=MODEL_NAMES,
choices=MODEL_NAMES,
help='List of models to run tests on')
parser.add_argument(
'--subset', '-s',
action='store_true',
help='Run benchmarks on a standard subset of models. If the --models (-m) is set, we will ' +
'take the intersection of the requested models and the defined subset. For example, ' +
'`...-s -m llama yolov3` will ONLY run yolov3.'
)
parser.add_argument(
'--devices', '-d',
nargs='*',
default=DEVICES,
choices=DEVICES,
help='List of devices to run tests on')
parser.add_argument(
'--default-flags', '--df',
nargs='*',
default=[],
choices=['foreach', 'no_foreach', 'fused', 'maximize', 'capturable', 'differentiable', 'default',
'amsgrad', 'momentum', 'nesterov'],
help='List of flag descriptions to run tests on. We serialize the configs to a string (see ' +
'defaults_to_str()) and test for inclusion of the flag description in the string. ' +
'For example, "foreach" will enable all default configs with "foreach", including ' +
             'those with other flags and also "no_foreach". Effectively, passing in more flags ' +
'will further limit the default configs run.\n'
)
parser.add_argument(
'--continue-on-error', '-c',
action='store_true',
help='Continue running benchmarks on failure, errors will be written to errors.txt'
)
parser.add_argument(
'--output-dir', '--od', default=None, type=str,
help='name of directory path in which to dump the metrics json, e.g., "./.userbenchmark/optim/tmp". ' +
             'If None, we will dump the metrics json to "REPO_ROOT/.userbenchmark/optim".'
)
parser.add_argument(
'--ignore-skips', '-i', action='store_true',
help='Runs ALL benchmarks ignoring any skips. This allows for easy testing of current skipped ' +
'benchmarks once one believes they should be fixed. Beware though! You may run into errors ' +
'that were previously hidden by the exclusions.'
)
args = parser.parse_args(args)
return args
# convert results into a JSON of description to mean time in seconds
def get_metrics(results: List[torch.utils.benchmark.utils.common.Measurement]) -> Dict[str, float]:
metrics = {}
for r in results:
ts: torch.utils.benchmark.utils.common.TaskSpec = r.task_spec
metrics[f'{ts.sub_label}, {ts.description}'] = r.mean
return metrics
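# Illustrative example (added for exposition): with the sub_label/description format used in
# run_model() above, a resulting metric key looks like
#   "hf_Bert, Adam, cuda, (pt2) foreach"
# and maps to the mean step time in seconds for that configuration (model name, optimizer,
# device, optional "(pt2) " prefix, then the serialized defaults).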
def run(args: List[str]):
args = parse_args(args)
global continue_on_error, run_on_subset, ignore_skips
continue_on_error = args.continue_on_error
run_on_subset = args.subset
ignore_skips = args.ignore_skips
target_dir = Path(args.output_dir) if args.output_dir is not None else None
if target_dir is not None:
target_dir.mkdir(exist_ok=True, parents=True)
results = run_benchmarks(args.optims, args.funcs, args.models, args.devices, args.default_flags)
metrics: Dict[str, float] = get_metrics(results)
dump_output(BM_NAME, get_output_json(BM_NAME, metrics), target_dir=target_dir)
compare = benchmark.Compare(results)
compare.trim_significant_figures()
compare.colorize(rowwise=True)
compare.print()
if __name__ == '__main__':
run(sys.argv[1:])
|
#!/usr/bin/env python3
'''
This script is intended for the CI context only! The whole purpose behind this script is to enable
process/context/memory isolation across different models and devices. The OG script (which this
script calls) is the userbenchmark/optim/__init__.py script, which is better documented and what is
intended to be used locally. The current script is simply a wrapper that dispatches serial
subprocesses to run the OG script and handles the metrics.json merging afterwards.
WARNING! Running this script will wipe clean the OUTPUT_DIR, .userbenchmark/optim/tmp!
'''
from pathlib import Path
import shutil
import subprocess
from typing import Any, List, Dict, Tuple
import argparse
import sys
import itertools
import json
from userbenchmark.utils import REPO_PATH, add_path, dump_output, get_output_json
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models
BM_NAME: str = 'optim'
MODEL_NAMES: List[str] = list_models()
DEVICES: List[str] = ['cuda', 'cpu']
OUTPUT_DIR: Path = REPO_PATH.joinpath('.userbenchmark/optim/tmp')
# Capture the specified models and devices we want to run to avoid redundant work,
# but send the rest of the user arguments to the underlying optim benchmark runner.
def parse_args() -> Tuple[Dict[Any, Any], Dict[Any, Any]]:
parser = argparse.ArgumentParser(description='Run optim benchmarks per model and device')
parser.add_argument(
'--models', '-m',
nargs='*',
default=MODEL_NAMES,
choices=MODEL_NAMES,
help='List of models to run tests on')
parser.add_argument(
'--devices', '-d',
nargs='*',
default=DEVICES,
choices=DEVICES,
help='List of devices to run tests on')
return parser.parse_known_args()
def main() -> None:
args, optim_bm_args = parse_args()
assert not OUTPUT_DIR.exists() or not any(OUTPUT_DIR.glob("*")), \
f'{OUTPUT_DIR} must be empty or nonexistent. Its contents will be wiped by this script.'
    # Run benchmarks in subprocesses to isolate contexts and memory
for m, d in itertools.product(args.models, args.devices):
command = [sys.executable, '-m', 'userbenchmark.optim.__init__', '--continue-on-error',
'--output-dir', OUTPUT_DIR, '--models', m, '--devices', d] + optim_bm_args
        # subprocess.run blocks until the child exits, so the benchmarks run serially,
        # which is all our capacity safely allows (1 model at a time).
        # Don't raise on a failed subprocess: while a failure is certainly unexpected, we don't
        # want to halt entirely, as there can be valuable benchmarks to gather from the other subprocesses.
        completed_process = subprocess.run(command)
        if completed_process.returncode != 0:
print(f'OH NO, the subprocess for model {m} and device {d} exited with {completed_process.returncode}!')
# Nightly CI expects ONE metrics json in .userbenchmark/optim, but we may have multiple, so
# consolidate them into one file.
aggregated_metrics = {}
for file_path in Path(OUTPUT_DIR).glob("metrics*.json"):
with open(file_path, 'r') as f:
json_data = json.load(f)
aggregated_metrics.update(json_data['metrics'])
dump_output(BM_NAME, get_output_json(BM_NAME, aggregated_metrics))
# Gotta delete the tmp folder--otherwise the nightly CI will think there are multiple metrics jsons!
shutil.rmtree(OUTPUT_DIR)
if __name__ == '__main__':
main()
|
from typing import Optional
from ..utils import TorchBenchABTestResult, TorchBenchABTestMetric
DEFAULT_REGRESSION_DELTA_THRESHOLD = 0.07
def run(control, treatment) -> Optional[TorchBenchABTestResult]:
control_env = control["environ"]
treatment_env = treatment["environ"]
control_metrics = control["metrics"]
treatment_metrics = treatment["metrics"]
details = {}
for control_metric_name, control_metric in control_metrics.items():
if control_metric_name in treatment_metrics:
treatment_metric = treatment_metrics[control_metric_name]
delta = (treatment_metric - control_metric) / control_metric
if delta > DEFAULT_REGRESSION_DELTA_THRESHOLD:
details[control_metric_name] = TorchBenchABTestMetric(control=control_metric, treatment=treatment_metric, delta=delta)
return TorchBenchABTestResult(control_env=control_env, \
treatment_env=treatment_env, \
details=details, \
bisection=None)
|
import argparse
from datetime import datetime
import git
import numpy as np
import os
import json
import subprocess
import sys
import time
import shutil
from pathlib import Path
from ..utils import dump_output, get_output_dir, get_output_json, REPO_PATH
from typing import List
BM_NAME = "instruction-count"
RESULT_JSON = "ubenchmark_results.json"
PYTORCH_SRC_URL = "https://github.com/pytorch/pytorch.git"
def translate_result_metrics(json_path: Path):
metrics = {}
with open(json_path, "r") as j:
raw_result = json.load(j)
raw_values = raw_result["values"]
for key in raw_values:
times = raw_values[key]["times"]
counts = raw_values[key]["counts"]
metrics[f"{key}_count_min"] = min(counts)
metrics[f"{key}_count_max"] = max(counts)
metrics[f"{key}_count_p25"] = int(np.percentile(counts, 25))
metrics[f"{key}_count_median"] = int(np.median(counts))
metrics[f"{key}_count_p75"] = int(np.percentile(counts, 75))
metrics[f"{key}_t_min"] = min(times)
metrics[f"{key}_t_max"] = max(times)
metrics[f"{key}_t_mean"] = float(np.mean(times))
metrics[f"{key}_t_p01"] = float(np.percentile(times, 1))
metrics[f"{key}_t_p25"] = float(np.percentile(times, 25))
metrics[f"{key}_t_median"] = float(np.median(times))
metrics[f"{key}_t_75"] = float(np.percentile(times, 75))
metrics[f"{key}_t_99"] = float(np.percentile(times, 99))
metrics[f"{key}_t_stddev"] = float(np.std(times))
return metrics
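# Illustrative sketch (added for exposition) of the result JSON shape this function assumes;
# the benchmark key and numbers are invented:
#   {"values": {"some_ubenchmark_key": {"times": [1.2e-3, 1.3e-3], "counts": [123456, 123789]}}}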
def get_timestamp():
return datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")
def get_work_dir(output_dir):
work_dir = output_dir.joinpath(f"run-{get_timestamp()}")
work_dir.mkdir(exist_ok=True, parents=True)
return work_dir
def get_run_env(env):
env["BENCHMARK_USE_DEV_SHM"] = "1"
return env
def checkout_pytorch_repo(pytorch_repo: str, pytorch_branch: str):
git.Repo.clone_from(PYTORCH_SRC_URL, pytorch_repo, depth=1, branch=pytorch_branch)
def cleanup_pytorch_repo(pytorch_repo: str):
pytorch_repo_path = Path(pytorch_repo)
if pytorch_repo_path.exists():
shutil.rmtree(pytorch_repo_path)
def run_benchmark(pytorch_src_path: Path, output_json_path: Path):
benchmark_path = pytorch_src_path.joinpath("benchmarks", "instruction_counts")
runtime_env = get_run_env(os.environ.copy())
command = [sys.executable, "main.py", "--mode", "ci", "--destination", str(output_json_path.resolve())]
subprocess.check_call(command, cwd=benchmark_path, env=runtime_env)
def parse_args(args: List[str], work_dir: Path):
parser = argparse.ArgumentParser()
parser.add_argument("--pytorch-src", default=str(work_dir.resolve()),
help="Location of PyTorch source repo")
parser.add_argument("--pytorch-branch", default="master",
help="The branch of pytorch to check out")
parser.add_argument("--analyze-json", type=str, default=None, help="Only analyze an existing result")
args = parser.parse_args(args)
return args
def run(args: List[str]):
output_dir = get_output_dir(BM_NAME)
work_dir = get_work_dir(output_dir)
args = parse_args(args, work_dir)
if args.analyze_json:
json_path = Path(args.analyze_json)
metrics = translate_result_metrics(json_path)
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result)
return
cleanup_pytorch_repo(args.pytorch_src)
checkout_pytorch_repo(args.pytorch_src, args.pytorch_branch)
pytorch_src_path = Path(args.pytorch_src)
output_json_path = work_dir.joinpath(RESULT_JSON)
run_benchmark(pytorch_src_path, output_json_path)
metrics = translate_result_metrics(output_json_path)
result = get_output_json(BM_NAME, metrics)
dump_output(BM_NAME, result)
cleanup_pytorch_repo(args.pytorch_src)
|
import argparse
import csv
import functools
import gc
import io
import itertools
import logging
import numpy as np
import os
import re
import sys
import time
import torch
from torch import nn
from torch.jit import fuser, optimized_execution
from os.path import abspath
from scipy.stats import ttest_ind
import importlib
import glob
import collections
import random
import torch._lazy
import torch._lazy.metrics as metrics
import torch._lazy.ts_backend
def set_seeds(seed=1337):
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.cuda.manual_seed_all(seed)
def get_unique_suffix():
return f"{time.time()}_{os.getpid()}"
def get_benchmark_cls(model_name):
if ("Benchmark(dims=[" in model_name):
# just evaluate the model name + args
# it should create a model with the right dim
return eval(model_name)
try:
module = importlib.import_module(f'.models.{model_name}', package="torchbenchmark")
Model = getattr(module, 'Model', None)
if Model is None:
raise RuntimeError(f"{module} does not define attribute Model, skip it")
if not hasattr(Model, 'name'):
Model.name = model_name
return Model
except ModuleNotFoundError as e:
raise RuntimeError(f"Could not find dependent module {e.name} for Model {model_name}, skip it")
# from caffe2.python import workspace
# workspace.GlobalInit(['caffe2', '--caffe2_log_level=-5'])
import torch._lazy.metrics
torch._lazy.ts_backend.init()
os.environ["KALDI_ROOT"] = "/tmp" # avoids some spam
log = logging.getLogger(__name__)
# Models that are known to crash or otherwise not work with lazy tensor are
# disabled, but should be removed from these lists once fixed
SKIP = {
"densenet121": "Disabled by torchbench upstream due to OOM on T4 CI machine",
"timm_nfnet": "Disabled by torchbench upstream due to OOM on T4 CI machine",
"moco": "Distributed/ProcessGroupNCCL: Tensors must be CUDA and dense",
"tacotron2": "Disabled by torchbench upstream due to OOM on T4 CI machine",
}
SKIP_TRAIN_ONLY = {
"squeezenet1_1": "Disabled by torchbench upstream due to OOM on T4 CI machine",
"demucs": "Disabled by torchbench upstream due to OOM on T4 CI machine",
}
current_name = ""
current_device = ""
@functools.lru_cache(maxsize=None)
def output_csv(name, headers):
output = csv.writer(
io.TextIOWrapper(
open(name, "wb", buffering=0),
"utf-8",
write_through=True,
),
delimiter=",",
quotechar='"',
quoting=csv.QUOTE_MINIMAL
)
output.writerow(headers)
return output
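# Illustrative note (added for exposition): because output_csv is memoized on (name, headers),
# the first call creates the file and writes the header row, and every later call with the
# same arguments returns the cached csv.writer, so subsequent rows append to the same file,
# e.g. (hypothetical file name):
#   output_csv("lazy.csv", ("name", "speedup")).writerow(["hf_Bert", "1.23"])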
class HardSwishBenchmark:
def __init__(self, dims):
self.name = "HardSwishBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'
self.dims = dims
# test and extra_args are placeholders to match TorchBench API
def __call__(self, device, jit, test, extra_args):
return HardSwish(self.dims, device, jit)
class HardSwish(nn.Module):
def __init__(self, dims, device='cuda', jit=False):
super(HardSwish, self).__init__()
self.name = "HardSwish[" + ','.join([str(d) for d in dims]) + ']'
self.example_inputs = (
torch.randn(*dims, device=device, dtype=torch.float32),
)
def get_module(self):
return self, self.example_inputs
def name(self):
return self.name
def forward(self, x):
return x * torch.clamp(x + 3.0, 0.0, 6.0) / 6.0
class DivAddMulBenchmark:
"""This wrapper helps interface with the same iterator as torchbench models
"""
def __init__(self, dims):
self.name = "DivAddMulBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'
self.dims = dims
# test and extra_args are placeholders to match TorchBench API
def __call__(self, device, jit, test, extra_args):
return DivAddMul(self.dims, device, jit)
class DivAddMul(nn.Module):
def __init__(self, dims, device='cuda', jit=False):
super(DivAddMul, self).__init__()
self.attention_head_size = dims[1]
self.W = torch.ones(*dims[-2:], device=device, dtype=torch.float32)
self.name = "DivAddMul[" + ','.join([str(d) for d in dims]) + ']'
self.example_inputs = (
torch.ones(*dims, device=device, dtype=torch.float32),
torch.randn(*dims, device=device, dtype=torch.float32),
)
def get_module(self):
return self, self.example_inputs
def name(self):
return self.name
def forward(self, inputs, mask):
out3 = ((inputs / 0.1) + mask) * 2.0
out5 = out3.matmul(self.W)
out8 = ((out5 / 0.1) + mask) * 2.00
return out8
toy_models = [
HardSwishBenchmark,
DivAddMulBenchmark,
]
toy_dims = [
[1, 1, 1, 1],
[32, 16, 128, 128],
[128, 16, 128, 128],
[256, 16, 128, 128],
]
for dims in toy_dims:
# The toy benchmarks don't support training..
# and it's too late to add it inside the generator func below...
SKIP_TRAIN_ONLY["DivAddMulBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'] = "This model has no train()"
SKIP_TRAIN_ONLY["HardSwishBenchmark(dims=[" + ','.join([str(d) for d in dims]) + '])'] = "This model has no train()"
def iter_toy_model_names():
for dims in toy_dims:
for model in toy_models:
yield model(dims=dims).name
def pick_grad(args, name):
if args.test == 'train':
return torch.enable_grad()
if name in ("maml",):
return torch.enable_grad()
else:
return torch.no_grad()
def short_name(name, limit=20):
"""Truncate a model name to limit chars"""
return name if len(name) <= limit else f"{name[:limit - 3].rstrip('_')}..."
def iter_torchbench_model_names():
from torchbenchmark import _list_model_paths
for model_path in _list_model_paths():
model_name = os.path.basename(model_path)
yield model_name
def iter_models(args, dirpath):
for name in itertools.chain(iter_toy_model_names(), iter_torchbench_model_names()):
if (
(len(args.filter) and (not re.search("|".join(args.filter), name, re.I)))
or (len(args.exclude) and re.search("|".join(args.exclude), name, re.I))
):
save_error(name, args.test, "disabled via cmdline filter/exclude", dirpath)
continue
if name in SKIP:
save_error(name, args.test, f"SKIP because {SKIP[name]}", dirpath)
continue
if name in SKIP_TRAIN_ONLY and args.test == "train":
save_error(name, args.test, f"SKIP_TRAIN_ONLY because {SKIP_TRAIN_ONLY[name]}", dirpath)
continue
yield name
def call_model_with(model, inputs):
if isinstance(inputs, tuple) or isinstance(inputs, list):
return model(*inputs)
elif isinstance(inputs, dict):
return model(**inputs)
    elif isinstance(inputs, torch.Tensor):
return model(inputs)
raise RuntimeError("invalid example inputs ", inputs)
class CudaSync:
def __init__(self, sync_every_iter=False):
self.sync_every_iter = sync_every_iter
def iter_sync(self):
if self.sync_every_iter:
torch.cuda.synchronize()
def final_sync(self):
torch.cuda.synchronize()
class NoOpSync:
def __init__(self, sync_every_iter=False):
pass
def iter_sync(self):
pass
def final_sync(self):
pass
class LazySync:
def __init__(self, sync_every_iter=False, skip_final_sync=False):
self.sync_every_iter = sync_every_iter
self.skip_final_sync = skip_final_sync
def iter_sync(self):
torch._lazy.mark_step()
if self.sync_every_iter:
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
def final_sync(self):
torch._lazy.mark_step()
if self.skip_final_sync:
return
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
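# Illustrative note (added for exposition): CudaSync, NoOpSync, and LazySync deliberately
# share the same two-method interface (iter_sync/final_sync) so timed() below can stay
# agnostic of the backend, e.g. LazySync(sync_every_iter=True) for lazy-tensor runs and
# CudaSync(sync_every_iter=True) for the eager CUDA reference.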
def dump_lazy_metrics(reset=False):
    met = {name: int(metrics.counter_value(name)) for name in metrics.counter_names() if int(metrics.counter_value(name)) > 0}
if reset:
metrics.reset()
return met
def timed(args, benchmark, sync, times=1):
results = None
sync.final_sync()
set_seeds()
if args.test == 'eval':
model, example_inputs = benchmark.get_module()
if current_device == 'lazy':
torch.cuda.set_sync_debug_mode(2)
elif current_device == 'cuda':
torch.cuda.set_sync_debug_mode(0)
# keep the lazy tensor results alive until the final sync
t0 = time.perf_counter()
for i in range(times):
if args.test == 'eval':
results = call_model_with(model, example_inputs)
elif args.test == 'train':
benchmark.train()
# for the last i, let final_sync take care of it
if i < times - 1:
# may be just an async 'mark_step' for lazy, or no-op for cuda
sync.iter_sync()
if current_device in ['lazy', 'cuda']:
# don't assume torch.cuda present unless using cuda
torch.cuda.set_sync_debug_mode(0)
# should be a hard sync for lazy and cuda
# unless strictly measuring lazy trace overhead, then no-op
sync.final_sync()
t1 = time.perf_counter()
return results, t1 - t0
def to_device(tensors, device):
"""Handles moving tensor or tensors (in various containers) to a new device.
Used for various purposes (either correctness checking, or even as an impromptu
means of synchronization.) Note: this method doesn't apply a cuda sync, do that outside.
"""
try:
import transformers.modeling_outputs
if (
isinstance(tensors, transformers.modeling_outputs.MaskedLMOutput) or
isinstance(tensors, transformers.modeling_outputs.Seq2SeqLMOutput)
):
# huggingface transformers return classes as model output with many attributes
# we don't want to sync (such as hidden states of every layer) - just sync the logits
tensors = tensors.logits
except ImportError:
pass
try:
import torchbenchmark.models.soft_actor_critic.nets
import torchbenchmark.models.drq.utils
if (
isinstance(tensors, torchbenchmark.models.soft_actor_critic.nets.SquashedNormal) or
isinstance(tensors, torchbenchmark.models.drq.utils.SquashedNormal)
):
# a SquashedNormal is a py class that holds a loc and scale torch tensor,
# so convert it to a tuple for compatibility with downstream check_results
tensors = (tensors.loc, tensors.scale)
except ImportError:
pass
if isinstance(tensors, tuple) or isinstance(tensors, list):
return tuple(to_device(i, device) for i in tensors)
elif isinstance(tensors, dict):
return {k: to_device(tensors[k], device) for k in tensors}
elif isinstance(tensors, torch.Tensor):
return tensors.to(device)
raise RuntimeError("invalid example tensors ", tensors)
def lazy_overhead_experiment(args, results, benchmark, lazy_benchmark):
timings = np.zeros((args.repeat, 2), np.float64)
ref_sync = CudaSync if current_device == 'cuda' else NoOpSync
warmup0 = time.perf_counter()
for rep in range(args.warmup):
# interleave the runs to handle frequency scaling and load changes
timed(args, benchmark, sync=ref_sync(sync_every_iter=True))
timed(args, lazy_benchmark, sync=LazySync(sync_every_iter=True))
warmup_time = time.perf_counter() - warmup0
bench0 = time.perf_counter()
dump_lazy_metrics(reset=True)
for rep in range(args.repeat):
# interleave the runs to handle frequency scaling and load changes
_, timings[rep, 0] = timed(args, benchmark, sync=ref_sync(sync_every_iter=True))
_, timings[rep, 1] = timed(args, lazy_benchmark, sync=LazySync(skip_final_sync=True))
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
lazy_metrics = dump_lazy_metrics(reset=True)
bench_time = time.perf_counter() - bench0
pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
median = np.median(timings, axis=0)
fallbacks = ";".join([f"{m}:{lazy_metrics[m]}" for m in lazy_metrics if "aten::" in m])
ops = int(sum([lazy_metrics[m] for m in lazy_metrics if 'lazy::' in m or 'aten::' in m]) / args.repeat)
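    # median[1] is the median lazy (trace-only) iteration time in seconds; dividing by 1e-6
    # expresses it in microseconds, so us_per_op below is microseconds of tracing per traced
    # (or fallback) op, and overhead is the lazy/reference time ratio.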
trace_us = median[1] / 1e-6
us_per_op = trace_us / ops
overhead = median[1] / median[0]
results.append(overhead)
output_csv(
os.path.join(args.output_dir, f"lazy-overheads_{args.test}_{get_unique_suffix()}.csv"),
("dev", "name", "test", "overhead", "pvalue", "ops", "trace_us", "us_per_op", "fallbacks"),
).writerow([current_device, current_name, args.test, f"{overhead:.4f}", f"{pvalue:.4e}",
f"{ops}", f"{trace_us:.4f}", f"{us_per_op:.4f}", f"{fallbacks}"])
print(f"{short_name(current_name, limit=30):<30} {current_device:<4} {args.test:<5} "
f"{'trace overheads':<20} overhead: {overhead:.3f} pvalue: {pvalue:.2e} us_per_op {us_per_op:.3f}")
if args.verbose:
print(f"CIDEBUGOUTPUT,lazy_overhead_experiment,"
f"{current_name},{args.test},{current_device},{overhead:.4f},"
f"{pvalue:.4e},{args.warmup},{args.repeat},{warmup_time:.2f},{bench_time:.2f}")
return (overhead, pvalue)
def lazy_compute_experiment(args, experiment, results, benchmark, lazy_benchmark, sync_every_iter=False):
timings = np.zeros((args.repeat, 2), np.float64)
ref_sync = CudaSync(sync_every_iter=sync_every_iter) if current_device == 'cuda' else NoOpSync()
lazy_sync = LazySync(sync_every_iter=sync_every_iter)
# interleave the runs to handle frequency scaling and load changes
warmup0 = time.perf_counter()
for rep in range(args.warmup):
# warmup
timed(args, benchmark, sync=ref_sync)
timed(args, lazy_benchmark, sync=lazy_sync)
warmup_time = time.perf_counter() - warmup0
# fresh metrics for each timed run
dump_lazy_metrics(reset=True)
bench0 = time.perf_counter()
for rep in range(args.repeat):
# measure
_, timings[rep, 0] = timed(args, benchmark, times=args.inner_loop_repeat, sync=ref_sync)
_, timings[rep, 1] = timed(args, lazy_benchmark, times=args.inner_loop_repeat, sync=lazy_sync)
bench_time = time.perf_counter() - bench0
lazy_metrics = dump_lazy_metrics(reset=True)
if 'CachedCompile' not in lazy_metrics or lazy_metrics['CachedCompile'] != args.repeat * args.inner_loop_repeat:
print("WARNING: lazy cached compile count indicates fallbacks, or something else")
fallbacks = {k: v for (k, v) in lazy_metrics.items() if 'aten::' in k}
if len(fallbacks):
print(f"WARNING: lazy-eager fallbacks detected for [{fallbacks}]")
if args.dump_lazy_counters:
print(lazy_metrics)
pvalue = ttest_ind(timings[:, 0], timings[:, 1]).pvalue
median = np.median(timings, axis=0)
speedup = median[0] / median[1]
results.append(speedup)
output_csv(
os.path.join(args.output_dir, f"lazy-compute_{args.test}_{get_unique_suffix()}.csv"),
("name", "dev", "experiment", "test", "speedup", "pvalue"),
).writerow([current_name, current_device, experiment, args.test, f"{speedup:.4f}", f"{pvalue:.2e}"])
print(f"{short_name(current_name, limit=30):<30} {current_device:<4} "
f"{args.test:<5} {experiment:<20} speedup: {speedup:.3f} pvalue: {pvalue:.2e}")
if args.verbose:
print(f"CIDEBUGOUTPUT,lazy_compute_experiment,"
f"{current_name},{current_device},{experiment},{args.test},{speedup:.4f},"
f"{pvalue:.2e},{args.warmup},{args.repeat},{warmup_time:.2f},{bench_time:.2f}")
return (speedup, pvalue)
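# Editorial note: `speedup` above is median reference time / median lazy time, so values
# greater than 1.0 mean the lazy backend ran the amortized (or unamortized) inner loop
# faster than the reference run on the same device.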
def check_eval_correctness(args, benchmark, lazy_benchmark, name):
try:
set_seeds()
model, example_inputs = benchmark.get_module()
model.eval()
correct_result = call_model_with(model, example_inputs)
set_seeds()
lazy_model, lazy_inputs = lazy_benchmark.get_module()
lazy_model.eval()
lazy_result = call_model_with(lazy_model, lazy_inputs)
if not check_results(correct_result, lazy_result, args.device, args.allclose_atol):
print(f"INCORRECT: {name}")
save_error(name, args.test, "Incorrect results.", args.output_dir)
return False
except Exception as e:
print(f"ERROR: {name}: {e}")
save_error(name, args.test, e, args.output_dir)
return False
return True
def just_run_once(args, lazy_benchmark):
set_seeds()
if args.test == 'eval':
model, example_inputs = lazy_benchmark.get_module()
results.append(call_model_with(model, example_inputs))
elif args.test == 'train':
lazy_benchmark.train()
torch._lazy.mark_step()
torch._lazy.wait_device_ops()
if current_device == 'cuda':
torch.cuda.synchronize()
def check_results_impl(correct_result, lazy_result, atol):
    # recursive helper for dealing with nested data structures
    if type(correct_result) is tuple:
        # compare every element, not just the first one
        return all(check_results_impl(c, l, atol) for c, l in zip(correct_result, lazy_result))
    if type(correct_result) is dict:
        # compare every key, not just the first one
        for k in correct_result:
            assert k in lazy_result
        return all(check_results_impl(correct_result[k], lazy_result[k], atol) for k in correct_result)
    assert type(correct_result) is torch.Tensor, f"Expect torch.Tensor but got {type(correct_result)}."
    ans = torch.allclose(correct_result, lazy_result, atol=atol)
    if not ans:
        print(f"correct_result:\n{correct_result}, lazy_result:\n{lazy_result}")
    return ans
def check_results(correct_result, lazy_result, device, atol):
# to_device has recursive logic and special handling for
# extracting relevant tensors from huggingface data structures
correct_result = to_device(correct_result, device)
lazy_result = to_device(lazy_result, device)
return check_results_impl(correct_result, lazy_result, atol)
def check_fuser(args):
if args.fuser == 'noopt':
return
if args.fuser is None:
args.fuser = 'fuser1' if args.device == 'cpu' else 'fuser2'
if args.device == 'cpu':
assert args.fuser in ['fuser0', 'fuser1']
if args.fuser == 'fuser1':
assert torch._C._llvm_enabled(), "Can't use fuser1 (nnc) for CPU without building torch with llvm."
if args.device == 'cuda':
assert args.fuser in ['fuser0', 'fuser1', 'fuser2']
def run_tracing_execute_noops(test, lazy_benchmark):
ltm.set_noop_execution_mode(True)
if test == 'eval':
model, example_inputs = lazy_benchmark.get_module()
    # doesn't actually collect a profile, but runs just the lazy trace
    # so you can use a profiler on top of the program.
    # note: depends on making the backend do a no-op for ExecuteComputation
results = []
for i in range(300):
if test == 'eval':
results.append(call_model_with(model, example_inputs))
elif test == 'train':
lazy_benchmark.train()
# we still do a mark step, to preserve the ratio of how often we split the graph
# and run through the process of 'compile and execute' (even though these are now noops)
torch._lazy.mark_step()
ltm.set_noop_execution_mode(False)
def merge_with_prefix(prefix, tmp_dir, out_dir, headers):
results = []
rfnames = glob.glob(os.path.join(tmp_dir, prefix + "*"))
    for rfname in rfnames:
        with open(rfname) as rf:
            results.extend(rf.readlines()[1:])  # skip header
# the header shouldn't require quotations and the results should already be properly
# quoted via output_csv
with open(os.path.join(out_dir, prefix + "acc.csv"), "a+") as acc_csv:
acc_csv.write(",".join(headers) + "\n")
for l in results:
acc_csv.write(l)
def merge_reformat(tmp_dir, args, table):
    out_dir = args.output_dir
# depending on the type of an experiment, fields can be in a different order
# `get_field` deals with all three types including `error`
def get_field(row, name, file_type):
headers = {
"error": ("name", "test", "error"),
"lazy-compute" : ("name", "dev", "experiment", "test", "speedup", "pvalue"),
"lazy-overheads" : ("dev", "name", "test", "overhead", "pvalue", "ops", "trace_us", "us_per_op", "fallbacks")
}
header = headers[file_type]
r = row[header.index(name)] if name in header else "N/A"
return r
csv_files = glob.glob(os.path.join(tmp_dir, "*.csv"))
for csvf in csv_files:
with open(csvf, "r") as csvfile:
prefix = os.path.basename(csvf).split("_")[0]
csvreader = csv.reader(csvfile, delimiter=",", quotechar='"')
# This skips the first row of the CSV file.
next(csvreader)
for r in csvreader:
key = (get_field(r, "name", prefix), get_field(r, "test", prefix))
entry = table[key]
if prefix == "error":
entry["error"] = f'{entry.get("error", "")} {get_field(r, "error", prefix)}'
elif prefix == "lazy-overheads":
entry["overhead"] = get_field(r, "overhead", prefix)
entry["ops"] = get_field(r, "ops", prefix)
entry["trace_us"] = get_field(r, "trace_us", prefix)
entry["us_per_op"] = get_field(r, "us_per_op", prefix)
entry["fallbacks"] = get_field(r, "fallbacks", prefix)
else:
entry[get_field(r, "experiment", prefix)] = get_field(r, "speedup", prefix)
amortized_header = f"amortized {args.inner_loop_repeat}x"
headers = ("name", "test", amortized_header, "unamortized", "overhead", "error", "rc",
"ops", "trace_us", "us_per_op", "fallbacks")
cw = output_csv(
os.path.join(out_dir, f"{args.test}_reformat.csv"),
headers
)
for k, v in table.items():
cw.writerow((k[0], k[1], v.get(amortized_header, 'N/A'),
v.get('unamortized', 'N/A'), v.get('overhead', 'N/A'), v.get('error', 'N/A'), v.get('rc'),
v.get('ops', 'N/A'), v.get('trace_us', 'N/A'), v.get('us_per_op', 'N/A'), v.get('fallbacks', 'N/A')))
def save_error(name, test, error, dir):
output_csv(
os.path.join(dir, f"error_{get_unique_suffix()}.csv"),
("name", "test", "error"),
).writerow([name, test, error])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--filter", "-k", action="append", default=[], help="filter benchmarks")
parser.add_argument("--exclude", "-x", action="append", default=[], help="filter benchmarks")
parser.add_argument("--device", "-d", default='cuda', help="cpu or cuda")
parser.add_argument("--warmup", type=int, default=4, help="number of warmup runs")
parser.add_argument("--timeout", type=int, default=60 * 10, help="time allocated to each model")
parser.add_argument("--repeat", "-n", type=int, default=4, help="number of timing runs (samples)")
parser.add_argument("--inner_loop_repeat", type=int, default=10, help="repeat the computation this many times per sample")
parser.add_argument("--fuser", type=str, choices=['noopt', 'fuser0', 'fuser1', 'fuser2'], help="0=legacy, 1=nnc, 2=nvfuser")
parser.add_argument("--test", type=str, choices=['eval', 'train'], default='eval')
parser.add_argument("--verbose", action='store_true')
parser.add_argument("--torchbench_dir", type=str, help="path to torchbenchmark repo")
parser.add_argument("--output_dir", type=str, default=".", help="path to write output files")
parser.add_argument("--dump_lazy_counters", action='store_true', help="dump lazy counter values after each timing run")
parser.add_argument("--just_run_once", action="store_true")
parser.add_argument("--run_tracing_execute_noops", action='store_true',
help="Run the tracing portion only, with noop backend, useful for running under a profiler.")
parser.add_argument("--run_in_subprocess", "-s", type=str,
help="which model run in subprocess. This will ignore filter and exclude")
parser.add_argument("--allclose_atol", type=float, default=1e-4,
help="Absolute tolerance to check lazy result again the correct result")
parser.add_argument("--precision", choices=["fp32", "fp16", "amp"], default="fp32", help="enable fp16 modes from: fp32, fp16/half, or amp")
args = parser.parse_args()
results = []
check_fuser(args)
# torchbench_dir = abspath(args.torchbench_dir) if args.torchbench_dir else abspath("../../benchmark")
# assert os.path.exists(os.path.join(torchbench_dir, "torchbenchmark")), "set --torchbench_dir to installed torchbench repo"
# sys.path.append(torchbench_dir)
copy_argv = [] + sys.argv
if args.run_in_subprocess:
try:
from fastNLP.core import logger
logger.setLevel(logging.WARNING)
current_name = args.run_in_subprocess
benchmark_cls = get_benchmark_cls(args.run_in_subprocess)
current_device = args.device
if args.device == 'cuda':
assert 'LTC_TS_CUDA' in os.environ and bool(os.environ['LTC_TS_CUDA']), "set LTC_TS_CUDA for cuda device"
with pick_grad(args, current_name):
with fuser(args.fuser) if args.fuser != 'noopt' else optimized_execution(False):
if args.fuser == 'noopt':
# TODO(whc) cleaner way to configure the fusers; seems i have to set both optimized_execution(False)
# _and_ disable fusers to get no-optimization
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_nvfuser_enabled(False)
if args.fuser == 'fuser2':
# special case to disable nvfuser horizontal fusion as it is currently broken
# TODO(whc) remove this once it's fixed
torch._C._jit_set_nvfuser_horizontal_mode(False)
# no try since we should've already filtered out models we can't create
set_seeds()
benchmark = benchmark_cls(test=args.test, device=args.device, jit=False, extra_args=["--precision", args.precision])
set_seeds()
lazy_benchmark = benchmark_cls(test=args.test, device='lazy', jit=False, extra_args=["--precision", args.precision])
# TODO: might be redundant
gc.collect()
if args.run_tracing_execute_noops:
print(f"Profiling {current_name}")
run_tracing_execute_noops(args.test, lazy_benchmark)
# when profiling, we really don't want to do anything else
exit(0)
if args.just_run_once:
just_run_once(args, lazy_benchmark)
exit(0)
if args.test == 'eval':
if not check_eval_correctness(args, benchmark, lazy_benchmark, current_name):
exit(3)
lazy_overhead_experiment(args, results, benchmark, lazy_benchmark)
lazy_compute_experiment(args, f"amortized {args.inner_loop_repeat}x", results, benchmark, lazy_benchmark)
lazy_compute_experiment(args, "unamortized", results, benchmark, lazy_benchmark, sync_every_iter=True)
except Exception as e:
print(f"ERROR: {current_name}: {e}")
save_error(current_name, args.test, e, args.output_dir)
exit(13)
exit(0)
import psutil
import subprocess
import tempfile
dirpath = tempfile.mkdtemp()
table = collections.defaultdict(dict)
for model_name in iter_models(args, dirpath):
# if `--run_in_subprocess` is specified, it will override any filters and excludes
# pass the rest of arguments intact such as device, test, repeat, etc
# note, the latest output_dir will override the original one and this is exactly what we want
# for child processes
launch_command = f"python {' '.join(copy_argv)} --run_in_subprocess '{model_name}' --output_dir={dirpath}"
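        # Illustrative example (hypothetical model name and temp dir): the resulting command
        # looks roughly like
        #   python <this_script> -d cuda --run_in_subprocess 'resnet50' --output_dir=/tmp/tmpXXXX
        # i.e. the original argv plus the subprocess-specific flags appended above.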
env = os.environ
env["LTC_TS_CUDA"] = "1" if args.device == "cuda" else "0"
rc = 0
try:
if args.verbose:
cp = subprocess.run("nvidia-smi --query-gpu=timestamp,utilization.memory,memory.total,memory.free,memory.used"
" --format=csv,noheader",
capture_output=True, text=True, shell=True)
print(f"CIDEBUGOUTPUT,BEFORE subprocess.run,{model_name},{cp.stdout}")
proc = subprocess.Popen(launch_command,
env=env,
shell=True,
stderr=subprocess.STDOUT)
outs, errs = proc.communicate(timeout=args.timeout)
rc = proc.poll()
except subprocess.TimeoutExpired:
print(f"{model_name} timed out after {args.timeout // 60} minutes! Include it in SKIP or SKIP_TRAIN_ONLY")
save_error(model_name, args.test, "Timed out.", dirpath)
# to visualize highlight timeouts, they will also have
# "timed out" in the error column
rc = 17
process = psutil.Process(proc.pid)
for p in process.children(recursive=True):
p.kill()
process.kill()
if args.verbose:
cp = subprocess.run("nvidia-smi --query-gpu=timestamp,utilization.memory,memory.total,memory.free,memory.used"
" --format=csv,noheader",
capture_output=True, text=True, shell=True)
print(f"CIDEBUGOUTPUT,AFTER subprocess.run,{model_name},{args.test},{cp.stdout}")
entry = table[(model_name, args.test)]
entry["rc"] = rc
merge_with_prefix("lazy-overheads_", dirpath, args.output_dir, ("dev", "name", "test", "overhead", "pvalue"))
merge_with_prefix("lazy-compute_", dirpath, args.output_dir, ("name", "dev", "experiment", "test", "speedup", "pvalue"))
merge_with_prefix("error_", dirpath, args.output_dir, ("name", "test", "error"))
merge_reformat(dirpath, args, table)
|
"""
Test user-customized invoke function.
"""
import argparse
from typing import List
from ..utils import REPO_PATH, add_path, get_output_json, dump_output
with add_path(REPO_PATH):
from torchbenchmark.util.experiment.instantiator import list_models, load_model_isolated, TorchBenchModelConfig, \
list_devices, list_tests, inject_model_invoke
from torchbenchmark.util.experiment.metrics import TorchBenchModelMetrics, get_model_test_metrics
from typing import Optional
def user_defined_invoke(self):
print(f"Model {self.name} invoke has been replaced!")
self.output_metrics_list = [1.0, 2.0, 3.0, 4.0]
    self.output_metrics_dict = {
        "m1": 1.0,
        "m2": 2.0,
        "m3": 3.0,
    }
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--device", "-d", default="cuda", help="Devices to run, splited by comma.")
parser.add_argument("--test", "-t", default="eval", help="Tests to run, splited by comma.")
parser.add_argument("--bs", type=int, default=1, help="Test batch size")
parser.add_argument("--model", "-m", default=None, type=str, help="Only run the specifice models, splited by comma.")
parser.add_argument("--inject", action="store_true", help="Inject user defined invoke function to the model.")
return parser.parse_args(args)
def get_metrics(_config: TorchBenchModelConfig) -> List[str]:
return ["latencies"]
def run_config(config: TorchBenchModelConfig, dryrun: bool=False) -> Optional[TorchBenchModelMetrics]:
"""This function only handles NotImplementedError, all other errors will fail."""
metrics = get_metrics(config)
print(f"Running {config} ...", end='')
if dryrun:
return None
# We do not allow RuntimeError in this test
    result = {}
try:
        # load the model instance in an isolated child process (see load_model_isolated)
model = load_model_isolated(config)
inject_model_invoke(model, user_defined_invoke)
# get the model test metrics
model.invoke()
result["list_result"] = model.get_model_attribute("output_metrics_list")
result["dict_output"] = model.get_model_attribute("output_metrics_dict")
except NotImplementedError as e:
print(" [NotImplemented]")
return None
print(" [Done]")
return result
def run(args: List[str]):
args = parse_args(args)
config = TorchBenchModelConfig(
name=args.model,
device=args.device,
test=args.test,
batch_size=args.bs,
jit=False,
extra_args=[],
extra_env=None,
)
result = run_config(config)
print(result)
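# Illustrative usage (hypothetical model name): run(["--model", "resnet50", "--device", "cuda",
# "--test", "eval"]) loads the model through load_model_isolated, replaces its invoke() with
# user_defined_invoke above, and prints the list/dict metrics collected by run_config.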
|
import torch
import torch.nn as nn
from functorch import vmap, jacfwd, jacrev
from .util import BenchmarkCase
# Batched Hessians of fully-connected layers are a popular quantity
# in physics-related models.
# This test case is from https://github.com/pytorch/functorch/issues/989
# We haven't been able to get the full model yet, so this test case
# is going into the functorch userbenchmark instead of torchbenchmark.
class VmapHessianFC(BenchmarkCase):
def __init__(self):
device = 'cuda'
D1 = 2 # x, y
D2 = 3 # u, v, p
B = 10000
x = torch.randn(B, D1).to(device)
model = nn.Sequential(
nn.Linear(D1, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, D2),
).to(device)
self.model = model
self.x = x
def name(self):
return 'vmap_hessian_fc_cuda'
def run(self):
def predict(x):
out = self.model(x)
return out, out
        hessian, pred = vmap(
            jacfwd(jacrev(predict, argnums=0, has_aux=True), argnums=0, has_aux=True),
            in_dims=0,
        )(self.x)
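# Editorial note on expected shapes, given B=10000, D1=2, D2=3 above: jacrev/jacfwd with
# has_aux=True differentiate the first output of predict() with respect to x, so the per-sample
# Hessian has shape (D2, D1, D1); vmap batches this to (B, D2, D1, D1), and `pred` (the aux
# output) has shape (B, D2).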
|