python_code | repo_name | file_path
---|---|---|
# Copyright 2020-2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
class CachedInstanceMeta(type):
__instances = {}
def __call__(self, *args, **kwargs):
arg_tuple = args + tuple(kwargs.values())
if arg_tuple in self.__instances:
return self.__instances[arg_tuple]
else:
obj = super().__call__(*args, **kwargs)
self.__instances[arg_tuple] = obj
return obj
| NVTX-release-v3 | python/nvtx/utils/cached.py |
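A minimal usage sketch (not part of the NVTX source) for the metaclass above: any class that sets CachedInstanceMeta as its metaclass returns the cached instance for repeated constructor arguments. The Color class below is a hypothetical example.
class Color(metaclass=CachedInstanceMeta):
    def __init__(self, name, alpha=1.0):
        self.name = name
        self.alpha = alpha

# Identical arguments yield the same cached object; different arguments create a new one.
assert Color("green") is Color("green")
assert Color("green") is not Color("red")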
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DCGM-master | sdk_samples/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DCGM-master | sdk_samples/scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import argparse
try:
from dcgm_structs import dcgmExceptionClass
import pydcgm
import dcgm_structs
import dcgm_fields
import dcgm_agent
import dcgmvalue
except:
    print("Unable to find python bindings, please refer to the example below: ")
    print("PYTHONPATH=/usr/local/dcgm/bindings python dcgm_example.py")
    sys.exit(1)
## Look at __name__ == "__main__" for entry point to the script
## Helper method to convert DCGM value to string
def convert_value_to_string(value):
v = dcgmvalue.DcgmValue(value)
try:
if (v.IsBlank()):
return "N/A"
else:
return v.__str__()
except:
## Exception is generally thrown when an int32 is
## passed as an input. Use additional methods to fix it
v = dcgmvalue.DcgmValue(0)
v.SetFromInt32(value)
if (v.IsBlank()):
return "N/A"
else:
return v.__str__()
## Helper method to investigate the status handler
def helper_investigate_status(statusHandle):
"""
Helper method to investigate status handle
"""
errorCount = 0
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
while (errorInfo != None):
errorCount += 1
print("Error%d" % errorCount)
print((" GPU Id: %d" % errorInfo.gpuId))
print((" Field ID: %d" % errorInfo.fieldId))
print((" Error: %d" % errorInfo.status))
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
## Helper method to convert enum to system name
def helper_convert_system_enum_to_sytem_name(system):
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_PCIE):
return "PCIe"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_NVLINK):
return "NvLink"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_PMU):
return "PMU"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_MCU):
return "MCU"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_MEM):
return "MEM"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_SM):
return "SM"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_INFOROM):
return "Inforom"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_THERMAL):
return "Thermal"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_POWER):
return "Power"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_DRIVER):
return "Driver"
## Helper method to convert a health result to a string for display purposes
def convert_overall_health_to_string(health):
if health == dcgm_structs.DCGM_HEALTH_RESULT_PASS:
return "Pass"
elif health == dcgm_structs.DCGM_HEALTH_RESULT_WARN:
return "Warn"
elif health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL:
return "Fail"
else :
return "N/A"
def nvvs_installed():
return os.path.isfile('/usr/share/nvidia-validation-suite/nvvs')
def dcgm_diag_test_didnt_pass(rc):
if rc == dcgm_structs.DCGM_HEALTH_RESULT_FAIL or rc == dcgm_structs.DCGM_HEALTH_RESULT_WARN:
return True
else:
return False
def dcgm_diag_test_index_to_name(index):
if index == dcgm_structs.DCGM_SWTEST_DENYLIST:
return "denylist"
elif index == dcgm_structs.DCGM_SWTEST_NVML_LIBRARY:
return "nvmlLibrary"
elif index == dcgm_structs.DCGM_SWTEST_CUDA_MAIN_LIBRARY:
return "cudaMainLibrary"
elif index == dcgm_structs.DCGM_SWTEST_CUDA_RUNTIME_LIBRARY:
return "cudaRuntimeLibrary"
elif index == dcgm_structs.DCGM_SWTEST_PERMISSIONS:
return "permissions"
elif index == dcgm_structs.DCGM_SWTEST_PERSISTENCE_MODE:
return "persistenceMode"
elif index == dcgm_structs.DCGM_SWTEST_ENVIRONMENT:
return "environment"
elif index == dcgm_structs.DCGM_SWTEST_PAGE_RETIREMENT:
return "pageRetirement"
elif index == dcgm_structs.DCGM_SWTEST_GRAPHICS_PROCESSES:
return "graphicsProcesses"
elif index == dcgm_structs.DCGM_SWTEST_INFOROM:
return "inforom"
else:
raise dcgm_structs.DCGMError(dcgm_structs.DCGM_ST_BADPARAM)
# Returns true if the error here should be ignored
def should_ignore_error(diagException):
if diagException.info:
if diagException.info.find("MIG configuration is incompatible with the diagnostic because it prevents access to the entire GPU."
) != -1:
return True
if diagException.info.find("Cannot run diagnostic: CUDA does not support enumerating GPUs with MIG mode enabled") == 0:
return True
return False
def main(manualOpMode=False, embeddedHostengine=True):
if manualOpMode:
## Initialize the DCGM Engine in manual operation mode. This implies that its execution is
## controlled by the monitoring agent. The user has to periodically call APIs such as
## dcgmEnginePolicyTrigger and dcgmEngineUpdateAllFields which tells DCGM to wake up and
## perform data collection and operations needed for policy management.
## Manual operation mode is only possible on an "embedded" hostengine.
opMode = dcgm_structs.DCGM_OPERATION_MODE_MANUAL
else:
## Initialize the DCGM Engine in automatic operation mode. This is required when connecting
## to a "standalone" hostengine (one that is running separately) but can also be done on an
## embedded hostengine. In this mode, fields are updated
## periodically based on their configured frequency. When watching new fields you must still manually
## trigger an update if you wish to view these new fields' values right away.
opMode = dcgm_structs.DCGM_OPERATION_MODE_AUTO
if embeddedHostengine:
print(("Running an embedded hostengine with %s opmode..." %
('manual' if manualOpMode else 'auto')))
## create embedded hostengine by leaving ipAddress as None
dcgmHandle = pydcgm.DcgmHandle(opMode=opMode)
else:
print(("Connecting to a standalone hostengine with %s opmode..." %
('manual' if manualOpMode else 'auto')))
dcgmHandle = pydcgm.DcgmHandle(ipAddress='127.0.0.1', opMode=opMode)
print("")
## Get a handle to the system level object for DCGM
dcgmSystem = dcgmHandle.GetSystem()
supportedGPUs = dcgmSystem.discovery.GetAllSupportedGpuIds()
## Create an empty group. Let's call the group "one_gpu_group".
## We will add the first supported GPU in the system to this group.
dcgmGroup = pydcgm.DcgmGroup(dcgmHandle, groupName="one_gpu_group", groupType=dcgm_structs.DCGM_GROUP_EMPTY)
#Skip the test if no supported gpus are available
if len(supportedGPUs) < 1:
print("Unable to find supported GPUs on this system")
sys.exit(0)
dcgmGroup.AddGpu(supportedGPUs[0])
## Invoke method to get gpu IDs of the members of the newly-created group
groupGpuIds = dcgmGroup.GetGpuIds()
## Trigger field updates since we just started DCGM (always necessary in MANUAL mode to get recent values)
dcgmSystem.UpdateAllFields(waitForUpdate=True)
## Get the current configuration for the group
config_values = dcgmGroup.config.Get(dcgm_structs.DCGM_CONFIG_CURRENT_STATE)
## Display current configuration for the group
for x in range(0, len(groupGpuIds)):
print("GPU Id : %d" % (config_values[x].gpuId))
print("Ecc Mode : %s" % (convert_value_to_string(config_values[x].mEccMode)))
print("Sync Boost : %s" % (convert_value_to_string(config_values[x].mPerfState.syncBoost)))
print("Mem Clock : %s" % (convert_value_to_string(config_values[x].mPerfState.targetClocks.memClock)))
print("SM Clock : %s" % (convert_value_to_string(config_values[x].mPerfState.targetClocks.smClock)))
print("Power Limit : %s" % (convert_value_to_string(config_values[x].mPowerLimit.val)))
print("Compute Mode: %s" % (convert_value_to_string(config_values[x].mComputeMode)))
print("\n")
## Add the health watches
dcgmGroup.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_ALL)
## Ensure that the newly watched health fields are updated since we wish to access them right away.
## Needed in manual mode and only needed in auto mode if we want to see the values right away
dcgmSystem.UpdateAllFields(waitForUpdate=True)
## Invoke Health checks
try:
group_health = dcgmGroup.health.Check()
print("Overall Health for the group: %s" % convert_overall_health_to_string(group_health.overallHealth))
for index in range (0, group_health.incidentCount):
print("GPU ID : %d" % group_health.incidents[index].entityInfo.entityId)
print("system tested : %d" % group_health.incidents[index].system)
print("system health : %s" % convert_overall_health_to_string(group_health.incidents[index].health))
print("system health err : %s" % group_health.incidents[index].error.msg)
print("\n")
except dcgm_structs.DCGMError as e:
errorCode = e.value
print("dcgmHealthCheck returned error %d: %s" % (errorCode, e))
print("")
if nvvs_installed():
## This will go ahead and perform a "prologue" diagnostic
## to make sure everything is ready to run
## currently this calls an outside diagnostic binary but eventually
## that binary will be merged into the DCGM framework
## The "response" is a dcgmDiagResponse structure that can be parsed for errors.
try:
response = dcgmGroup.action.RunDiagnostic(dcgm_structs.DCGM_DIAG_LVL_SHORT)
except dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED):
print("One of the GPUs on your system is not supported by NVVS")
except dcgmExceptionClass(dcgm_structs.DCGM_ST_GROUP_INCOMPATIBLE):
print("GPUs in the group are not compatible with each other for running diagnostics")
except dcgmExceptionClass(dcgm_structs.DCGM_ST_NVVS_ERROR) as e:
if not should_ignore_error(e):
raise(e)
else:
print(str(e))
else:
isHealthy = True
for i in range(0, response.levelOneTestCount):
if dcgm_diag_test_didnt_pass(response.levelOneResults[i].result):
print("group failed validation check for %s" % dcgm_diag_test_index_to_name(i))
isHealthy = False
if not isHealthy:
print("System is not healthy")
else:
print("not running short group validation because NVIDIA Validation Suite is not installed")
print("")
## Add process watches so that DCGM can start watching process info
dcgmGroup.stats.WatchPidFields(1000000, 3600, 0)
####################################################################
# Start a CUDA process at this point and get the PID for the process
## Wait until it completes
## dcgmGroup.health.Check() is a low overhead check and can be performed
## in parallel to the job without impacting the application's performance
####################################################################
# Initialized to 0 for now. Change it to PID of the CUDA process if there is a process to run
pid = 0
try:
pidInfo = dcgmGroup.stats.GetPidInfo(pid)
## Display some process statistics (more may be desired)
print("Process ID : %d" % pid)
print("Start time : %d" % pidInfo.summary.startTime)
print("End time : %d" % pidInfo.summary.endTime)
print("Energy consumed : %d" % pidInfo.summary.energyConsumed)
print("Max GPU Memory : %d" % pidInfo.summary.maxGpuMemoryUsed)
print("Avg. SM util : %d" % pidInfo.summary.smUtilization.average)
print("Avg. mem util : %d" % pidInfo.summary.memoryUtilization.average)
except:
print("There was no CUDA job running to collect the stats")
pass
# Nvidia Validation Suite is required when performing "validate" actions
if nvvs_installed():
## Now that the process has completed we perform an "epilogue" diagnostic that will stress the system
try:
response = dcgmGroup.action.RunDiagnostic(dcgm_structs.DCGM_DIAG_LVL_MED)
except dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED):
print("One of the GPUs on your system is not supported by NVVS")
except dcgmExceptionClass(dcgm_structs.DCGM_ST_NVVS_ERROR) as e:
if not should_ignore_error(e):
raise(e)
else:
print(str(e))
else:
## Check the response and do any actions desired based on the results.
pass
else:
print("not running medium group validation because NVIDIA Validation Suite is not installed")
print("")
## Delete the group
dcgmGroup.Delete()
del(dcgmGroup)
dcgmGroup = None
## disconnect from the hostengine by deleting the DcgmHandle object
del(dcgmHandle)
dcgmHandle = None
## Entry point for this script
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Script for showing off how to use DCGM python bindings')
parser.add_argument('-o', '--opmode',
choices=['manual', 'auto'],
default='manual',
help='Operation mode for the hostengine. Must be auto if a standalone hostengine ' +
'is used. Defaults to manual.')
parser.add_argument('-t', '--type',
choices=['embedded', 'standalone'],
default='embedded',
help='Type of hostengine. Embedded mode starts a hostengine within the ' +
'same process. Standalone means that a separate hostengine process ' +
'is already running that will be connected to. '
)
args = parser.parse_args()
manualOpMode = args.opmode == 'manual'
embeddedHostengine = args.type == 'embedded'
main(manualOpMode, embeddedHostengine)
| DCGM-master | sdk_samples/scripts/dcgm_example.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from DcgmReader import *
import dcgm_fields
class FieldHandlerReader(DcgmReader):
'''
Override just this method to do something different per field.
This method is called once for each field for each GPU each
time that its Process() method is invoked, and it will be skipped
for blank values and fields in the ignore list.
'''
def CustomFieldHandler(self, gpuId, fieldId, fieldTag, val):
print('GPU %d %s(%d) = %s' % (gpuId, fieldTag, fieldId, val.value))
class DataHandlerReader(DcgmReader):
'''
Override just this method to handle the entire map of data in your own way. This
might be used if you want to iterate by field id and then GPU or something like that.
This method is called once for each time the Process() method is invoked.
'''
def CustomDataHandler(self, fvs):
for fieldId in self.m_publishFieldIds:
if fieldId in self.m_dcgmIgnoreFields:
continue
out = 'Values for %s:' % (self.m_fieldIdToInfo[fieldId].tag)
wasBlank = True
for gpuId in list(fvs.keys()):
gpuFv = fvs[gpuId]
val = gpuFv[fieldId][-1]
#Skip blank values. Otherwise, we'd have to insert a placeholder blank value based on the fieldId
if val.isBlank:
continue
wasBlank = False
append = " GPU%d=%s" % (gpuId, val.value)
out = out + append
if wasBlank == False:
print(out)
'''
field_ids : List of the field ids to publish. If it isn't specified, our default list is used.
update_frequency : Frequency of updates in microseconds. Defaults to 10 seconds (10000000 microseconds)
keep_time : Max time to keep data from NVML, in seconds. Default is 3600.0 (1 hour)
ignores : List of the field ids we want to query but not publish.
'''
def DcgmReaderDictionary(field_ids=defaultFieldIds, update_frequency=10000000, keep_time=3600.0, ignores=[], field_groups='dcgm_fieldgroupdata'):
# Instantiate a DcgmReader object
dr = DcgmReader(fieldIds=field_ids, updateFrequency=update_frequency, maxKeepAge=keep_time, ignoreList=ignores, fieldGroupName=field_groups)
# Get the default list of fields as a dictionary of dictionaries:
# gpuId -> field name -> field value
data = dr.GetLatestGpuValuesAsFieldNameDict()
# Print the dictionary
for gpuId in data:
for fieldName in data[gpuId]:
print("For gpu %s field %s=%s" % (str(gpuId), fieldName, data[gpuId][fieldName]))
def main():
print('Some examples of different DcgmReader usages')
print('\n\nThe default interaction')
dr = DcgmReader()
dr.Process()
print('\n\nUsing custom fields through the dictionary interface...')
customFields = [dcgm_fields.DCGM_FI_DEV_MEM_COPY_UTIL, dcgm_fields.DCGM_FI_DEV_GPU_UTIL, dcgm_fields.DCGM_FI_DEV_POWER_USAGE]
DcgmReaderDictionary(field_ids=customFields)
print('\n\nProcessing in field order by overriding the CustomDataHandler() method')
cdr = DataHandlerReader()
cdr.Process()
print('\n\nPrinting a little differently by overriding the CustomFieldHandler() method')
fhr = FieldHandlerReader()
fhr.Process()
if __name__ == '__main__':
main()
| DCGM-master | sdk_samples/scripts/DcgmReaderExample.py |
#!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import re
import sys
import xml.etree.cElementTree as ET
try:
import yaml
except ImportError:
print('Please install pyyaml')
raise
NAME_ID = ['name', 'id']
CSV = 'csv'
DITA = 'dita'
def main():
parser = argparse.ArgumentParser(description='Generate Diag support table')
parser.add_argument('--in', help='Diag config YAML',
dest='in_', required=True)
parser.add_argument('--out', help='Destination CSV', required=True)
args = parser.parse_args()
config_name = args.in_
destination = args.out
extension_match = re.search(r'\.(\w+)$', destination)
if extension_match is None:
print('No extension found')
sys.exit(1)
out_type = str.lower(extension_match.group(1))
with open(config_name) as config_yaml, open(destination, 'w') as out_file:
config = yaml.safe_load(config_yaml)
rows = []
all_tests = set()
parse_skus(rows, all_tests, config['skus'])
all_tests_list = sorted(all_tests)
if out_type == DITA:
print('Outputting dita xml')
print_dita(out_file, rows, all_tests_list)
elif out_type == CSV:
print('Outputting csv')
print_csv(out_file, rows, all_tests_list)
else:
print('Unrecognized extension')
sys.exit(1)
def parse_skus(rows, all_tests, skus):
for sku in skus:
if 'name' not in sku:
continue
row = {'tests': set()}
for prop in sku:
if prop == 'id':
raw_id = str(sku[prop])
# If len(id) > 4, split with a colon for readability
_id = raw_id if len(
raw_id) <= 4 else raw_id[:4] + ':' + raw_id[4:]
row[prop] = _id
elif prop == 'name':
row[prop] = str(sku[prop])
else:
row['tests'].add(prop)
all_tests.add(prop)
rows.append(row)
def print_csv(out_file, skus, all_tests_list):
out_csv = csv.writer(out_file)
out_csv.writerow(NAME_ID + all_tests_list)
for sku in skus:
tests = sku['tests']
sku_list = [sku['name'], sku['id']] + \
['x' if test in tests else '' for test in all_tests_list]
out_csv.writerow(sku_list)
def print_dita(out_file, skus, all_tests_list):
row = None
n_cols = len(NAME_ID + all_tests_list)
table = ET.Element('table')
tgroup = ET.SubElement(table, 'tgroup', cols=str(n_cols))
# Metadata
for col_name in (NAME_ID + all_tests_list):
ET.SubElement(tgroup, 'colspec', colname=col_name)
# Header
thead = ET.SubElement(tgroup, 'thead')
row = ET.SubElement(thead, 'row')
for col_name in (NAME_ID + all_tests_list):
ET.SubElement(row, 'entry').text = col_name
# Body
tbody = ET.SubElement(tgroup, 'tbody')
for sku in skus:
row = ET.SubElement(tbody, 'row')
ET.SubElement(row, 'entry').text = sku['name']
ET.SubElement(row, 'entry').text = sku['id']
for test in all_tests_list:
ET.SubElement(
row, 'entry').text = 'x' if test in sku['tests'] else ''
table_tree = ET.ElementTree(table)
# Pretty-print
ET.indent(table_tree)
table_tree.write(out_file, encoding='unicode')
if __name__ == '__main__':
main()
| DCGM-master | nvvs/generate_support_table.py |
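The diag config YAML consumed by the script above is not included in this listing; from parse_skus() it appears to be a top-level "skus" list in which every key other than "name" and "id" marks a supported test. The sketch below is a hypothetical example, assuming pyyaml is installed and that parse_skus() from generate_support_table.py is importable; the SKU names, ids, and test names are invented.
import yaml
from generate_support_table import parse_skus  # assumes the script above is on the import path

sample_yaml = """
skus:
  - name: Example GPU A
    id: 20F110DE
    memtest: true
    diagnostic: true
  - name: Example GPU B
    id: 26B5
    diagnostic: true
"""

config = yaml.safe_load(sample_yaml)
rows, all_tests = [], set()
parse_skus(rows, all_tests, config['skus'])
print(sorted(all_tests))   # ['diagnostic', 'memtest']
# Ids longer than four characters get a colon inserted for readability, e.g. '20F1:10DE'.
print([(r['name'], r['id'], sorted(r['tests'])) for r in rows])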
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
def VerifyAllowlistParameterRanges(allowlistFile):
pattern = r"(?P<value>[\d\.]+), (?P<min>[\d\.]+), (?P<max>[\d\.]+)"
f = open(allowlistFile)
lines = f.readlines()
errorCount = 0
print("Verifying parameter ranges in allowlist file...")
for i, line in enumerate(lines):
match = re.search(pattern, line)
if match:
val = float(match.group('value'))
min_val = float(match.group('min'))
max_val = float(match.group('max'))
if val < min_val or val > max_val:
errorCount += 1
print("Line %s: invalid range or value: %s" % (i+1, line.rstrip()))
if errorCount:
print("Errors found. Please fix errors before committing.")
sys.exit(1)
print("Success!")
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Script called with args: %s" % (sys.argv[1:]))
print("Invalid arguments. Script should be called with path to allowlist file only.")
sys.exit(1)
VerifyAllowlistParameterRanges(sys.argv[1])
| DCGM-master | nvvs/verify_allowlist_parameter_range.py |
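For reference, a small sketch of what the regex above matches and how the range check behaves; the allowlist lines below are hypothetical, not taken from a real allowlist file.
import re

pattern = r"(?P<value>[\d\.]+), (?P<min>[\d\.]+), (?P<max>[\d\.]+)"
lines = [
    "target_power: 225.0, 150.0, 300.0",   # value inside [min, max] -> accepted
    "target_power: 350.0, 150.0, 300.0",   # value above max -> flagged as an error
]
for line in lines:
    m = re.search(pattern, line)
    in_range = float(m.group('min')) <= float(m.group('value')) <= float(m.group('max'))
    print("%s -> %s" % (line, "ok" if in_range else "out of range"))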
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Find all of the function symbols in the passed-in ptx file and append them as variables to
# the passed in output file
import sys
if len(sys.argv) < 3:
print "USAGE: find_ptx_symbols.py <input.ptx> <output.h>\nThere must be two arguments supplied to this script"
sys.exit(1)
ptxFilename = sys.argv[1]
outFilename = sys.argv[2]
ptxFp = open(ptxFilename, "rt")
outFp = open(outFilename, "at")
outFp.write("\n\n")
for line in ptxFp.readlines():
if line.find(".entry") < 0:
continue
lineParts = line.split()
funcName = lineParts[2][0:-1]
outFp.write("const char *%s_func_name = \"%s\";\n" % (funcName, funcName))
| DCGM-master | nvvs/plugin_src/diagnostic/find_ptx_symbols.py |
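A quick illustration of the extraction above, using a hypothetical PTX entry line (the kernel name is invented):
line = ".visible .entry my_kernel("
parts = line.split()
func_name = parts[2][0:-1]          # strips the trailing "(" just like the loop above
print('const char *%s_func_name = "%s";' % (func_name, func_name))
# -> const char *my_kernel_func_name = "my_kernel";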
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fileinput
allowlist = 'allowlist.txt'
tests = ['quick', 'long']
template = 'template.txt'
f = open(allowlist, 'r')
tuples = list()
for line in f:
line = line.replace("\n", "")
tuples.append(line)
f.close()
for test in tests:
for tuple in tuples:
splitTuple = tuple.split(", ")
outFileName = splitTuple[0] + "_" + test + ".conf"
outFileName = outFileName.replace(" ", "_")
try:
outFile = open(outFileName, 'w')
except IOError as e:
print "Unable to open %s for writing. Skipping." % outFileName
continue
for line in fileinput.input(template):
if '%DEVICE%' in line:
outFile.write (line.replace('%DEVICE%', splitTuple[0]))
elif '%SETNAME%' in line:
outFile.write (line.replace('%SETNAME%', "All " + splitTuple[0]))
elif '%ID%' in line:
outFile.write (line.replace('%ID%', splitTuple[1]))
elif '%TEST%' in line:
outFile.write (line.replace('%TEST%', test.capitalize()))
else:
outFile.write (line)
outFile.close()
| DCGM-master | nvvs/configfile_examples/fill_configs.py |
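The allowlist and template files read above are not shown here; based on the substitutions performed, the allowlist is assumed to hold "device name, id" lines and the template to use the %DEVICE%, %SETNAME%, %ID% and %TEST% placeholders. A hypothetical example of the output file-name construction:
# Hypothetical allowlist entry and test name; mirrors the outFileName logic above.
device, device_id, test = "Tesla V100-SXM2-16GB", "1db1", "quick"
out_file_name = (device + "_" + test + ".conf").replace(" ", "_")
print(out_file_name)   # Tesla_V100-SXM2-16GB_quick.conf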
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pygal
from pygal.style import Style # pylint: disable=import-error
import sys
import getopt
import string
from pprint import pprint
inputfile = ''
outputfile = ''
keys = ''
def printUsage():
print(str(sys.argv[0]) + ' [-i <inputfile>] -o <outputfile> -k <keys (comma separated)>')
def parseArgs(argv):
global inputfile
global outputfile
global keys
try:
opts, args = getopt.getopt(argv,"hi:o:k:",["ifile=","ofile=","keys="])
except getopt.GetoptError:
printUsage()
sys.exit(2)
outputArg = False
keysArg = False
for opt, arg in opts:
if opt == '-h':
printUsage()
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
outputArg = True
elif opt in ("-k", "--keys"):
keysArg = True
keys = arg
if not outputArg or not keysArg:
printUsage()
sys.exit()
def cleanup():
global jsonFile
if jsonFile is not sys.stdin:
jsonFile.close()
if __name__ == "__main__":
parseArgs(sys.argv[1:])
jsonFile = open(inputfile) if inputfile != "" else sys.stdin
jsonData = json.load(jsonFile)
keyList = keys.split(",")
gpusData = jsonData["gpus"]
custom_style = Style(
colors=('#76B900', '#feed6c', '#8cedff', '#9e6ffe',
'#899ca1', '#f8f8f2', '#bf4646', '#516083', '#f92672',
'#82b414', '#fd971f', '#56c2d6', '#808384', '#8c54fe',
'#465457'))
lineChart = pygal.Line(x_labels_major_every=10, show_minor_x_labels=False, show_dots=False, legend_font_size=10, \
legend_at_bottom=True, style=custom_style, include_x_axis=False)
try:
key = keyList[0]
lineChart.x_labels = list(map(str, range(0, len(gpusData["0"][key]))))
except KeyError:
print('Key \"' + key + '\" not found in JSON file.')
for gpu in gpusData:
try:
for key in keyList:
line = list()
secondaryAxis = False
for entry in gpusData[gpu][key]:
line.append(entry["value"]);
if key == "gpu_temperature" or key == "power_violation":
secondaryAxis = True
lineChart.add(str(key) + ' ' + str(gpu), line, secondary=secondaryAxis)
except KeyError:
print 'Key \"' + key + '\" not found in JSON file.'
lineChart.render_to_file(outputfile)
cleanup()
| DCGM-master | nvvs/python_examples/json2svg.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import csv
import sys
import getopt
import string
inputfile = ''
outputfile = ''
keys = ''
def printUsage():
print(str(sys.argv[0]) + ' [-i <inputfile>] [-o <outputfile>] -k <keys (comma separated)>')
def parseArgs(argv):
global inputfile
global outputfile
global keys
try:
opts, args = getopt.getopt(argv,"hi:o:k:",["ifile=","ofile=","keys="])
except getopt.GetoptError:
printUsage()
sys.exit(2)
keyArg = False
for opt, arg in opts:
if opt == '-h':
printUsage()
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-o", "--ofile"):
outputfile = arg
elif opt in ("-k", "--keys"):
keys = arg
keyArg = True
if not keyArg:
printUsage()
sys.exit()
def cleanup():
global jsonFile
global outHandle
if jsonFile is not sys.stdin:
jsonFile.close()
if outHandle is not sys.stdout:
outHandle.close()
if __name__ == "__main__":
parseArgs(sys.argv[1:])
jsonFile = open(inputfile) if inputfile != "" else sys.stdin
jsonData = json.load(jsonFile)
outHandle = open(outputfile, 'w', newline='') if outputfile != "" else sys.stdout
csvWriter = csv.writer(outHandle, quotechar='"', quoting=csv.QUOTE_ALL, delimiter=",")
keyList = keys.split(",")
gpusData = jsonData["gpus"]
header = ["GPU#", "time"]
for key in keyList:
header.append(str(key))
csvWriter.writerow(header)
for gpu in gpusData:
try:
key = keyList[0]
for i in range(len(gpusData[gpu][key])):
row = [gpu]
row.append(str(i))
for key in keyList:
entry = gpusData[gpu][key][i]
row.append(str(entry["value"]))
csvWriter.writerow(row)
except KeyError:
print('Key \"' + key + '\" not found in JSON file.')
cleanup()
| DCGM-master | nvvs/python_examples/json2csv.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dcgm_structs
import dcgm_agent
class DcgmDiag:
# Maps version codes to simple version values for range comparisons
_versionMap = {
dcgm_structs.dcgmRunDiag_version: 5
}
def __init__(self, gpuIds=None, testNamesStr='', paramsStr='', verbose=True,
version=dcgm_structs.dcgmRunDiag_version):
# Make sure version is valid
if version not in DcgmDiag._versionMap:
raise ValueError("'%s' is not a valid version for dcgmRunDiag." % version)
self.version = version
if self.version == dcgm_structs.dcgmRunDiag_version7:
self.runDiagInfo = dcgm_structs.c_dcgmRunDiag_v7()
else:
self.runDiagInfo = dcgm_structs.c_dcgmRunDiag_t()
self.numTests = 0
self.numParams = 0
self.SetVerbose(verbose)
if testNamesStr == '':
# default to a level 1 test
self.runDiagInfo.validate = 1
elif testNamesStr == '1':
self.runDiagInfo.validate = 1
elif testNamesStr == '2':
self.runDiagInfo.validate = 2
elif testNamesStr == '3':
self.runDiagInfo.validate = 3
elif testNamesStr == '4':
self.runDiagInfo.validate = 4
else:
# Make sure no number other than 1-4 was submitted
if testNamesStr.isdigit():
raise ValueError("'%s' is not a valid test name." % testNamesStr)
# Copy to the testNames portion of the object
names = testNamesStr.split(',')
if len(names) > dcgm_structs.DCGM_MAX_TEST_NAMES:
err = 'DcgmDiag cannot initialize: %d test names were specified exceeding the limit of %d.' %\
(len(names), dcgm_structs.DCGM_MAX_TEST_NAMES)
raise ValueError(err)
for testName in names:
self.AddTest(testName)
if paramsStr != '':
params = paramsStr.split(';')
if len(params) >= dcgm_structs.DCGM_MAX_TEST_PARMS:
err = 'DcgmDiag cannot initialize: %d parameters were specified, exceeding the limit of %d.' %\
(len(params), dcgm_structs.DCGM_MAX_TEST_PARMS)
raise ValueError(err)
for param in params:
self.AddParameter(param)
if gpuIds:
first = True
for gpu in gpuIds:
if first:
self.runDiagInfo.gpuList = str(gpu)
first = False
else:
self.runDiagInfo.gpuList = "%s,%s" % (self.runDiagInfo.gpuList, str(gpu))
def SetVerbose(self, val):
if val == True:
self.runDiagInfo.flags |= dcgm_structs.DCGM_RUN_FLAGS_VERBOSE
else:
self.runDiagInfo.flags &= ~dcgm_structs.DCGM_RUN_FLAGS_VERBOSE
def UseFakeGpus(self):
self.runDiagInfo.fakeGpuList = self.runDiagInfo.gpuList
def GetStruct(self):
return self.runDiagInfo
def AddParameter(self, parameterStr):
if len(parameterStr) >= dcgm_structs.DCGM_MAX_TEST_PARMS_LEN:
err = 'DcgmDiag cannot add parameter \'%s\' because it exceeds max length %d.' % \
(parameterStr, dcgm_structs.DCGM_MAX_TEST_PARMS_LEN)
raise ValueError(err)
index = 0
for c in parameterStr:
self.runDiagInfo.testParms[self.numParams][index] = ord(c)
index += 1
self.numParams += 1
def AddTest(self, testNameStr):
if len(testNameStr) >= dcgm_structs.DCGM_MAX_TEST_NAMES_LEN:
err = 'DcgmDiag cannot add test name \'%s\' because it exceeds max length %d.' % \
(testNameStr, dcgm_structs.DCGM_MAX_TEST_NAMES_LEN)
raise ValueError(err)
index = 0
for c in testNameStr:
self.runDiagInfo.testNames[self.numTests][index] = ord(c)
index += 1
self.numTests += 1
def SetStatsOnFail(self, val):
if val == True:
self.runDiagInfo.flags |= dcgm_structs.DCGM_RUN_FLAGS_STATSONFAIL
def SetThrottleMask(self, value):
if DcgmDiag._versionMap[self.version] < 3:
raise ValueError("Throttle mask requires minimum version 3 for dcgmRunDiag.")
if isinstance(value, str) and len(value) >= dcgm_structs.DCGM_THROTTLE_MASK_LEN:
raise ValueError("Throttle mask value '%s' exceeds max length %d."
% (value, dcgm_structs.DCGM_THROTTLE_MASK_LEN - 1))
self.runDiagInfo.throttleMask = str(value)
def SetFailEarly(self, enable=True, checkInterval=5):
if DcgmDiag._versionMap[self.version] < 5:
raise ValueError("Fail early requires minimum version 5 for dcgmRunDiag.")
if not isinstance(checkInterval, int):
raise ValueError("Invalid checkInterval value: %s" % checkInterval)
if enable:
self.runDiagInfo.flags |= dcgm_structs.DCGM_RUN_FLAGS_FAIL_EARLY
self.runDiagInfo.failCheckInterval = checkInterval
else:
self.runDiagInfo.flags &= ~dcgm_structs.DCGM_RUN_FLAGS_FAIL_EARLY
def Execute(self, handle):
return dcgm_agent.dcgmActionValidate_v2(handle, self.runDiagInfo, self.version)
def SetStatsPath(self, statsPath):
if len(statsPath) >= dcgm_structs.DCGM_PATH_LEN:
err = "DcgmDiag cannot set statsPath '%s' because it exceeds max length %d." % \
(statsPath, dcgm_structs.DCGM_PATH_LEN)
raise ValueError(err)
self.runDiagInfo.statsPath = statsPath
def SetConfigFileContents(self, configFileContents):
if len(configFileContents) >= dcgm_structs.DCGM_MAX_CONFIG_FILE_LEN:
err = "Dcgm Diag cannot set config file contents to '%s' because it exceeds max length %d." \
% (configFileContents, dcgm_structs.DCGM_MAX_CONFIG_FILE_LEN)
raise ValueError(err)
self.runDiagInfo.configFileContents = configFileContents
def SetDebugLogFile(self, logFileName):
if len(logFileName) >= dcgm_structs.DCGM_FILE_LEN:
raise ValueError("Cannot set debug file to '%s' because it exceeds max length %d."\
% (logFileName, dcgm_structs.DCGM_FILE_LEN))
self.runDiagInfo.debugLogFile = logFileName
def SetDebugLevel(self, debugLevel):
if debugLevel < 0 or debugLevel > 5:
raise ValueError("Cannot set debug level to %d. Debug Level must be a value from 0-5 inclusive.")
self.runDiagInfo.debugLevel = debugLevel
| DCGM-master | testing/python3/DcgmDiag.py |
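A usage sketch for the class above (not part of the DCGM samples). It assumes a reachable nv-hostengine with at least one supported GPU and that the DCGM Python bindings are on PYTHONPATH; the GPU id, test name, and parameter string below are examples only.
import pydcgm
import dcgm_structs
from DcgmDiag import DcgmDiag

# Connect to a standalone hostengine; requires nv-hostengine to be running locally.
handle = pydcgm.DcgmHandle(ipAddress='127.0.0.1', opMode=dcgm_structs.DCGM_OPERATION_MODE_AUTO)
diag = DcgmDiag(gpuIds=[0], testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=60')
diag.SetFailEarly(enable=True, checkInterval=5)
response = diag.Execute(handle.handle)
print("diag system error code: %d" % response.systemError.code)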
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.dcgm_client_main import main
from DcgmJsonReader import DcgmJsonReader
from socket import socket, AF_INET, SOCK_DGRAM
# Displayed to the user
TELEGRAF_NAME = 'Telegraf'
DEFAULT_TELEGRAF_PORT = 8094
# Telegraf Configuration
# ======================
#
# In order for Telegraf to understand the format of the data sent by this
# module, it needs to be configured with the input plugin below
#
# If you modify the list of published fields, you will need to add non-numeric
# ones as tag_keys for Telegraf to store them
#
# [[inputs.socket_listener]]
# name_override = "dcgm"
# service_address = "udp://:8094"
# data_format = "json"
# tag_keys = [
# "compute_pids",
# "driver_version",
# "gpu_uuid",
# "nvml_version",
# "process_name",
# "xid_errors"
# ]
class DcgmTelegraf(DcgmJsonReader):
###########################################################################
def __init__(self, publish_hostname, publish_port, **kwargs):
self.m_sock = socket(AF_INET, SOCK_DGRAM)
self.m_dest = (publish_hostname, publish_port)
super(DcgmTelegraf, self).__init__(**kwargs)
###########################################################################
def SendToTelegraf(self, payload):
self.m_sock.sendto(payload, self.m_dest)
###########################################################################
def CustomJsonHandler(self, outJson):
self.SendToTelegraf(outJson)
if __name__ == '__main__': # pragma: no cover
main(DcgmTelegraf, TELEGRAF_NAME, DEFAULT_TELEGRAF_PORT, add_target_host=True)
| DCGM-master | testing/python3/dcgm_telegraf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dcgm_agent
import dcgm_structs
'''
Class for managing a group of field IDs in the host engine.
'''
class DcgmFieldGroup:
'''
Constructor
dcgmHandle - DcgmHandle() instance to use for communicating with the host engine
name - Name of the field group to use within DCGM. This must be unique
fieldIds - Fields that are part of this group
fieldGroupId - If provided, this is used to initialize the object from an existing field group ID
'''
def __init__(self, dcgmHandle, name="", fieldIds=None, fieldGroupId=None):
fieldIds = fieldIds or []
self.name = name
self.fieldIds = fieldIds
self._dcgmHandle = dcgmHandle
self.wasCreated = False
#If the user passed in an ID, the field group already exists. Fetch live info
if fieldGroupId is not None:
self.fieldGroupId = fieldGroupId
fieldGroupInfo = dcgm_agent.dcgmFieldGroupGetInfo(self._dcgmHandle.handle, self.fieldGroupId)
self.name = fieldGroupInfo.fieldGroupName
self.fieldIds = fieldGroupInfo.fieldIds
else:
self.fieldGroupId = None #Assign here so the destructor doesn't fail if the call below fails
self.fieldGroupId = dcgm_agent.dcgmFieldGroupCreate(self._dcgmHandle.handle, fieldIds, name)
self.wasCreated = True
'''
Remove this field group from DCGM. This object can no longer be passed to other APIs after this call.
'''
def Delete(self):
if self.wasCreated and self.fieldGroupId is not None:
try:
try:
dcgm_agent.dcgmFieldGroupDestroy(self._dcgmHandle.handle, self.fieldGroupId)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NO_DATA):
# someone may have deleted the group under us. That's ok.
pass
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID):
# We lost our connection, but we're destructing this object anyway.
pass
except AttributeError as ae:
# When we're cleaning up at the end, dcgm_agent and dcgm_structs have been unloaded and we'll
# get an AttributeError: "'NoneType' object has no attribute 'dcgmExceptionClass'". Ignore this
pass
except TypeError as te:
# When we're cleaning up at the end, dcgm_agent and dcgm_structs have been unloaded and we might
# get a TypeError: "'NoneType' object is not callable'" Ignore this
pass
self.fieldGroupId = None
self._dcgmHandle = None
#Destructor
def __del__(self):
self.Delete()
| DCGM-master | testing/python3/DcgmFieldGroup.py |
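A usage sketch for the class above (not part of the DCGM source), assuming a reachable hostengine and the DCGM Python bindings on PYTHONPATH; the group name and field ids are examples only.
import pydcgm
import dcgm_fields
from DcgmFieldGroup import DcgmFieldGroup

handle = pydcgm.DcgmHandle(ipAddress='127.0.0.1')
fieldGroup = DcgmFieldGroup(handle, name="example_field_group",
                            fieldIds=[dcgm_fields.DCGM_FI_DEV_GPU_TEMP,
                                      dcgm_fields.DCGM_FI_DEV_POWER_USAGE])
print("Created field group id: %s" % str(fieldGroup.fieldGroupId))
fieldGroup.Delete()    # explicitly remove the group from the host engine when done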
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Base value for integer blank. can be used as an unspecified blank
DCGM_INT32_BLANK = 0x7ffffff0
DCGM_INT64_BLANK = 0x7ffffffffffffff0
# Base value for double blank. 2 ** 47. FP 64 has 52 bits of mantissa,
#so 47 bits can still increment by 1 and represent each value from 0-15
DCGM_FP64_BLANK = 140737488355328.0
DCGM_STR_BLANK = "<<<NULL>>>"
# Represents an error where data was not found
DCGM_INT32_NOT_FOUND = (DCGM_INT32_BLANK+1)
DCGM_INT64_NOT_FOUND = (DCGM_INT64_BLANK+1)
DCGM_FP64_NOT_FOUND = (DCGM_FP64_BLANK+1.0)
DCGM_STR_NOT_FOUND = "<<<NOT_FOUND>>>"
# Represents an error where fetching the value is not supported
DCGM_INT32_NOT_SUPPORTED = (DCGM_INT32_BLANK+2)
DCGM_INT64_NOT_SUPPORTED = (DCGM_INT64_BLANK+2)
DCGM_FP64_NOT_SUPPORTED = (DCGM_FP64_BLANK+2.0)
DCGM_STR_NOT_SUPPORTED = "<<<NOT_SUPPORTED>>>"
# Represents an error where fetching the value is not allowed with our current credentials
DCGM_INT32_NOT_PERMISSIONED = (DCGM_INT32_BLANK+3)
DCGM_INT64_NOT_PERMISSIONED = (DCGM_INT64_BLANK+3)
DCGM_FP64_NOT_PERMISSIONED = (DCGM_FP64_BLANK+3.0)
DCGM_STR_NOT_PERMISSIONED = "<<<NOT_PERM>>>"
###############################################################################
# Functions to check if a value is blank or not
def DCGM_INT32_IS_BLANK(val):
if val >= DCGM_INT32_BLANK:
return True
else:
return False
def DCGM_INT64_IS_BLANK(val):
if val >= DCGM_INT64_BLANK:
return True
else:
return False
def DCGM_FP64_IS_BLANK(val):
if val >= DCGM_FP64_BLANK:
return True
else:
return False
#Looks for <<< at first position and >>> inside string
def DCGM_STR_IS_BLANK(val):
if 0 != val.find("<<<"):
return False
elif 0 > val.find(">>>"):
return False
return True
###############################################################################
class DcgmValue:
def __init__(self, value):
self.value = value #Contains either an integer (int64), string, or double of the actual value
###########################################################################
def SetFromInt32(self, i32Value):
'''
Handle the special case where our source data was an int32 but is currently
stored in a python int (int64), dealing with blanks
'''
value = int(i32Value)
if not DCGM_INT32_IS_BLANK(i32Value):
self.value = value
return
if value == DCGM_INT32_NOT_FOUND:
self.value = DCGM_INT64_NOT_FOUND
elif value == DCGM_INT32_NOT_SUPPORTED:
self.value = DCGM_INT64_NOT_SUPPORTED
elif value == DCGM_INT32_NOT_PERMISSIONED:
self.value = DCGM_INT64_NOT_PERMISSIONED
else:
self.value = DCGM_INT64_BLANK
###########################################################################
def IsBlank(self):
'''
Returns True if the currently-stored value is a blank value. False if not
'''
if self.value is None:
return True
elif isinstance(self.value, int):
return DCGM_INT64_IS_BLANK(self.value)
elif type(self.value) == float:
return DCGM_FP64_IS_BLANK(self.value)
elif type(self.value) == str:
return DCGM_STR_IS_BLANK(self.value)
else:
raise Exception("Unknown type: %s") % str(type(self.value))
###########################################################################
def __str__(self):
return str(self.value)
###########################################################################
###############################################################################
def self_test():
v = DcgmValue(1.0)
assert(not v.IsBlank())
assert(v.value == 1.0)
v = DcgmValue(100)
assert(not v.IsBlank())
assert(v.value == 100)
v = DcgmValue(DCGM_INT64_NOT_FOUND)
assert(v.IsBlank())
v = DcgmValue(DCGM_FP64_NOT_FOUND)
assert(v.IsBlank())
v.SetFromInt32(DCGM_INT32_NOT_SUPPORTED)
assert(v.IsBlank())
assert(v.value == DCGM_INT64_NOT_SUPPORTED)
print("Tests passed")
return
###############################################################################
if __name__ == "__main__":
self_test()
###############################################################################
| DCGM-master | testing/python3/dcgmvalue.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import logging
import json
import os
try:
import pydcgm
import dcgm_agent
import dcgm_structs
import dcgm_errors
import dcgm_fields
import DcgmSystem
except:
# If we don't find the bindings, add the default path and try again
if 'PYTHONPATH' in os.environ:
os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + ":/usr/local/dcgm/bindings"
else:
os.environ['PYTHONPATH'] = '/usr/local/dcgm/bindings'
import pydcgm
import dcgm_agent
import dcgm_structs
import dcgm_fields
import DcgmSystem
BR_ST_HEALTHY = 0x0000
BR_ST_NOT_DETECTED = 0x0001
BR_ST_FAILED_PASSIVE_HEALTH = 0x0002
BR_ST_FAILED_ACTIVE_HEALTH = 0x0004
BR_HEALTH_WATCH_BITMAP = dcgm_structs.DCGM_HEALTH_WATCH_ALL
DIAG_MEMTEST_DURATION = 90.0
DIAG_CONSTANT_POWER_DURATION = 120.0
DIAG_CONSTANT_STRESS_DURATION = 120.0
DIAG_DIAGNOSTIC_DURATION = 300.0
global g_gpus
global g_switches
g_gpus = []
g_switches = []
class Entity(object):
def __init__(self, entityId, entityType=dcgm_fields.DCGM_FE_GPU, uuid=None, bdf=None):
self.health = BR_ST_HEALTHY
self.entityType = entityType
self.entityId = entityId
self.reasonsUnhealthy = []
if uuid:
self.uuid = uuid
if bdf:
self.bdf = bdf
def IsHealthy(self):
return self.health == BR_ST_HEALTHY
def MarkUnhealthy(self, failCondition, reason):
self.health = self.health | failCondition
self.reasonsUnhealthy.append(reason)
def WhyUnhealthy(self):
return self.reasonsUnhealthy
def SetEntityId(self, entityId):
self.entityId = entityId
def GetEntityId(self):
return self.entityId
def GetUUID(self):
return self.uuid
def GetBDF(self):
return self.bdf
def mark_entity_unhealthy(entities, entityId, code, reason):
found = False
for entity in entities:
if entityId == entity.GetEntityId():
entity.MarkUnhealthy(code, reason)
found = True
return found
def addParamString(runDiagInfo, paramIndex, paramStr):
strIndex = 0
for c in paramStr:
runDiagInfo.testParms[paramIndex][strIndex] = c
strIndex = strIndex + 1
def setTestDurations(runDiagInfo, timePercentage):
# We are only reducing the test time for the default case
if runDiagInfo.validate != 3:
return
stressDuration = int(DIAG_MEMTEST_DURATION * timePercentage)
powerDuration = int(DIAG_CONSTANT_POWER_DURATION * timePercentage)
constantStressDuration = int(DIAG_CONSTANT_STRESS_DURATION * timePercentage)
diagDuration = int(DIAG_DIAGNOSTIC_DURATION * timePercentage)
smParam = "memtest.test_duration=%d" % (stressDuration)
powerParam = "targeted power.test_duration=%d" % (powerDuration)
constantStressParam = "targeted stress.test_duration=%d" % (constantStressDuration)
diagParam = "diagnostic.test_duration=%d" % (diagDuration)
addParamString(runDiagInfo, 0, diagParam)
addParamString(runDiagInfo, 1, smParam)
addParamString(runDiagInfo, 2, constantStressParam)
addParamString(runDiagInfo, 3, powerParam)
def initialize_run_diag_info(settings):
runDiagInfo = dcgm_structs.c_dcgmRunDiag_v7()
runDiagInfo.version = dcgm_structs.dcgmRunDiag_version7
runDiagInfo.flags = dcgm_structs.DCGM_RUN_FLAGS_VERBOSE
testNamesStr = settings['testNames']
if testNamesStr == '1':
runDiagInfo.validate = 1
elif testNamesStr == '2':
runDiagInfo.validate = 2
elif testNamesStr == '3':
runDiagInfo.validate = 3
else:
# Make sure no number other than 1-3 was submitted
if testNamesStr.isdigit():
raise ValueError("'%s' is not a valid test name" % testNamesStr)
# Copy to the testNames portion of the object
names = testNamesStr.split(',')
testIndex = 0
if len(names) > dcgm_structs.DCGM_MAX_TEST_NAMES:
err = 'Aborting DCGM Diag because %d test names were specified exceeding the limit of %d' %\
(len(names), dcgm_structs.DCGM_MAX_TEST_NAMES)
raise ValueError(err)
for testName in names:
testNameIndex = 0
if len(testName) >= dcgm_structs.DCGM_MAX_TEST_NAMES_LEN:
err = 'Aborting DCGM Diag because test name %s exceeds max length %d' % \
(testName, dcgm_structs.DCGM_MAX_TEST_NAMES_LEN)
raise ValueError(err)
for c in testName:
runDiagInfo.testNames[testIndex][testNameIndex] = c
testNameIndex = testNameIndex + 1
testIndex = testIndex + 1
if 'timePercentage' in settings:
setTestDurations(runDiagInfo, settings['timePercentage'])
activeGpuIds = []
first = True
for gpuObj in g_gpus:
if gpuObj.IsHealthy():
activeGpuIds.append(gpuObj.GetEntityId())
if first:
runDiagInfo.gpuList = str(gpuObj.GetEntityId())
first = False
else:
to_append = ',%s' % (str(gpuObj.GetEntityId()))
runDiagInfo.gpuList = runDiagInfo.gpuList + to_append
return runDiagInfo, activeGpuIds
def mark_all_unhealthy(activeGpuIds, reason):
for gpuId in activeGpuIds:
mark_entity_unhealthy(g_gpus, gpuId, BR_ST_FAILED_ACTIVE_HEALTH, reason)
def result_to_str(result):
if result == dcgm_structs.DCGM_DIAG_RESULT_PASS:
return 'PASS'
elif result == dcgm_structs.DCGM_DIAG_RESULT_SKIP:
return 'SKIP'
elif result == dcgm_structs.DCGM_DIAG_RESULT_WARN:
return 'WARN'
elif result == dcgm_structs.DCGM_DIAG_RESULT_FAIL:
return 'FAIL'
else:
return 'NOT RUN'
def check_passive_health_checks(response, activeGpuIds):
unhealthy = False
for i in range(0, dcgm_structs.DCGM_SWTEST_COUNT):
if response.levelOneResults[i].result == dcgm_structs.DCGM_DIAG_RESULT_FAIL:
mark_all_unhealthy(activeGpuIds, response.levelOneResults[i].error.msg)
unhealthy = True
break
return unhealthy
def check_gpu_diagnostic(handleObj, settings):
runDiagInfo, activeGpuIds = initialize_run_diag_info(settings)
if len(activeGpuIds) == 0:
return
response = dcgm_agent.dcgmActionValidate_v2(handleObj.handle, runDiagInfo)
sysError = response.systemError
if (sysError.code != dcgm_errors.DCGM_FR_OK):
raise ValueError(sysError)
if check_passive_health_checks(response, activeGpuIds) == False:
for gpuIndex in range(response.gpuCount):
for testIndex in range(dcgm_structs.DCGM_PER_GPU_TEST_COUNT_V8):
if response.perGpuResponses[gpuIndex].results[testIndex].result == dcgm_structs.DCGM_DIAG_RESULT_FAIL:
gpuId = response.perGpuResponses[gpuIndex].gpuId
mark_entity_unhealthy(g_gpus, gpuId, BR_ST_FAILED_ACTIVE_HEALTH,
response.perGpuResponses[gpuIndex].results[testIndex].result.error.msg)
# NVVS marks all subsequent tests as failed so there's no point in continuing
break
def query_passive_health(handleObj, desired_watches):
dcgmGroup = handleObj.GetSystem().GetDefaultGroup()
watches = dcgmGroup.health.Get()
# Check for the correct watches to be set and set them if necessary
if watches != desired_watches:
dcgmGroup.health.Set(desired_watches)
return dcgmGroup.health.Check()
def denylist_from_passive_health_check(response):
for incidentIndex in range(response.incidentCount):
if response.incidents[incidentIndex].health != dcgm_structs.DCGM_HEALTH_RESULT_FAIL:
# Only add to the denylist for failures; ignore warnings
continue
entityId = response.incidents[incidentIndex].entityInfo.entityId
entityGroupId = response.incidents[incidentIndex].entityInfo.entityGroupId
errorString = response.incidents[incidentIndex].error.msg
if entityGroupId == dcgm_fields.DCGM_FE_GPU:
mark_entity_unhealthy(g_gpus, entityId, BR_ST_FAILED_PASSIVE_HEALTH, errorString)
else:
mark_entity_unhealthy(g_switches, entityId, BR_ST_FAILED_PASSIVE_HEALTH, errorString)
def check_passive_health(handleObj, watches):
response = query_passive_health(handleObj, watches)
if response.overallHealth != dcgm_structs.DCGM_HEALTH_RESULT_PASS:
denylist_from_passive_health_check(response)
def initialize_devices(handle, flags):
gpuIds = dcgm_agent.dcgmGetEntityGroupEntities(handle, dcgm_fields.DCGM_FE_GPU, flags)
switchIds = dcgm_agent.dcgmGetEntityGroupEntities(handle, dcgm_fields.DCGM_FE_SWITCH, flags)
i = 0
for gpuId in gpuIds:
attributes = dcgm_agent.dcgmGetDeviceAttributes(handle, gpuId)
gpuObj = Entity(gpuId, entityType=dcgm_fields.DCGM_FE_GPU, uuid=attributes.identifiers.uuid, bdf=attributes.identifiers.pciBusId)
g_gpus.append(gpuObj)
i = i + 1
i = 0
for switchId in switchIds:
switchObj = Entity(switchId, entityType=dcgm_fields.DCGM_FE_SWITCH)
g_switches.append(switchObj)
i = i + 1
# Process command line arguments
def __process_command_line__(settings):
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--num-gpus', dest='num_gpus', type=int,
help='The expected number of GPUs.')
parser.add_argument('-s', '--num-switches', dest='num_switches', type=int,
help='The expected number of NvSwitches.')
parser.add_argument('-n', '--hostname', dest='hostname', type=str,
help='The hostname of the nv-hostengine we want to query.')
parser.add_argument('-d', '--detect', dest='detect', action='store_true',
help='Run on whatever GPUs can be detected. Do not check counts.')
parser.add_argument('-l', '--log-file', dest='logfileName', type=str,
help='The name of the log file where details should be stored. Default is stdout')
parser.add_argument('-u', '--unsupported-too', dest='unsupported', action='store_true',
help='Get unsupported devices in addition to the ones DCGM supports')
parser.add_argument('-f', '--full-report', dest='fullReport', action='store_true',
help='Print a health status for each GPU')
parser.add_argument('-c', '--csv', dest='outfmtCSV', action='store_true',
help='Write output in csv format. By default, output is in json format.')
parser.add_argument('-w', '--watches', dest='watches', type=str,
help='Specify which health watches to monitor. By default, all are watched. Any list of the following may be specified:\n\ta = All watches\n\tp = PCIE\n\tm = Memory\n\ti = Inforom\n\tt = Thermal and Power\n\tn = NVLINK')
group = parser.add_mutually_exclusive_group()
group.add_argument('-r', '--specified-test', dest='testNames', type=str,
help='Option to specify what tests are run in dcgmi diag.')
group.add_argument('-i', '--instantaneous', dest='instant', action='store_true',
help='Specify to skip the longer tests and run instantaneously')
group.add_argument('-t', '--time-limit', dest='timeLimit', type=int,
help='The time limit in seconds that all the tests should not exceed. Diagnostics will be reduced in their time to meet this boundary.')
parser.set_defaults(instant=False, detect=False, fullReport=False)
args = parser.parse_args()
if args.num_gpus is not None and args.num_switches is not None:
settings['numGpus'] = args.num_gpus
settings['numSwitches'] = args.num_switches
elif args.detect == False:
raise ValueError('Must specify either a number of gpus and switches with -g and -s or auto-detect with -d')
if args.hostname:
settings['hostname'] = args.hostname
else:
settings['hostname'] = 'localhost'
if args.unsupported:
settings['entity_get_flags'] = 0
else:
settings['entity_get_flags'] = dcgm_structs.DCGM_GEGE_FLAG_ONLY_SUPPORTED
settings['instant'] = args.instant
settings['fullReport'] = args.fullReport
if args.testNames:
settings['testNames'] = args.testNames
else:
settings['testNames'] = '3'
if args.timeLimit:
settings['timePercentage'] = float(args.timeLimit) / 840.0
if args.logfileName:
logging.basicConfig(filename=args.logfileName)
if args.outfmtCSV:
settings['outfmtCSV'] = 1
if args.watches:
health_watches = 0
for c in args.watches:
if c == 'p':
health_watches |= dcgm_structs.DCGM_HEALTH_WATCH_PCIE
elif c == 'm':
health_watches |= dcgm_structs.DCGM_HEALTH_WATCH_MEM
elif c == 'i':
health_watches |= dcgm_structs.DCGM_HEALTH_WATCH_INFOROM
elif c == 't':
health_watches |= dcgm_structs.DCGM_HEALTH_WATCH_THERMAL
health_watches |= dcgm_structs.DCGM_HEALTH_WATCH_POWER
elif c == 'n':
health_watches |= dcgm_structs.DCGM_HEALTH_WATCH_NVLINK
elif c == 'a':
health_watches |= dcgm_structs.DCGM_HEALTH_WATCH_ALL
else:
print(("Unrecognized character %s found in watch string '%s'" % (c, args.watches)))
sys.exit(-1)
settings['watches'] = health_watches
else:
settings['watches'] = BR_HEALTH_WATCH_BITMAP
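# Illustrative invocations of this script (hypothetical command lines built from the flags parsed above):
#
#   python denylist_recommendations.py -d -w pmn        # auto-detect devices; watch PCIe, memory and NVLink
#   python denylist_recommendations.py -g 8 -s 6 -i -c  # expect 8 GPUs / 6 NvSwitches, instantaneous check, CSV output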
def get_entity_id_list(entities):
ids = ""
first = True
for entity in entities:
if first:
ids = str(entity.GetEntityId())
else:
ids += ",%d" % (entity.GetEntityId())
first = False
return ids
def check_health(handleObj, settings, error_list):
initialize_devices(handleObj.handle, settings['entity_get_flags'])
if 'numGpus' in settings:
if len(g_gpus) != settings['numGpus']:
error_list.append("%d GPUs were specified but only %d were detected with ids '%s'" %
(settings['numGpus'], len(g_gpus), get_entity_id_list(g_gpus)))
if 'numSwitches' in settings:
if len(g_switches) != settings['numSwitches']:
error_list.append("%d switches were specified but only %d were detected with ids '%s'" %
(settings['numSwitches'], len(g_switches), get_entity_id_list(g_switches)))
check_passive_health(handleObj, settings['watches']) # quick check
if settings['instant'] == False:
check_gpu_diagnostic(handleObj, settings)
def process_command_line(settings):
try:
__process_command_line__(settings)
except ValueError as e:
return str(e)
def main():
# Parse the command line
settings = {}
error_list = []
exitCode = 0
jsonTop = {}
error = process_command_line(settings)
if error:
# If we had an error processing the command line, don't attempt to check anything
error_list.append(error)
else:
try:
handleObj = pydcgm.DcgmHandle(None, settings['hostname'], dcgm_structs.DCGM_OPERATION_MODE_AUTO)
check_health(handleObj, settings, error_list)
except dcgm_structs.DCGMError as e:
# Catch any exceptions from DCGM and add them to the error_list so they'll be printed as JSON
error_list.append(str(e))
except ValueError as e:
error_list.append(str(e))
if 'outfmtCSV' in settings: # show all health, then all un-healthy
for gpuObj in g_gpus:
if gpuObj.IsHealthy() == True:
print("healthy,%s,%s" %(gpuObj.GetBDF(), gpuObj.GetUUID()))
for gpuObj in g_gpus:
if gpuObj.IsHealthy() == False:
print("unhealthy,%s,%s,\"%s\"" %(gpuObj.GetBDF(), gpuObj.GetUUID(),gpuObj.WhyUnhealthy()))
else: # build obj that can be output in json
denylistGpus = {}
healthyGpus = {}
for gpuObj in g_gpus:
if gpuObj.IsHealthy() == False:
details = {}
details['UUID'] = gpuObj.GetUUID()
details['BDF'] = gpuObj.GetBDF()
details['Failure Explanation'] = gpuObj.WhyUnhealthy()
denylistGpus[gpuObj.GetEntityId()] = details
elif settings['fullReport']:
details = {}
details['UUID'] = gpuObj.GetUUID()
details['BDF'] = gpuObj.GetBDF()
healthyGpus[gpuObj.GetEntityId()] = details
jsonTop['denylistedGpus'] = denylistGpus
if settings['fullReport']:
jsonTop['Healthy GPUs'] = healthyGpus
if len(error_list): # had error processing the command line
exitCode = 1
if 'outfmtCSV' in settings: # json output
if len(error_list):
for errObj in error_list:
print("errors,\"%s\"" %(errObj))
else:
jsonTop['errors'] = error_list
    if 'outfmtCSV' in settings: # CSV rows were already printed above; nothing more to print here
pass
else:
print(json.dumps(jsonTop, indent=4, separators=(',', ': ')))
sys.exit(exitCode)
if __name__ == '__main__':
main()
| DCGM-master | testing/python3/denylist_recommendations.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
import inspect
import os
import platform
import string
import traceback
from collections import namedtuple
import stat
import time
import apps
import re
import ctypes
import sys
from shutil import which as find_executable
from progress_printer import *
import logger
import option_parser
import utils
import dcgm_agent
import dcgm_structs
import dcgm_agent_internal
import dcgm_structs_internal
import dcgm_fields
import DcgmDiag
import dcgmvalue
import pydcgm
import version
from dcgm_structs import DCGM_ST_INIT_ERROR, dcgmExceptionClass
import nvidia_smi_utils
import errno
import shlex
import xml.etree.ElementTree as ET
import subprocess
from subprocess import Popen, check_call, CalledProcessError, PIPE
test_directory = 'tests'
noLogging = True
noLoggingBackup = noLogging
reRunning = False
loggingLevel = "DEBUG" #Level to use for logging. These come from DcgmLogging.h
DIAG_SMALL_FB_MODE_VAR = '__DCGM_DIAG_SMALL_FB_MODE'
smallFbModeEnv = {DIAG_SMALL_FB_MODE_VAR : '1'}
def check_output(*args, **kwargs):
kwargs['universal_newlines'] = True
return subprocess.check_output(*args, **kwargs)
def set_tests_directory(testDir):
'''
Set the directory where test .py files will be looked for (ex: 'tests' for DCGM)
'''
global test_directory
test_directory = testDir
def verify_dcgmi_executible_visible_for_all_users():
# We don't run it if not on this platform
if utils.platform_identifier not in apps.dcgmi_app.DcgmiApp.paths:
logger.info('Skip unsupported platform')
return False
dcgmi_path = apps.dcgmi_app.DcgmiApp.paths[utils.platform_identifier]
abs_path = os.path.realpath(dcgmi_path)
import stat
while True:
mode = os.stat(abs_path).st_mode
if (not(bool(mode & stat.S_IXOTH) and bool(mode & stat.S_IROTH))):
logger.error("dcgmi tests cannot run because of insufficient perms on %s, need o:rx" % abs_path)
return False
if abs_path == "/":
break
abs_path = os.path.split(abs_path)[0]
return True
def is_nvswitch_detected():
""" Tries to detect if nvswitch is present """
try:
lsPciOutput = check_output("lspci | grep -i nvidia", shell=True)
except CalledProcessError as e:
logger.info("lspci did not successfully execute. Ignoring {_e}".format(_e=str(e)))
return True
# pylint: disable=unsupported-membership-test
if "Bridge: NVIDIA Corporation Device" in lsPciOutput:
return True
else:
return False
def is_hostengine_running():
""" Helper function to detect if there is an existing host engine running """
processList = check_output(["ps", "-ef"])
# pylint: disable=unsupported-membership-test
if "nv-hostengine" in processList:
return True
else:
return False
def check_for_running_hostengine_and_log_details(quiet):
"""
Helper function to check if there is an existing hostengine running.
Logs entries (level INFO) from `ps -ef` output which correspond to running hostengine processes.
If no hostengine process is found, logs "No hostengine process found" (level INFO)
Returns True if a running host engine was found, and False otherwise.
"""
header = "*************** List of nv-hostengine processes ***************"
ps_output = check_output(["ps", "-ef"])
processes_list = ps_output.split("\n")
process_ids = []
for process in processes_list:
if "nv-hostengine" in process:
if header != None:
if not quiet:
logger.info(header)
header = None
if not quiet:
logger.info(process)
fields = process.split(' ')
if len(fields) > 1 and fields[1]:
process_ids.append(fields[1])
if header is None:
if not quiet:
logger.info("*************** End list of nv-hostengine processes ***************")
elif not quiet:
logger.info("No hostengine process found")
return process_ids
def run_p2p_bandwidth_app(args):
""" Helper function to run the p2p_bandwidth test """
p2p_app = apps.RunP2Pbandwidth(args)
p2p_app.start()
pid = p2p_app.getpid()
ret = p2p_app.wait()
p2p_app.validate()
logger.info("The p2p_bandwidth pid is %s" % pid)
return ret, p2p_app.stdout_lines, p2p_app.stderr_lines
def run_nvpex2_app(args):
""" Helper function to run the nvpex2 app for error injection """
nvpex2_app = apps.RunNVpex2(args)
nvpex2_app.start()
pid = nvpex2_app.getpid()
ret = nvpex2_app.wait()
nvpex2_app.validate()
logger.info("The nvpex2 pid is %s" % pid)
return ret, nvpex2_app.stdout_lines, nvpex2_app.stderr_lines
def get_cuda_visible_devices_str(handle, entityGroupId, entityId):
entities = []
fieldIds = [ dcgm_fields.DCGM_FI_DEV_CUDA_VISIBLE_DEVICES_STR ]
entities.append(dcgm_structs.c_dcgmGroupEntityPair_t(entityGroupId, entityId))
flags = dcgm_structs.DCGM_FV_FLAG_LIVE_DATA
fieldValues = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entities, fieldIds, flags)
return fieldValues[0].value.str
def get_cuda_driver_version(handle, gpuId):
'''
    Returns the CUDA driver version as a list of two integers, e.g. [10, 1] for CUDA 10.1
'''
entities = []
fieldIds = [ dcgm_fields.DCGM_FI_CUDA_DRIVER_VERSION ]
entities.append(dcgm_structs.c_dcgmGroupEntityPair_t(dcgm_fields.DCGM_FE_GPU, gpuId))
flags = dcgm_structs.DCGM_FV_FLAG_LIVE_DATA
fieldValues = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entities, fieldIds, flags)
    majorVersion = fieldValues[0].value.i64 // 1000
    minorVersion = (fieldValues[0].value.i64 - majorVersion * 1000) // 10
return [majorVersion, minorVersion]
def cuda_visible_devices_required(handle, gpuId):
# We need to have a cuda_visible_devices value if this GPU has any MIG entities
try:
hierarchy = dcgm_agent.dcgmGetGpuInstanceHierarchy(handle)
except dcgm_structs.DCGMError_NotSupported:
return False
for i in range(0, hierarchy.count):
entity = hierarchy.entityList[i]
if entity.entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_I:
if entity.parent.entityId == gpuId:
return True
return False
def get_cuda_visible_devices_env(handle, gpuId):
env = {}
# If we have no MIG entities, then don't bother passing a CUDA_VISIBLE_DEVICES value - it is error prone
if cuda_visible_devices_required(handle, gpuId):
if 'REQUESTED_CUDA_VISIBLE_DEVICES' not in os.environ:
skip_test("This test relies on a requested CUDA_VISIBLE_DEVICES value, but none was found. Please set it in the environment before running")
else:
env['CUDA_VISIBLE_DEVICES'] = os.environ['REQUESTED_CUDA_VISIBLE_DEVICES']
return env
class FilePrivilegesReduced(object):
def __init__(self, devnode):
self.devnode = devnode
def __enter__(self):
if not self.devnode: # For ease of programming, support case when devnode is None
return # See for_all_device_nodes for more context
with RunAsRoot(reload_driver=False):
self.old_st_mode = st_mode = os.stat(self.devnode).st_mode
self.new_st_mode = st_mode & ~utils.stat_everyone_read_write
logger.debug("setting %s chmod to %s" % (self.devnode, bin(self.new_st_mode)))
os.chmod(self.devnode, self.new_st_mode)
def __exit__(self, exception_type, exception, trace):
if not self.devnode:
return
with RunAsRoot(reload_driver=False):
current_st_mode = os.stat(self.devnode).st_mode
if current_st_mode != self.new_st_mode:
logger.warning("Some other entity changed permission of %s from requested %s to %s" %
(self.devnode, self.new_st_mode, current_st_mode))
logger.debug("restoring %s chmod to %s" % (self.devnode, bin(self.old_st_mode)))
os.chmod(self.devnode, self.old_st_mode) # restore
def run_as_root_and_non_root():
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
with SubTest("As root", quiet=True):
RunAsRoot.is_supported(skip_if_not_supported=True)
with RunAsRoot():
fn(*args, **kwds)
with SubTest("As non-root", quiet=True):
RunAsNonRoot.is_supported(skip_if_not_supported=True)
with RunAsNonRoot():
fn(*args, **kwds)
return wrapper
return decorator
def run_only_as_root():
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
RunAsRoot.is_supported(skip_if_not_supported=True)
with RunAsRoot():
fn(*args, **kwds)
return wrapper
return decorator
def run_only_as_non_root():
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
RunAsNonRoot.is_supported(skip_if_not_supported=True)
with RunAsNonRoot():
fn(*args, **kwds)
return wrapper
return decorator
def run_only_on_windows():
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if utils.is_windows():
result = fn(*args, **kwds)
else:
skip_test("This test is to run only on Windows.")
return wrapper
return decorator
def run_only_on_x86():
"""
Run only on x86 based machines
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if utils.platform_identifier in ["Linux_32bit", "Linux_64bit", "Windows_64bit"]:
result = fn(*args, **kwds)
else:
skip_test("This test is to run only on x86 platform")
return wrapper
return decorator
def run_only_on_ppc():
"""
Run only on ppc Platform
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if utils.platform_identifier in ["Linux_ppc64le"]:
result = fn(*args, **kwds)
else:
skip_test("This test is to run only on ppc64le platform")
return wrapper
return decorator
def run_only_on_linux():
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if utils.is_linux():
result = fn(*args, **kwds)
else:
skip_test("This test is to run only on Linux")
return wrapper
return decorator
def run_only_on_bare_metal():
"""
Run only on bare metal systems
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if utils.is_bare_metal_system():
result = fn(*args, **kwds)
else:
skip_test("This test is only supported on bare metal systems")
return wrapper
return decorator
def run_only_on_architecture(arch):
"""
Run only on the specified architectures
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
framework_path = utils.get_testing_framework_library_path()
match = re.search(arch, framework_path)
if match is None:
skip_test("The plugin we're testing doesn't exist on this platform.")
else:
result = fn(*args, **kwds)
return wrapper
return decorator
def run_only_with_minimum_cuda_version(major_ver, minor_ver):
"""
Run only if we're on the specified version or higher
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if 'handle' not in kwds:
skip_test("Can't guarantee the cuda version without a valid handle to DCGM, skipping test.")
if 'gpuIds' not in kwds:
skip_test("Can't guarantee the cuda version without a GPU list, skipping test.")
handle = kwds['handle']
gpuIds = kwds['gpuIds']
            major, minor = get_cuda_driver_version(handle, gpuIds[0])
            if major < major_ver:
                skip_test("The plugin we're testing is only supported for CUDA %d.%d and higher" \
                          % (major_ver, minor_ver))
            elif major == major_ver and minor < minor_ver:
                skip_test("The plugin we're testing is only supported for CUDA %d.%d and higher" \
                          % (major_ver, minor_ver))
            fn(*args, **kwds)
return wrapper
return decorator
def run_first():
"""
Forces get_test_content to move this test at the top of the list.
Note: can coexist with run_last. Test is just duplicated.
"""
def decorator(fn):
fn.run_first = True
return fn
return decorator
def run_last():
"""
Forces get_test_content to move this test at the bottom of the list
Note: can coexist with run_first. Test is just duplicated.
"""
def decorator(fn):
fn.run_last = True
return fn
return decorator
def needs_cuda():
"""
Skips the test on platforms that don't support CUDA (e.g. VMkernel).
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if utils.is_cuda_supported_system():
result = fn(*args, **kwds)
else:
skip_test("This test requires CUDA which is not supported on this platform")
return wrapper
return decorator
def is_xorg_running():
if utils.is_windows():
return False
try:
processes = apps.LsofApp("/dev/nvidiactl").get_processes()
    except OSError:
        # /dev/nvidiactl doesn't exist (ENOENT), so the driver isn't loaded and Xorg can't be using it
        return False
for (pid, pname) in processes:
if pname == "Xorg":
return True
return False
def is_driver_in_use():
"""
Returns True if testing2 is the only process keeping the driver loaded.
Note: doesn't take Persistence Mode into account!
"""
# !!! Keep in sync with run_only_if_driver_unused decorator !!!
if utils.is_windows():
return True
if is_xorg_running():
return True
processes = apps.LsofApp("/dev/nvidiactl").get_processes()
if processes:
return True
return False
def run_only_if_driver_unused():
"""
Skips the test if driver is in use (e.g. some other application, except for current testing framework)
is using nvidia driver.
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
# !!! Keep in sync with is_driver_in_use function !!!
if utils.is_windows():
skip_test("Can't run this test when other processes are using the GPU. (This can run only on Linux)")
if is_xorg_running():
skip_test("Can't run this test when X server is running.")
processes = apps.LsofApp("/dev/nvidiactl").get_processes()
if processes:
skip_test("Can't run this test when other processes (%s) are using the GPU." % processes)
result = fn(*args, **kwds)
return wrapper
return decorator
class assert_raises(object):
def __init__(self, expected_exception):
assert not (expected_exception is None), "expected_exception can't be None"
self.expected_exception = expected_exception
def __enter__(self):
pass
def __exit__(self, exception_type, exception, trace):
if isinstance(exception, KeyboardInterrupt):
return False
#If we weren't expecting a connection exception and we get one, pass it up the stack rather than the assertion exception
notConnectedClass = dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID)
if (not self.expected_exception == notConnectedClass) and isinstance(exception, notConnectedClass):
return False
assert not exception is None, \
"This code block didn't return ANY exception (expected %s exception)" % self.expected_exception.__name__
assert isinstance(exception, self.expected_exception), \
"Expected that this code block will return exception of type %s but it returned exception of type " \
"%s instead:\n %s" % \
(
self.expected_exception.__name__,
exception_type.__name__,
" ".join(traceback.format_exception(exception_type, exception, trace))
)
return isinstance(exception, self.expected_exception)
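# Illustrative usage of assert_raises (a sketch; the specific DCGM call and error code are assumptions):
#
#     with assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM)):
#         dcgm_agent.dcgmGroupGetInfo(handle, invalidGroupId)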
def helper_check_for_duplicate_test_names(test_content):
'''
Iterates over every test in the list that will be returned from get_test_content and
throws an exception if any duplicate test names are detected. This is needed because
DVS-SC only knows about the test name, not the module + test name.
'''
seenTestNames = {}
for module in test_content:
for testObj in module[1]:
testName = testObj.__name__
if testName in seenTestNames:
raise Exception("Found duplicate test name %s in module %s. Change the name to something unique." % (testName, module[0]))
seenTestNames[testName] = True
def get_test_content():
'''
Searches for all modules with name "test*" and all functions with name "test*" in each module.
Returns list of pairs [(module, test functions in module), ...]
'''
# Get all test names
test_module_files = utils.find_files(os.path.join(utils.script_dir, test_directory), mask = "test*.py", recurse=True)
test_module_names = [os.path.splitext(os.path.relpath(fname, utils.script_dir))[0].replace(os.path.sep, ".") for fname in test_module_files]
test_module_names.sort()
# see help(__import__) for info on __import__ fromlist parameter
test_modules = [__import__(name,
fromlist=("non-empty list has a side effect of import loading the module.submodule instead of module"))
for name in test_module_names]
def test_functions_in_module(module):
attributes = dir(module)
attributes.sort()
for attr_name in attributes:
if not attr_name.startswith("test"):
continue
attr = getattr(module, attr_name)
if not inspect.isfunction(attr):
continue
if option_parser.options.filter_tests:
if option_parser.options.filter_tests.search(module.__name__ + "." + attr_name) is None:
# Skip tests that don't match provided filter test regex
continue
yield attr
test_content = [(module, list(test_functions_in_module(module))) for module in test_modules]
# split into 3 groups (some tests might show in two groups)
# split into run_first, normal and run_last
filter_run_first = lambda x: hasattr(x, "run_first") and x.run_first
filter_run_last = lambda x: hasattr(x, "run_last") and x.run_last
filter_run_normal = lambda x: not filter_run_first(x) and not filter_run_last(x)
test_content_first = [(module, list(filter(filter_run_first, test_funcs))) for (module, test_funcs) in test_content]
test_content_normal = [(module, list(filter(filter_run_normal, test_funcs))) for (module, test_funcs) in test_content]
test_content_last = [(module, list(filter(filter_run_last, test_funcs))) for (module, test_funcs) in test_content]
test_content = test_content_first + test_content_normal + test_content_last
# return modules with at least one test function
test_content = [x for x in test_content if x[1] != []]
#Check for duplicate test names
helper_check_for_duplicate_test_names(test_content)
return test_content
class TestSkipped(Exception):
pass
def skip_test(reason):
raise TestSkipped(reason)
def skip_test_notsupported(feature_name):
raise TestSkipped("Test runs only on devices that don't support %s." % feature_name)
def skip_test_supported(feature_name):
raise TestSkipped("Test runs only on devices that support %s." % feature_name)
class _RunAsUser(object):
"""
Switches euid, egid and groups to target_user and later restores the old settings.
"""
def __init__(self, target_user, reload_driver):
self._target_user = target_user
self._reload_driver = reload_driver
if utils.is_linux():
ids = utils.get_user_idinfo(target_user)
self._target_uid = ids.uid
self._target_gid = ids.gid
self._orig_uid = None
self._orig_gid = None
self._orig_user = None
else:
# on non-linux switching user is not supported
assert (self._target_user == "root") == utils.is_root()
def __enter__(self):
if utils.is_linux():
self._orig_uid = os.geteuid()
self._orig_gid = os.getegid()
self._orig_user = utils.get_name_by_uid(self._orig_uid)
if self._target_user == self._orig_user:
return # Nothing to do
logger.debug("Switching current user from %s (uid %d gid %d) to %s (uid %d gid %d)" %
(self._orig_user, self._orig_uid, self._orig_gid,
self._target_user, self._target_uid, self._target_gid))
logger.debug("Groups before: %s" % os.getgroups())
if os.geteuid() == 0:
# initgroups can be called only while effective user is root
# before seteuid effective user is root
os.initgroups(self._target_user, self._target_gid)
os.setegid(self._target_gid)
os.seteuid(self._target_uid)
if os.geteuid() == 0:
os.initgroups(self._target_user, self._target_gid)
os.setegid(self._target_gid)
logger.debug("Groups after: %s" % os.getgroups())
def __exit__(self, exception_type, exception, trace):
if utils.is_linux():
if self._target_user == self._orig_user:
return # Nothing to do
logger.debug("Switching back current user from %s (uid %d gid %d) to %s (uid %d gid %d)" %
(self._target_user, self._target_uid, self._target_gid,
self._orig_user, self._orig_uid, self._orig_gid))
logger.debug("Groups before: %s" % os.getgroups())
if os.geteuid() == 0:
os.initgroups(self._orig_user, self._orig_gid)
os.setegid(self._orig_gid)
os.seteuid(self._orig_uid)
if os.geteuid() == 0:
os.initgroups(self._orig_user, self._orig_gid)
os.setegid(self._orig_gid)
logger.debug("Groups after: %s" % os.getgroups())
class RunAsNonRoot(_RunAsUser):
"""
Switches euid to option_parser.options.non_root_user.
"""
def __init__(self, reload_driver=True):
non_root_user = option_parser.options.non_root_user
if not non_root_user and utils.is_linux() and not utils.is_root():
non_root_user = utils.get_name_by_uid(os.getuid())
super(RunAsNonRoot, self).__init__(non_root_user, reload_driver)
@classmethod
def is_supported(cls, skip_if_not_supported=False):
if not utils.is_root():
return True # if current user is non-root then running as non-root is supported
if not utils.is_linux():
if skip_if_not_supported:
skip_test("Changing user mid way is only supported on Linux")
return False
if not option_parser.options.non_root_user:
if skip_if_not_supported:
skip_test("Please run as non-root or as root with --non-root-user flag")
return False
return True
class RunAsRoot(_RunAsUser):
"""
Switches euid to root (possible only if real uid is root) useful e.g. when euid is non-root.
"""
def __init__(self, reload_driver=True):
super(RunAsRoot, self).__init__("root", reload_driver)
@classmethod
def is_supported(cls, skip_if_not_supported=False):
if utils.is_root():
return True # if current user is root then running as root is supported
if not utils.is_linux():
if skip_if_not_supported:
skip_test("Changing user mid way is only supported on Linux")
return False
if not utils.is_real_user_root():
if skip_if_not_supported:
skip_test("Run as root user.")
return False
return True
def tryRunAsNonRoot():
if RunAsNonRoot.is_supported():
return RunAsNonRoot()
return _DoNothingBlock()
def tryRunAsRoot():
if RunAsRoot.is_supported():
return RunAsRoot()
return _DoNothingBlock()
class SubTest(object):
_stack = [None]
_log = []
SUCCESS,SKIPPED,FAILED,FAILURE_LOGGED,NOT_CONNECTED = ("SUCCESS", "SKIPPED", "FAILED", "FAILURE_LOGGED", "NOT_CONNECTED")
ResultDetailsRaw = namedtuple("ResultDetailsRaw", "exception_type, exception, trace")
def __init__(self, name, quiet=False, supress_errors=True, disconnect_is_failure=True):
"""
Set quiet to True if you want the test to be removed from the logs if it succeeded.
Useful when test is minor and you don't want to clobber the output with minor tests.
"""
self.name = name
self.result = None
self.result_details = None
self.result_details_raw = None
self.parent = None
self.depth = None
self.subtests = []
self.quiet = quiet
self.stats = dict([(SubTest.SUCCESS, 0), (SubTest.SKIPPED, 0), (SubTest.FAILED, 0), (SubTest.FAILURE_LOGGED, 0), (SubTest.NOT_CONNECTED, 0)])
self.supress_errors = supress_errors
self.disconnect_is_failure = disconnect_is_failure
def __enter__(self):
self.parent = SubTest._stack[-1]
self.depth = len(SubTest._stack)
SubTest._stack.append(self)
SubTest._log.append(self)
if self.parent:
self.parent.subtests.append(self)
# pylint: disable=undefined-variable
progress_printer.subtest_start(self)
# returns the current subtest
return self
def __exit__(self, exception_type, exception, trace):
SubTest._stack.pop()
for subtest in self.subtests:
self.stats[SubTest.SUCCESS] += subtest.stats[SubTest.SUCCESS]
self.stats[SubTest.SKIPPED] += subtest.stats[SubTest.SKIPPED]
self.stats[SubTest.FAILED] += subtest.stats[SubTest.FAILED]
self.stats[SubTest.FAILURE_LOGGED] += subtest.stats[SubTest.FAILURE_LOGGED]
if exception is None:
self.result = SubTest.SUCCESS
elif isinstance(exception, TestSkipped):
self.result = SubTest.SKIPPED
elif isinstance(exception, KeyboardInterrupt):
self.result = SubTest.SKIPPED
elif isinstance(exception, dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID)):
if self.disconnect_is_failure:
self.result = SubTest.FAILED
else:
self.result = SubTest.NOT_CONNECTED
elif reRunning == True:
self.result = SubTest.FAILURE_LOGGED
else:
self.result = SubTest.FAILED
self.result_details = " ".join(traceback.format_exception(exception_type, exception, trace))
self.result_details_raw = SubTest.ResultDetailsRaw(exception_type, exception, trace)
self.stats[self.result] += 1
if self.quiet and self.result == SubTest.SUCCESS and self.subtests == []:
SubTest._log.remove(self)
if self.parent:
self.parent.subtests.remove(self)
# pylint: disable=undefined-variable
progress_printer.subtest_finish(self)
# terminate on KeyboardInterrupt exceptions
if isinstance(exception, KeyboardInterrupt):
return False
if self.result == SubTest.FAILED and option_parser.options.break_at_failure:
try:
import debugging
debugging.break_after_exception()
except ImportError:
logger.warning("Unable to find Python Debugging Module - \"-b\" option is unavailable")
return self.supress_errors
def __str__(self):
# traverse the entire path from node to parent
# to retrieve all the names of the subtests
path_to_parent = [self]
while path_to_parent[-1].parent:
path_to_parent.append(path_to_parent[-1].parent)
path_to_parent.reverse()
return "Test %s - %s" % ("::".join([s.name for s in path_to_parent]), self.result)
@staticmethod
def get_all_subtests():
return SubTest._log
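# Illustrative sketch of how SubTest blocks nest (the step names are hypothetical):
#
#     with SubTest("configure group"):
#         with SubTest("add GPUs", quiet=True):
#             pass  # work for the quiet child subtest
#
# Results for every recorded subtest can later be read via SubTest.get_all_subtests().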
class _IgnoreExceptions(object):
def __init__(self, dontignore=None):
"""
dontignore = optional argument, list of exception types that shouldn't be ignored
"""
self.dontignore = dontignore
def __enter__(self):
pass
def __exit__(self, exception_type, exception, trace):
if isinstance(exception, KeyboardInterrupt):
return False
if self.dontignore:
for ex in self.dontignore:
if isinstance(exception, ex):
return False
return True
class ExceptionAsWarning(object):
"""
Block wrapper used to "mark" known issues as warnings.
As reason pass a string with explanation (e.g. describe that issue is tracked in a bug X).
"""
def __init__(self, reason):
self.reason = reason
def __enter__(self):
pass
def __exit__(self, exception_type, exception, trace):
if isinstance(exception, KeyboardInterrupt):
return False
if isinstance(exception, TestSkipped):
return False
if exception:
logger.warning("Exception treated as warning: %s\nOriginal issue: %s" % (self.reason, str(exception)))
logger.debug(" ".join(traceback.format_exception(exception_type, exception, trace)))
return True
class _DoNothingBlock(object):
"""
Class that can be used in "with" statement that has completely NO effect.
Used as a fall back if some other class is not supported.
"""
def __enter__(self):
pass
def __exit__(self, exception_type, exception, trace):
pass
class RestoreDefaultEnvironment(object):
"""
Class that should be used in "with" clause. It stores some values before the block executes and
then restores the state to know predefined state after the block finishes (even if block returned with exceptions)
"""
def __init__(self):
pass
def __enter__(self):
return
def __exit__(self, exception_type, exception, trace):
logger.debug("Restoring default environment - START")
# Restore env variables
RestoreDefaultEnvironment.restore_env()
# Turn off all processes started by the test
apps.AppRunner.clean_all()
# with _IgnoreExceptions():
logger.debug("Restoring default environment - END")
@classmethod
def restore(cls):
"""
Restores environmental variables and NVML state to predefined default state.
e.g.
all device settings pending == current
persistence mode on
"""
with RestoreDefaultEnvironment():
pass
@classmethod
def restore_dev_node_permissions(cls):
if not utils.is_linux():
return # nothing to do
# make sure that current user can access /dev/nvidiactl, /dev/nvidia-uvm and the [0-9] nodes
with tryRunAsRoot():
for fname in utils.find_files("/dev/", "nvidia*"):
st_mode = os.stat(fname).st_mode
if st_mode & utils.stat_everyone_read_write != utils.stat_everyone_read_write:
try:
logger.warning("Device node %s permissions (%s) are not set as read/write for everyone (%s)."
" Framework will try to fix that" % (fname, bin(st_mode), bin(utils.stat_everyone_read_write)))
os.chmod(fname, st_mode | utils.stat_everyone_read_write)
except OSError:
logger.warning("Failed to change permission of %s. This might cause some failures down the line" % fname)
@classmethod
def restore_env(cls):
unset_envs = ['CUDA_VISIBLE_DEVICES', ]
for env in unset_envs:
if os.getenv(env) is not None:
if env == 'CUDA_VISIBLE_DEVICES':
# Save this for use in tests
os.environ['REQUESTED_CUDA_VISIBLE_DEVICES'] = os.environ['CUDA_VISIBLE_DEVICES']
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
logger.warning("%s env is set (value: %s) and is about to be unset." % (env, os.getenv(env)))
os.unsetenv(env)
del os.environ[env]
warn_envs = []
for env in warn_envs:
if os.getenv(env) is not None:
logger.warning("%s is set (value: %s)" % (env, os.getenv(env)))
return True
knownWordDict = None
def _loadWordList():
global knownWordDict
if knownWordDict is None:
with open('./data/wordlist', 'r') as f:
knownWordDict = dict((s.strip().lower(), True) for s in f.readlines())
def check_spelling(text):
_loadWordList()
global knownWordDict
# split into words, remove special characters
    text = text.translate(str.maketrans('', '', '0123456789%*$[]()<>"\'|'))
    tokens = re.split(r' |\t|\n|-|_|/|:|\.|=|\?|!|,', text)
words = [ s.strip().lower() for s in tokens ]
unknownWords = []
for word in words:
if word not in knownWordDict:
unknownWords.append(word)
assert 0 == len(unknownWords), "Unknown words: " + str(unknownWords)
def _busIdRemoveDomain(busId):
return ":".join(string.split(busId, ':')[-2:])
class RunCudaAppInBackGround:
"""
This class is used as part of "with" clause. It creates a CUDA app leading to GPU utilization and
memory usage. Starts the app for the specified time.
Usage:
with RunCudaAppInBackGround(busId, timeInMilliSeconds):
# Code to run when the app is running
# Cuda app is terminated
"""
def __init__(self, busId, timeToRun):
'''
Initializes cuda context
'''
#self.busId = _busIdRemoveDomain(busId)
self.busId = busId
self.timeToRun = timeToRun
#self.app = apps.CudaCtxCreateAdvancedApp(["--ctxCreate", self.busId, "--busyGpu", self.busId, timeToRun, "--getchar"])
self.app = apps.CudaCtxCreateAdvancedApp(["--ctxCreate", self.busId, "--busyGpu", self.busId, self.timeToRun])
def __enter__(self):
'''
Runs the CUDA app for the specified amount of time
'''
## Start the app and change the default timeout (in secs)
self.app.start(timeout=apps.default_timeout + float(self.timeToRun)/1000.0)
self.app.stdout_readtillmatch(lambda s: s.find("Calling cuInit") != -1)
def __exit__(self, exception_type, exception, trace):
'''
Wait for completion of CUDA app
'''
self.app.wait()
self.app.validate()
"""
Helper functions for setting/getting connection mode. These are needed for other helpers to know
if we are in embedded/remote mode
"""
DCGM_CONNECT_MODE_UNKNOWN = 0 #Not connected
DCGM_CONNECT_MODE_EMBEDDED = 1 #Connected to an embedded host engine
DCGM_CONNECT_MODE_REMOTE = 2 #Connected to a remote host engine. Note that this doesn't guarantee a tcp connection, just that the HE process is running
def set_connect_mode(connectMode):
global dcgm_connect_mode
dcgm_connect_mode = connectMode
def get_connect_mode():
global dcgm_connect_mode
return dcgm_connect_mode
class RunEmbeddedHostEngine:
"""
This class is used as part of a "with" clause to start and stop an embedded host engine
"""
def __init__(self, opmode=dcgm_structs.DCGM_OPERATION_MODE_AUTO, startTcpServer=False):
self.hostEngineStarted = False
self.opmode = opmode
self.handle = None
self.startTcpServer = startTcpServer
if option_parser.options.use_running_hostengine:
skip_test("Skipping embedded test due to option --use-running-hostengine")
def __enter__(self):
dcgm_agent.dcgmInit() #Will throw an exception on error
self.handle = dcgm_agent.dcgmStartEmbedded(self.opmode)
logger.info("embedded host engine started")
self.hostEngineStarted = True
if self.startTcpServer:
dcgm_agent_internal.dcgmServerRun(5555, '127.0.0.1', 1)
self.handle = dcgm_agent.dcgmConnect('127.0.0.1:5555')
logger.info("Started TCP server")
set_connect_mode(DCGM_CONNECT_MODE_EMBEDDED)
return self.handle
def __exit__(self, exception_type, exception, trace):
if self.hostEngineStarted:
logger.info("Stopping embedded host engine")
try:
dcgm_agent.dcgmShutdown()
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_UNINITIALIZED):
logger.info("embedded host engine was already stopped")
self.hostEngineStarted = False
else:
logger.info("Skipping dcgmEngineShutdown. Host engine was not running")
set_connect_mode(DCGM_CONNECT_MODE_UNKNOWN)
def run_with_embedded_host_engine(opmode=dcgm_structs.DCGM_OPERATION_MODE_AUTO, startTcpServer=False):
"""
Run this test with an embedded host engine. This will start the host engine before the test
and stop the host engine after the test
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
with RunEmbeddedHostEngine(opmode=opmode, startTcpServer=startTcpServer) as handle:
kwds['handle'] = handle
fn(*args, **kwds)
return
return wrapper
return decorator
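# Illustrative decorator usage (hypothetical test function; the decorator injects 'handle' into kwds):
#
#     @run_with_embedded_host_engine()
#     @run_only_with_live_gpus()
#     def test_something_embedded(handle=None, gpuIds=None):
#         pass  # test body uses the embedded host engine handle and the detected GPU ids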
class RunStandaloneHostEngine:
"""
This class is used as part of a "with" clause to start and stop an standalone host engine process
"""
_nvswitches_detected = None
def __init__(self, timeout=15, heArgs = None, profile_dir=None, heEnv=None): #DCGM_HE_PORT_NUMBER
self.hostEngineStarted = False
self.timeout = timeout
if option_parser.options.use_running_hostengine:
self.nvhost_engine = None
elif heArgs is None:
self.nvhost_engine = apps.NvHostEngineApp(profile_dir=profile_dir, heEnv=heEnv)
else:
self.nvhost_engine = apps.NvHostEngineApp(heArgs, profile_dir=profile_dir, heEnv=heEnv)
def __enter__(self):
if self.nvhost_engine is not None:
self.nvhost_engine.start(self.timeout)
assert self.nvhost_engine.getpid() != None, "start hostengine failed"
logger.info("standalone host engine started with pid %d" % self.nvhost_engine.getpid())
self.hostEngineStarted = True
set_connect_mode(DCGM_CONNECT_MODE_REMOTE)
return self.nvhost_engine
def __exit__(self, exception_type, exception, trace):
if self.nvhost_engine is not None:
if self.hostEngineStarted:
if self.nvhost_engine.poll() is None:
logger.info("Stopping standalone host engine")
self.nvhost_engine.terminate()
self.nvhost_engine.validate()
self.hostEngineStarted = False
else:
logger.info("Skipping standalone host engine terminate. Host engine was not running")
set_connect_mode(DCGM_CONNECT_MODE_UNKNOWN)
def run_with_standalone_host_engine(timeout=15, heArgs=None, passAppAsArg=False, heEnv=None):
"""
Run this test with the standalone host engine. This will start the host engine process before the test
and stop the host engine process after the test
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
with RunStandaloneHostEngine(timeout, heArgs, profile_dir=fn.__name__, heEnv=heEnv) as hostengineApp:
# pass the hostengine app to the test function in case they want to interact with it
if passAppAsArg:
kwds['hostengineApp'] = hostengineApp
fn(*args, **kwds)
return
return wrapper
return decorator
INJECTION_MODE_VAR = 'NVML_INJECTION_MODE'
def run_with_injection_nvml():
"""
Have DCGM load injection NVML instead of normal NVML
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
# This environment variable tells DCGM to load injection NVML
os.environ[INJECTION_MODE_VAR] = 'True'
fn(*args, **kwds)
del os.environ[INJECTION_MODE_VAR]
return
return wrapper
return decorator
def run_with_diag_small_fb_mode():
"""
Have DCGM diag run with smaller FB allocations to speed up tests that don't rely on DCGM Diag running at full scale
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
            # This environment variable tells the DCGM diagnostic to use smaller framebuffer allocations
os.environ[DIAG_SMALL_FB_MODE_VAR] = '1'
fn(*args, **kwds)
del os.environ[DIAG_SMALL_FB_MODE_VAR]
return
return wrapper
return decorator
def create_injection_nvml_gpus(dcgmHandle, count):
index = 0
created_indices = []
while len(created_indices) < count and index < dcgm_structs.DCGM_MAX_NUM_DEVICES:
try:
ret = dcgm_agent_internal.dcgmCreateNvmlInjectionGpu(dcgmHandle, index)
# We'll use the if statement, but really it throws an exception if it fails
if ret == dcgm_structs.DCGM_ST_OK:
created_indices.append(index)
index = index + 1
except Exception as e:
index = index + 1
return created_indices
class RunClientInitShutdown:
"""
This class is used as part of a "with" clause to initialize and shutdown the client API
"""
def __init__(self, pIpAddr = "127.0.0.1", persistAfterDisconnect=False):
self.clientAPIStarted = False
self.dcgm_handle = None
self.ipAddress = pIpAddr
self.persistAfterDisconnect = persistAfterDisconnect
def __enter__(self):
connectParams = dcgm_structs.c_dcgmConnectV2Params_v1()
if self.persistAfterDisconnect:
connectParams.persistAfterDisconnect = 1
else:
connectParams.persistAfterDisconnect = 0
dcgm_agent.dcgmInit()
for attempt in range(3):
try:
self.dcgm_handle = dcgm_agent.dcgmConnect_v2(self.ipAddress, connectParams)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID):
pass
else:
break
if not self.dcgm_handle:
raise Exception('failed connection to dcgm hostengine')
self.clientAPIStarted = True
return self.dcgm_handle
def __exit__(self, exception_type, exception, trace):
if self.clientAPIStarted:
try:
dcgm_agent.dcgmShutdown()
except dcgmExceptionClass(DCGM_ST_INIT_ERROR):
logger.info("Client API is already shut down")
self.clientAPIStarted = False
def run_with_initialized_client(ipAddress = "127.0.0.1"):
"""
Run test having called client init and then call shutdown on exit
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
with RunClientInitShutdown(ipAddress) as handle:
kwds['handle'] = handle
fn(*args, **kwds)
return
return wrapper
return decorator
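# Illustrative stacking with a standalone host engine (hypothetical test; a sketch of how the two
# decorators above are meant to combine: the host engine process is started first, then the client
# connects to it and 'handle' is passed into the test):
#
#     @run_with_standalone_host_engine(120)
#     @run_with_initialized_client()
#     def test_something_remote(handle=None):
#         pass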
def get_live_gpu_ids(handle):
"""
Get the gpu ids of live GPUs on the system. This works in embedded or remote mode
"""
gpuIdList = []
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
gpuIdList = dcgmSystem.discovery.GetAllSupportedGpuIds()
return gpuIdList
def get_live_gpu_count(handle):
return len(get_live_gpu_ids(handle))
def run_only_with_live_gpus():
"""
Only run this test if live gpus are present on the system
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if 'handle' in kwds:
gpuIds = get_live_gpu_ids(kwds['handle'])
else:
raise Exception("Not connected to remote or embedded host engine. Use appropriate decorator")
if len(gpuIds) < 1:
logger.warning("Skipping test that requires live GPUs. None were found")
else:
kwds['gpuIds'] = gpuIds
fn(*args, **kwds)
return
return wrapper
return decorator
def run_with_injection_gpus(gpuCount=1):
"""
Run this test with injection-only GPUs x gpuCount
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if 'handle' not in kwds:
raise Exception("Not connected to remote or embedded host engine. Use approriate decorator")
numGpus = len(dcgm_agent.dcgmGetAllDevices(kwds['handle']))
if numGpus + gpuCount >= dcgm_structs.DCGM_MAX_NUM_DEVICES:
skip_test("unable to add fake Gpu with more than %d gpus" % dcgm_structs.DCGM_MAX_NUM_DEVICES)
cfe = dcgm_structs_internal.c_dcgmCreateFakeEntities_v2()
cfe.numToCreate = gpuCount
logger.info("Injecting %u fake GPUs" % (gpuCount))
for i in range(0, gpuCount):
cfe.entityList[i].entity.entityGroupId = dcgm_fields.DCGM_FE_GPU
updated = dcgm_agent_internal.dcgmCreateFakeEntities(kwds['handle'], cfe)
gpuIds = []
for i in range(0, updated.numToCreate):
gpuIds.append(updated.entityList[i].entity.entityId)
kwds['gpuIds'] = gpuIds
fn(*args, **kwds)
return
return wrapper
return decorator
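# Illustrative usage (hypothetical test): fake GPUs are created on top of an embedded host engine
# and their ids are passed to the test as 'gpuIds'.
#
#     @run_with_embedded_host_engine()
#     @run_with_injection_gpus(gpuCount=4)
#     def test_with_fake_gpus(handle=None, gpuIds=None):
#         pass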
def run_with_injection_gpu_instances(totalInstances=1):
"""
Run this test with injection-only <totalInstances> GPU instances
This does not inject hierarchy now but should in the future
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if 'handle' not in kwds:
raise Exception("Not connected to remote or embedded host engine. Use approriate decorator")
if 'gpuIds' not in kwds:
raise Exception("Injected instances require GPU IDs")
gpuIds = kwds['gpuIds']
numGpus = len(dcgm_agent.dcgmGetAllDevices(kwds['handle']))
cfe = dcgm_structs_internal.c_dcgmCreateFakeEntities_v2()
cfe.numToCreate = totalInstances
for i in range(0, totalInstances):
cfe.entityList[i].entity.entityGroupId = dcgm_fields.DCGM_FE_GPU_I
# Set the parent to the first GPu in the test
cfe.entityList[i].parent.entityGroupId = dcgm_fields.DCGM_FE_GPU
cfe.entityList[i].parent.entityId = gpuIds[0]
updated = dcgm_agent_internal.dcgmCreateFakeEntities(kwds['handle'], cfe)
instanceIds = []
for i in range(0, updated.numToCreate):
instanceIds.append(updated.entityList[i].entity.entityId)
kwds['instanceIds'] = instanceIds
fn(*args, **kwds)
return
return wrapper
return decorator
def run_with_injection_gpu_compute_instances(totalCIs=1):
"""
Run this test with <totalCIs> compute instances
This does not inject hierarchy now but should in the future
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if 'handle' not in kwds:
raise Exception("Not connected to remote or embedded host engine. Use appropriate decorator")
if 'instanceIds' not in kwds:
raise Exception("Injected CIs require instance IDs")
instanceIds = kwds['instanceIds']
numGpus = len(dcgm_agent.dcgmGetAllDevices(kwds['handle']))
cfe = dcgm_structs_internal.c_dcgmCreateFakeEntities_v2()
cfe.numToCreate = totalCIs
instanceIndex = 0
numInstances = len(instanceIds)
for i in range(0, totalCIs):
cfe.entityList[i].entity.entityGroupId = dcgm_fields.DCGM_FE_GPU_CI
# Set the parents so that this compute instance has a parent that is part of the test
cfe.entityList[i].parent.entityGroupId = dcgm_fields.DCGM_FE_GPU_I
cfe.entityList[i].parent.entityId = instanceIds[instanceIndex]
#Make sure we increment the instanceIndex correctly: same if there's just one and wrap the increment
#if there are more compute instances than instances
if numInstances > 1:
instanceIndex = instanceIndex + 1
if numInstances < totalCIs and instanceIndex == numInstances:
instanceIndex = 0
updated = dcgm_agent_internal.dcgmCreateFakeEntities(kwds['handle'], cfe)
ciIds = []
for i in range(0, updated.numToCreate):
ciIds.append(updated.entityList[i].entity.entityId)
kwds['ciIds'] = ciIds
fn(*args, **kwds)
return
return wrapper
return decorator
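# Illustrative sketch of building a fake MIG hierarchy (hypothetical test): the fake GPU, GPU instance
# and compute instance decorators chain their outputs, so 'gpuIds' feeds 'instanceIds' which feeds 'ciIds'.
#
#     @run_with_embedded_host_engine()
#     @run_with_injection_gpus(gpuCount=1)
#     @run_with_injection_gpu_instances(totalInstances=2)
#     @run_with_injection_gpu_compute_instances(totalCIs=2)
#     def test_fake_mig_hierarchy(handle=None, gpuIds=None, instanceIds=None, ciIds=None):
#         pass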
# Long timeout by default, but the app is terminated once the test has concluded
def run_with_cuda_app(timeout=10000):
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if 'handle' not in kwds:
raise Exception("Not connected to remote or embedded host engine. Use appropriate decorator")
if 'gpuIds' not in kwds or len(kwds['gpuIds']) < 1:
raise Exception("cuda_app requires GPU ID")
handle = kwds['handle']
gpuId = kwds['gpuIds'][0]
updateFreq = 1000000
maxKeepAge = 3600.0 #one hour
maxKeepEntries = 0 #no limit
fieldId = dcgm_fields.DCGM_FI_DEV_PCI_BUSID
dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries)
dcgm_agent.dcgmUpdateAllFields(handle, 1)
values = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, gpuId, [fieldId,])
busId = values[0].value.str
params = [ "--ctxCreate", busId,
"--busyGpu", busId, str(timeout),
"--ctxDestroy", busId ]
app = apps.CudaCtxCreateAdvancedApp(params, env=get_cuda_visible_devices_env(handle, gpuId))
app.start(timeout * 2)
kwds['cudaApp'] = app
fn(*args, **kwds)
app.terminate()
return
return wrapper
return decorator
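# Illustrative usage (hypothetical test): the decorator starts a CUDA context-creating busy workload on
# the first GPU in 'gpuIds' and hands the running app to the test as 'cudaApp'.
#
#     @run_with_embedded_host_engine()
#     @run_only_with_live_gpus()
#     @run_with_cuda_app()
#     def test_under_cuda_load(handle=None, gpuIds=None, cudaApp=None):
#         pass  # e.g. verify utilization fields while cudaApp keeps the GPU busy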
def get_live_nvswitch_ids(handle):
"""
Get the entityIds of live NvSwitches on the system. This works in embedded or remote mode
"""
entityIdList = []
try:
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
entityIdList = dcgmSystem.discovery.GetEntityGroupEntities(dcgm_fields.DCGM_FE_SWITCH, True)
except dcgm_structs.DCGMError as e:
raise Exception("Not connected to remote or embedded host engine. Use appropriate decorator")
return entityIdList
def get_live_nvswitch_count(handle):
return len(get_live_nvswitch_ids(handle))
def run_only_with_live_nvswitches():
"""
Only run this test if live nvswitches are present on the system
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if 'handle' in kwds:
entityIdList = get_live_nvswitch_ids(kwds['handle'])
else:
raise Exception("Not connected to remote or embedded host engine. Use appropriate decorator")
if len(entityIdList) < 1:
logger.warning("Skipping test that requires live NV Switches. None were found")
else:
kwds['switchIds'] = entityIdList
fn(*args, **kwds)
return
return wrapper
return decorator
def run_with_injection_nvswitches(switchCount=1):
"""
Run this test with injection-only NvSwitches x switchCount
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if 'handle' not in kwds:
raise Exception("Not connected to remote or embedded host engine. Use approriate decorator")
numActiveSwitches = len(dcgm_agent.dcgmGetEntityGroupEntities(kwds['handle'], dcgm_fields.DCGM_FE_SWITCH, 0))
if numActiveSwitches + switchCount >= dcgm_structs.DCGM_MAX_NUM_SWITCHES:
skip_test("unable to add fake NvSwitch with more than %d NvSwitches" % dcgm_structs.DCGM_MAX_NUM_SWITCHES)
cfe = dcgm_structs_internal.c_dcgmCreateFakeEntities_v2()
cfe.numToCreate = switchCount
for i in range(0, switchCount):
cfe.entityList[i].entity.entityGroupId = dcgm_fields.DCGM_FE_SWITCH
updated = dcgm_agent_internal.dcgmCreateFakeEntities(kwds['handle'], cfe)
switchIds = []
for i in range(0, updated.numToCreate):
switchIds.append(updated.entityList[i].entity.entityId)
kwds['switchIds'] = switchIds
fn(*args, **kwds)
return
return wrapper
return decorator
def skip_unhealthy_mem(handle, gpuIds):
"""
Verifies that the DCGM health checks return healthy for all GPUs on live systems.
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetGroupWithGpuIds('testgroup', gpuIds)
groupObj.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_MEM)
systemObj.UpdateAllFields(1)
responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
#Check that our response comes back clean
if responseV4.overallHealth != dcgm_structs.DCGM_HEALTH_RESULT_PASS:
test_utils.skip_test("bad response.overallHealth %d. Are these GPUs really healthy?" % responseV4.overallHealth)
def watch_all_fields(handle,
gpuIds,
updateFreq=1000, # 1ms
maxKeepAge=86400.0,
maxKeepEntries=1000,
startTimestamp=0):
'''
Watch every field in DCGM and return a list of the fields that are watched.
This also calls to make sure that the watched fields are updated at least once
before returning.
'''
watchedFields = set()
mig_enabled = is_mig_mode_enabled()
for gpuId in gpuIds:
for fieldId in range(1, dcgm_fields.DCGM_FI_MAX_FIELDS):
# Accounting cannot be enabled for MIG mode: CUDANVML-153
if mig_enabled and fieldId == dcgm_fields.DCGM_FI_DEV_ACCOUNTING_DATA:
continue
# can't tell ahead of time which field Ids are valid from the python API so we must try/except watching
try:
dcgm_agent_internal.dcgmWatchFieldValue(handle,
gpuId=gpuId,
fieldId=fieldId,
updateFreq=updateFreq,
maxKeepAge=maxKeepAge,
maxKeepEntries=maxKeepEntries)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_REQUIRES_ROOT):
pass
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM):
pass
else:
watchedFields.add(fieldId)
dcgm_agent.dcgmUpdateAllFields(handle, True)
return watchedFields
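# Illustrative call (a sketch; assumes 'handle' and 'gpuIds' were provided by the decorators above):
#
#     watchedFieldIds = watch_all_fields(handle, gpuIds, updateFreq=100000)  # 100ms update frequency
#     # every field id in watchedFieldIds now has at least one sample in the cache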
def set_logging_state(enableLogging):
'''
Helper function to enable or disable logging. Call restore_logging_state() to
undo this call
'''
global noLogging, noLoggingBackup
noLoggingDesired = not enableLogging
    # Is our logging state already what we want?
if noLoggingDesired == noLogging:
return
noLogging = noLoggingDesired
logger.setup_environment()
def restore_logging_state():
#Restore the state of logging to what it was before set_logging_state()
global noLogging, noLoggingBackup
if noLogging == noLoggingBackup:
return
noLogging = noLoggingBackup
logger.setup_environment()
def run_subtest(subtestFn, *args, **kwargs):
#List that contains failings test to re-run with logging enabled
global noLogging
global reRunning
#Work around a race condition where the test framework can't connect to
#the host engine right away. See bug 200417787 for details.
maxDisconnectedRetries = 3
for retryCount in range(maxDisconnectedRetries+1):
if retryCount > 0:
logger.info("Retrying test %s time %d/%d due to not being connected to the host engine. War for bug 200417787" %
(subtestFn.__name__, retryCount, maxDisconnectedRetries))
disconnect_is_failure = False
if retryCount == maxDisconnectedRetries:
disconnect_is_failure = True #Fail if disconnected on the last retry
with SubTest("%s" % (subtestFn.__name__), disconnect_is_failure=disconnect_is_failure) as subtest:
subtestFn(*args, **kwargs)
if subtest.result != SubTest.NOT_CONNECTED:
break #Passed/failed for another reason. Break out of the loop
if subtest.result == SubTest.FAILED:
#Running failing tests with logging enabled
set_logging_state(True)
reRunning = True
logger.warning("Re-running failing test \"%s\" with logging enabled" % subtest.name)
with SubTest("%s" % (subtestFn.__name__)) as subtest:
subtestFn(*args, **kwargs)
restore_logging_state()
reRunning = False
def group_gpu_ids_by_sku(handle, gpuIds):
'''
Return a list of lists where the 2nd level list is each gpuId that is the same sku as each other
Example [[gpu0, gpu1], [gpu2, gpu3]]
In the above example, gpu0 and gpu1 are the same sku, and gpu2 and gpu3 are the same sku
'''
skuGpuLists = {}
for gpuId in gpuIds:
deviceAttrib = dcgm_agent.dcgmGetDeviceAttributes(handle, gpuId)
pciDeviceId = deviceAttrib.identifiers.pciDeviceId
if pciDeviceId in skuGpuLists:
skuGpuLists[pciDeviceId].append(gpuId)
else:
skuGpuLists[pciDeviceId] = [gpuId, ]
retList = []
for k in list(skuGpuLists.keys()):
retList.append(skuGpuLists[k])
#logger.info("skuGpuLists: %s, retList %s" % (str(skuGpuLists), str(retList)))
return retList
def exclude_non_compute_gpus():
'''
    Exclude display-only GPUs that are not suitable for compute tests, for example the NVIDIA T1000 on RedOctober.
This decorator must come after a decorator that provides a list of gpuIds like run_only_with_live_gpus
'''
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
gpuIds = []
for gpuId in kwds['gpuIds']:
deviceId = get_device_id(kwds['handle'], gpuId)
'''
Exclude non-compute GPUs here. Right now this is just
Nvidia T1000 GPU.
'''
if deviceId != 0x1fb0:
gpuIds.append(gpuId)
kwds['gpuIds'] = gpuIds
fn(*args, **kwds)
return
return wrapper
return decorator
def exclude_confidential_compute_gpus():
'''
Exclude Confidential Compute GPUs.
This decorator must come after a decorator that provides a list of gpuIds
like run_only_with_live_gpus. It may exclude ALL of them, and this should
    be tested for in another decorator.
'''
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
gpuIds = []
for gpuId in kwds['gpuIds']:
deviceAttrib = dcgm_agent.dcgmGetDeviceAttributes(kwds['handle'], gpuId)
if deviceAttrib.settings.confidentialComputeMode == 0:
gpuIds.append(gpuId)
if len(gpuIds) == 0:
logger.warning("All selected GPUs have the confidential compute mode enabled, which is not supported by this test. Therefore, all GPUs are excluded from the test.")
kwds['gpuIds'] = gpuIds
fn(*args, **kwds)
return
return wrapper
return decorator
def run_only_if_gpus_available():
'''
Decorator to skip tests if kwds['gpuIds'] is missing or empty.
'''
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if 'gpuIds' in kwds and len(kwds['gpuIds']) > 0:
result = fn(*args, **kwds)
else:
skip_test("this test does nothing if no GPUs are available")
return wrapper
return decorator
def for_all_same_sku_gpus():
'''
Run a test multiple times, passing a list of gpuIds that are the same SKU each time
This decorator must come after a decorator that provides a list of gpuIds like run_only_with_live_gpus
'''
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
gpuGroupList = group_gpu_ids_by_sku(kwds['handle'], kwds['gpuIds'])
for i, gpuIdList in enumerate(gpuGroupList):
with SubTest("GPU group %d. gpuIds: %s" % (i, str(gpuIdList))):
kwds2 = kwds
kwds2['gpuIds'] = gpuIdList
fn(*args, **kwds2)
return
return wrapper
return decorator
def set_max_power_limit(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
for gpuId in gpuIds:
## Add first GPU to the group
groupObj.AddGpu(gpuId)
## Get Min and Max Power limit on the group
attributes = systemObj.discovery.GetGpuAttributes(gpuId)
## Verify that power is supported on the GPUs in the group
if dcgmvalue.DCGM_INT32_IS_BLANK(attributes.powerLimits.maxPowerLimit):
skip_test("Needs Power limit to be supported on the GPU")
##Get the max Power Limit for the GPU
maxPowerLimit = attributes.powerLimits.maxPowerLimit
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.type = dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL
config_values.mPowerLimit.val = maxPowerLimit
##Set the max Power Limit for the group
groupObj.config.Set(config_values)
##Remove the GPU from the group
groupObj.RemoveGpu(gpuId)
groupObj.Delete()
def run_with_max_power_limit_set():
'''
Sets the power limit of all the GPUs in the list to the max Power Limit.
'''
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
set_max_power_limit(kwds['handle'], kwds['gpuIds'])
fn(*args, **kwds)
return
return wrapper
return decorator
def log_gpu_information(handle):
'''
Log information about the GPUs that DCGM is going to run against
Returns: Number of DCGM-supported GPUs in the system
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
allGpuIds = dcgmSystem.discovery.GetAllGpuIds()
allDcgmGpuIds = dcgmSystem.discovery.GetAllSupportedGpuIds()
logger.info("All GPU IDs: %s" % str(allGpuIds))
logger.info("DCGM-Supported GPU IDs: %s" % str(allDcgmGpuIds))
logger.info("GPU Info:")
for gpuId in allGpuIds:
gpuAttrib = dcgmSystem.discovery.GetGpuAttributes(gpuId)
logger.info("gpuId %d, name %s, pciBusId %s" % (gpuId, gpuAttrib.identifiers.deviceName, gpuAttrib.identifiers.pciBusId))
log_instance_information(dcgmHandle.handle)
return len(allDcgmGpuIds)
def log_instance_information(handle):
hierarchy = dcgm_agent.dcgmGetGpuInstanceHierarchy(handle)
# See Query.cpp::TopologicalSort for sorting details
    entities = sorted(hierarchy.entityList[:hierarchy.count],
                      key=lambda ent: (ent.info.nvmlGpuIndex, ent.info.nvmlInstanceId, ent.entity.entityGroupId, ent.info.nvmlComputeInstanceId))
    for entity in entities:
        entityId = entity.entity.entityId
        if entity.entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_I:
            logger.info("GPU[%d] GI[%d] entityId %d" % (entity.info.nvmlGpuIndex, entity.info.nvmlInstanceId, entityId))
        if entity.entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_CI:
            logger.info("GPU[%d] GI[%d] CI[%d] entityId %d" % (entity.info.nvmlGpuIndex, entity.info.nvmlInstanceId, entity.info.nvmlComputeInstanceId, entityId))
def are_all_gpus_dcgm_supported(handle=None):
# type: (pydcgm.DcgmHandle) -> (bool, list[int])
"""
Determines if there are DCGM Supported GPUs
:param handle: DCGM handle or None
:return: Tuple of bool and list of ids. If all GPUs are supported then result is (True, [list of GPU ids]),
otherwise that is (False, None)
"""
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
allGpuIds = dcgmSystem.discovery.GetAllGpuIds()
allDcgmGpuIds = dcgmSystem.discovery.GetAllSupportedGpuIds()
if allGpuIds != allDcgmGpuIds:
return False, None
else:
return True, allDcgmGpuIds
def run_only_with_all_supported_gpus():
"""
This decorator skips a test if allGpus != supportedGpus.
This decorator provides gpuIds list of live GPUs to the wrapped function
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
(all_gpus_supported, gpu_ids) = are_all_gpus_dcgm_supported(kwds.get('handle', None))
if not all_gpus_supported:
skip_test("Unsupported GPU(s) detected, skipping test")
else:
if len(gpu_ids) < 1:
logger.warning("Skipping test that requires live GPUs. None were found")
else:
kwds['gpuIds'] = gpu_ids
fn(*args, **kwds)
return
return wrapper
return decorator
def get_device_names(gpu_ids, handle=None):
dcgm_handle = pydcgm.DcgmHandle(handle=handle)
dcgm_system = dcgm_handle.GetSystem()
for gpuId in gpu_ids:
attributes = dcgm_system.discovery.GetGpuAttributes(gpuId)
yield (str(attributes.identifiers.deviceName).lower(), gpuId)
def skip_denylisted_gpus(denylist=None):
"""
    This decorator gets the gpuIds list and excludes GPUs whose names are on the denylist
:type denylist: [string]
:return: decorated function
"""
if denylist is None:
denylist = {}
else:
denylist = {b.lower() for b in denylist}
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if (denylist is not None) and ('gpuIds' in kwargs):
gpu_ids = kwargs['gpuIds']
passed_ids = []
for gpuName, gpuId in get_device_names(gpu_ids=gpu_ids, handle=kwargs.get('handle', None)):
if gpuName not in denylist:
passed_ids.append(gpuId)
else:
logger.info(
"GPU %s (id: %d) is on the denylist; it can't participate in the test." % (gpuName, gpuId))
kwargs['gpuIds'] = passed_ids
fn(*args, **kwargs)
return
return wrapper
return decorator
def run_with_developer_mode(msg="Use developer mode to enable this test."):
"""
Run test only when developer mode is set.
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if not option_parser.options.developer_mode:
skip_test(msg)
fn(*args, **kwds)
return
return wrapper
return decorator
def are_any_nvlinks_down(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
#Will throw an exception on API error
linkStatus = systemObj.discovery.GetNvLinkLinkStatus()
#Further sanity checks
for i in range(linkStatus.numGpus):
for j in range(dcgm_structs.DCGM_NVLINK_MAX_LINKS_PER_GPU):
ls = linkStatus.gpus[i].linkState[j]
if ls == dcgm_structs.DcgmNvLinkLinkStateDown:
return True
for i in range(linkStatus.numNvSwitches):
for j in range(dcgm_structs.DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH):
ls = linkStatus.nvSwitches[i].linkState[j]
if ls == dcgm_structs.DcgmNvLinkLinkStateDown:
return True
return False
def skip_test_if_any_nvlinks_down(handle):
if are_any_nvlinks_down(handle):
skip_test("Skipping test due to a NvLink being down")
def is_nvidia_fabricmanager_running():
"""
Return True if nvidia-fabricmanager service is running on the system
"""
cmd = 'systemctl status nvidia-fabricmanager'
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
out_buf, _ = p.communicate()
out = out_buf.decode('utf-8')
if "running" in out.rstrip():
return True
else:
return False
def get_build_type():
"""
Return the build type: Debug or Release
"""
rawVersionInfo = dcgm_agent.dcgmVersionInfo()
for kv in str.split(rawVersionInfo.rawBuildInfoString, ';'):
if (kv):
key, value = str.split(kv, ':', 1)
if key == "buildtype":
return value
return ""
def is_framework_compatible():
"""
    Checks whether the Test Framework is using the expected build version of DCGM
"""
#initialize the DCGM library globally ONCE
try:
dcgm_structs._dcgmInit(utils.get_testing_framework_library_path())
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_LIBRARY_NOT_FOUND):
print("DCGM Library hasn't been found in the system, is the DCGM package correctly installed?", file=sys.stderr)
sys.exit(1)
rawVersionInfo = dcgm_agent.dcgmVersionInfo()
versionInfo = {}
for kv in str.split(rawVersionInfo.rawBuildInfoString, ';'):
if (kv):
key, value = str.split(kv, ':', 1)
versionInfo[key] = value
def validate_build_info(key, expected):
if key not in versionInfo:
logger.warning("Key %s was expected but not provided in the build info" % key)
return False
if versionInfo[key] != expected:
logger.warning(
"{key} equals to '{value}', but the test framework was built with {expected} instead".format(
key=key,
value=versionInfo[key],
expected=expected))
return False
return True
# version : DCGM Version.<br>
# arch : Target DCGM Architecture.<br>
# buildid : Build ID. Usually a sequential number.<br>
# commit : Commit ID (Usually a git commit hash).<br>
# branch : Branch (Usually a git branch that was used for the build).<br>
# builddate : Date of the build.<br>
# buildplatform : Platform where the build was made.<br>
# buildtype : Build Type (Debug, Release, etc.)
# DCGM_VERSION
# TARGET_ARCH
# BUILD_ID
# BUILD_DATE
# GIT_COMMIT
# GIT_BRANCH
# BUILD_PLATFORM
if not validate_build_info("version", version.DCGM_VERSION):
return False
if not validate_build_info("commit", version.GIT_COMMIT):
return False
if not validate_build_info("arch", version.TARGET_ARCH):
return False
if not validate_build_info("buildtype", version.BUILD_TYPE):
return False
return True
def is_test_environment_sane():
"""
Checks whether the SUT (system under test) has any obvious issues
before allowing the test framework to run
"""
print("\n########### VERIFYING DCGM TEST ENVIRONMENT ###########\n")
############## INFOROM CORRUPTION ##############
nvsmiObj = nvidia_smi_utils.NvidiaSmiJob()
inforomCorruption = nvsmiObj.CheckInforom()
if inforomCorruption:
logger.warning("Corrupted Inforom Detected, exiting framework...\n")
return False
############## PAGE RETIREMENT ##############
pageRetirementBad = nvsmiObj.CheckPageRetirementErrors()
if pageRetirementBad:
logger.warning("Page Retirement issues have been detected, exiting framework...\n")
return False
return True
def run_with_persistence_mode_on():
"""
    Run this test with persistence mode on. This function runs "nvidia-smi -pm 1"
before the test is run
"""
def decorator(fn):
cmd = ['nvidia-smi', '-pm', '1']
@wraps(fn)
def wrapper(*args, **kwds):
fnull = open(os.devnull, 'w')
if not find_executable(cmd[0]):
logger.info("Could not find executable " + cmd[0] + ". Skipping enabling persistence mode")
elif not utils.is_root():
logger.info("Not running as root. Skipping enabling persistence mode")
else:
logger.info("Enabling persistence mode")
p = Popen(cmd, stdout=fnull, stderr=fnull, close_fds=True)
p.wait()
fn(*args, **kwds)
return
return wrapper
return decorator
def get_device_id(handle, gpuId):
attrs = dcgm_agent.dcgmGetDeviceAttributes(handle, gpuId)
return attrs.identifiers.pciDeviceId >> 16
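# Note: identifiers.pciDeviceId is the combined 32-bit PCI id, with the 16-bit
# device id in the upper half and the 16-bit vendor id (0x10de for NVIDIA) in
# the lower half, so the shift above isolates the device id. For example, a
# combined id of 0x1df610de would return 0x1df6 (one of the ids checked in
# is_throttling_masked_by_nvvs below). The concrete value is illustrative only.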
def is_throttling_masked_by_nvvs(handle, gpuId, throttle_type):
deviceId = get_device_id(handle, gpuId)
if deviceId == 0x102d or deviceId == 0x1eb8:
return True
elif deviceId == 0x1df6:
return throttle_type == dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL
elif deviceId == 0x1e30:
ignored = [ dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_THERMAL,
dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL ]
return throttle_type in ignored
return False
def is_mig_mode_enabled():
cmd = 'nvidia-smi mig -lgi'
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, universal_newlines=True)
out, err = p.communicate()
# Return false if we detect MIG=off or that we are running on an older
# driver that does not support MIG
if out.find("No MIG-enabled devices found.") != -1 or out.find("Invalid combination of input arguments.") != -1:
return False
else:
return True
def run_only_if_mig_is_disabled():
'''
Decorator to skip tests that are unsupported or not yet supported with MIG mode enabled
'''
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if is_mig_mode_enabled():
skip_test("This test is not yet supported while MIG mode is enabled.")
else:
result = fn(*args, **kwds)
return wrapper
return decorator
def run_only_if_mig_is_enabled():
'''
Decorator to skip tests that only matter if MIG mode is enabled
'''
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if not is_mig_mode_enabled():
skip_test("this test does nothing if MIG mode is not enabled")
else:
result = fn(*args, **kwds)
return wrapper
return decorator
def is_mig_incompatible_failure(failure_msg):
mig_incompatible_str = "MIG configuration is incompatible with the diagnostic because it prevents access to the entire GPU."
pos = failure_msg.find(mig_incompatible_str)
# Return true if this is the end of the error string
return pos != -1 and pos + len(mig_incompatible_str) == len(failure_msg) - 1
def diag_execute_wrapper(dd, handle):
try:
response = dd.Execute(handle)
return response
except dcgm_structs.DCGMError as e:
if is_mig_incompatible_failure(str(e)):
skip_test("Skipping this test because MIG is configured incompatibly (preventing access to the whole GPU)")
else:
raise e
def action_validate_wrapper(runDiagInfo, handle, runDiagVersion=dcgm_structs.dcgmRunDiag_version7):
try:
response = dcgm_agent.dcgmActionValidate_v2(handle, runDiagInfo, runDiagVersion)
return response
except dcgm_structs.DCGMError as e:
if is_mig_incompatible_failure(str(e)):
skip_test("Skipping this test because MIG is configured incompatibly (preventing access to the whole GPU)")
else:
raise e
def run_only_if_checking_libraries():
'''
Decorator to only run a test if we're verifying the modules
'''
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if option_parser.options.no_library_check:
skip_test("The test framework has been run with --no-library-check, skipping this test.")
else:
result = fn(*args, **kwds)
return wrapper
return decorator
def run_with_logging_on():
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
set_logging_state(True)
fn(*args, **kwds)
restore_logging_state()
return
return wrapper
return decorator
def set_nvvs_bin_path():
'''
Helper to make sure that NVVS_BIN_PATH is set so that dcgmi diag will actually run
'''
dirOfThisFile = os.path.dirname(os.path.realpath(__file__))
nvvsDir = os.path.join(dirOfThisFile, 'apps/nvvs')
if (not os.path.isdir(nvvsDir)) or (not os.path.isfile(nvvsDir + '/nvvs')):
logger.warning("NVVS is missing from the test framework install. Hopefully it's installed. Looked in " + nvvsDir)
else:
logger.debug("NVVS directory: %s" % nvvsDir)
os.environ['NVVS_BIN_PATH'] = nvvsDir #The env variable parser in DcgmDiagManager is only the directory
def run_for_each_gpu_individually():
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
gpu_ids = kwargs['gpuIds']
del kwargs['gpuIds']
for gpu_id in gpu_ids:
kwargs['gpuId'] = gpu_id
try:
fn(*args, **kwargs)
except TestSkipped as skip:
logger.info("Skipping for gpuId %u due to %s" % (gpu_id, skip))
continue
return wrapper
return decorator
def with_service_account(serviceAccountName):
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
try:
os.system('groupadd -r -f %s' % serviceAccountName)
os.system('useradd -r -g %s -s /usr/sbin/nologin -M %s' % (serviceAccountName, serviceAccountName))
fn(*args, **kwargs)
finally:
os.system('userdel %s' % serviceAccountName)
return wrapper
return decorator
def gpu_supports_gpm(handle, gpuId):
"""
Returns true if the given gpuId supports GPU Performance Monitoring (GPM). false if not
"""
entityPairList = [dcgm_structs.c_dcgmGroupEntityPair_t(dcgm_fields.DCGM_FE_GPU, gpuId), ]
flags = dcgm_structs.DCGM_FV_FLAG_LIVE_DATA
fieldIds = [dcgm_fields.DCGM_FI_DEV_CUDA_COMPUTE_CAPABILITY, ]
fieldValues = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entityPairList, fieldIds, flags)
assert fieldValues[0].status == 0
computeCapability = fieldValues[0].value.i64
if computeCapability == 0x090000:
return True
else:
return False
def filter_sku(skus):
"""
    This decorator gets the gpuIds list and excludes GPUs whose SKU (the first
    four hex digits of the PCI combined id reported by nvidia-smi) is in skus.
This decorator must come after a decorator that provides a list of gpuIds
like run_only_with_live_gpus.
:type skus: [string]
:return: decorated function
"""
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
gpuIds = kwargs['gpuIds']
filteredGpuIds = []
nvidiaSmi = nvidia_smi_utils.NvidiaSmiJob()
nvidiaSmi.QueryNvidiaSmiXml()
for gpuId in gpuIds:
if gpuId in nvidiaSmi.m_data:
if dcgm_fields.DCGM_FI_DEV_PCI_COMBINED_ID in nvidiaSmi.m_data[gpuId]:
sku = nvidiaSmi.m_data[gpuId][dcgm_fields.DCGM_FI_DEV_PCI_COMBINED_ID][0][0:4]
if sku in skus:
logger.info("GPU %d sku %s can't participate in the test." % (gpuId, sku))
continue
filteredGpuIds.append(gpuId);
kwargs['gpuIds'] = filteredGpuIds
fn(*args, **kwargs)
return
return wrapper
return decorator
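# Illustrative use of filter_sku (not part of the framework itself). The SKU is
# the first four hex digits of the PCI combined id reported by nvidia-smi;
# "20F1" below is a hypothetical value, and additional provider decorators may
# be required in practice.
#
#   @run_only_with_live_gpus()
#   @filter_sku(["20F1"])
#   def test_excluding_one_sku(handle, gpuIds):
#       ...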
| DCGM-master | testing/python3/test_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_fields
import dcgm_structs
import time
# Stores the parameters in a field value of type DCGM_FT_INT64
def get_field_value_i64(fieldId, value, offset, entityGroupId=dcgm_fields.DCGM_FE_GPU):
field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
field.fieldId = fieldId
field.status = 0
field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
field.ts = int((time.time()+offset) * 1000000.0)
field.value.i64 = value
return field
# Stores the parameters in a field value of type DCGM_FT_DOUBLE
def get_field_value_fp64(fieldId, value, offset, entityGroupId=dcgm_fields.DCGM_FE_GPU):
field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
field.fieldId = fieldId
field.status = 0
field.fieldType = ord(dcgm_fields.DCGM_FT_DOUBLE)
field.ts = int((time.time()+offset) * 1000000.0)
field.value.dbl = value
return field
'''
inject_nvml_value - injects a value into injection NVML
handle - the handle to DCGM
gpuId - the id of the GPU we're injecting
fieldId - the DCGM field id of what we're injecting into NVML
value - the value we're injecting
offset - the offset in seconds for the timestamp the value should have
'''
def inject_nvml_value(handle, gpuId, fieldId, value, offset):
fieldType = dcgm_fields.DcgmFieldGetById(fieldId).fieldType
if fieldType == dcgm_fields.DCGM_FT_INT64:
field = get_field_value_i64(fieldId, value, offset)
ret = dcgm_agent_internal.dcgmInjectEntityFieldValueToNvml(handle, dcgm_fields.DCGM_FE_GPU, gpuId, field)
else:
field = get_field_value_fp64(fieldId, value, offset)
ret = dcgm_agent_internal.dcgmInjectEntityFieldValueToNvml(handle, dcgm_fields.DCGM_FE_GPU, gpuId, field)
return ret
'''
inject_value - injects a value into DCGM's cache
handle - the handle to DCGM
entityId - the id of the entity we're injecting the value for
fieldId - the id of the field we're injecting a value into
value - the value we're injecting
offset - the offset - in seconds - for the timestamp the value should have
verifyInsertion - True if we should fail if the value couldn't be injected, False = ignore. (defaults to True)
entityType - the type of entity we're injecting the value for, defaults to GPU
repeatCount - the number of repeated times we should inject the value, defaults to 0, meaning 1 injection
repeatOffset - how many seconds to increment the offset by in each subsequent injection
'''
def inject_value(handle, entityId, fieldId, value, offset, verifyInsertion=True,
entityType=dcgm_fields.DCGM_FE_GPU, repeatCount=0, repeatOffset=1):
fieldType = dcgm_fields.DcgmFieldGetById(fieldId).fieldType
if fieldType == dcgm_fields.DCGM_FT_INT64:
ret = inject_field_value_i64(handle, entityId, fieldId, value, offset, entityGroupId=entityType)
for i in range(0, repeatCount):
if ret != dcgm_structs.DCGM_ST_OK:
# Don't continue inserting if it isn't working
break
offset = offset + repeatOffset
ret = inject_field_value_i64(handle, entityId, fieldId, value, offset, entityGroupId=entityType)
elif fieldType == dcgm_fields.DCGM_FT_DOUBLE:
ret = inject_field_value_fp64(handle, entityId, fieldId, value, offset, entityGroupId=entityType)
for i in range(0, repeatCount):
if ret != dcgm_structs.DCGM_ST_OK:
# Don't continue inserting if it isn't working
break
offset = offset + repeatOffset
ret = inject_field_value_fp64(handle, entityId, fieldId, value, offset, entityGroupId=entityType)
else:
assert False, "Cannot inject field type '%s', only INT64 and DOUBLE are supported" % fieldType
if verifyInsertion:
assert ret == dcgm_structs.DCGM_ST_OK, "Could not inject value %s in field id %s" % (value, fieldId)
return ret
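# A minimal usage sketch (illustrative, not part of this module): inject four
# XID-error samples, one second apart, starting five seconds in the future.
# The gpuId and the injected value are assumptions for the example only.
#
#   inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_XID_ERRORS, 31, 5,
#                verifyInsertion=True, repeatCount=3, repeatOffset=1)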
# Injects a field value of type DCGM_FT_INT64 into DCGM's cache
def inject_field_value_i64(handle, entityId, fieldId, value, offset, entityGroupId=dcgm_fields.DCGM_FE_GPU):
field = get_field_value_i64(fieldId, value, offset, entityGroupId)
return dcgm_agent_internal.dcgmInjectEntityFieldValue(handle, entityGroupId, entityId, field)
# Injects a field value of type DCGM_FT_DOUBLE into DCGM's cache
def inject_field_value_fp64(handle, entityId, fieldId, value, offset, entityGroupId=dcgm_fields.DCGM_FE_GPU):
field = get_field_value_fp64(fieldId, value, offset, entityGroupId)
return dcgm_agent_internal.dcgmInjectEntityFieldValue(handle, entityGroupId, entityId, field)
| DCGM-master | testing/python3/dcgm_field_injection_helpers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.dcgm_client_main import main
from DcgmJsonReader import DcgmJsonReader
from socket import socket, AF_INET, SOCK_DGRAM
# Displayed to the user
FLUENTD_NAME = 'Fluentd'
DEFAULT_FLUENTD_PORT = 24225
# Fluentd Configuration
# =====================
# In order to use this client, Fluentd needs to accept json over udp.
# The default port is 24225
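#
# A possible matching Fluentd source section (an assumption based on the
# standard in_udp plugin; it is not shipped with DCGM):
#
#   <source>
#     @type udp
#     tag dcgm
#     port 24225
#     bind 0.0.0.0
#     <parse>
#       @type json
#     </parse>
#   </source>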
class DcgmFluentd(DcgmJsonReader):
###########################################################################
def __init__(self, publish_hostname, publish_port, **kwargs):
self.m_sock = socket(AF_INET, SOCK_DGRAM)
self.m_dest = (publish_hostname, publish_port)
super(DcgmFluentd, self).__init__(**kwargs)
###########################################################################
def SendToFluentd(self, payload):
self.m_sock.sendto(payload, self.m_dest)
###########################################################################
def CustomJsonHandler(self, outJson):
self.SendToFluentd(outJson)
if __name__ == '__main__': # pragma: no cover
main(DcgmFluentd, FLUENTD_NAME, DEFAULT_FLUENTD_PORT, add_target_host=True)
| DCGM-master | testing/python3/dcgm_fluentd.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import os
import re
import string
import apps
import shlex
import dcgm_structs
import dcgm_agent
import dcgm_agent_internal
import utils
import test_utils
import logger
import option_parser
import time
import subprocess
import nvidia_smi_utils
from sys import version as python_version
from tests.nvswitch_tests import test_nvswitch_utils
def log_environment_info():
if utils.is_linux():
logger.info("Xorg running: %s" % test_utils.is_xorg_running())
logger.info("Python version: %s" % python_version.split(None, 1)[0])
logger.info("Platform identifier: %s" % utils.platform_identifier)
logger.info("Bare metal: %s" % utils.is_bare_metal_system())
logger.info("Running as user: %s" % getpass.getuser())
rawVersionInfo = dcgm_agent.dcgmVersionInfo()
logger.info("Build info: %s" % rawVersionInfo.rawBuildInfoString)
logger.info("Mig: %s" % test_utils.is_mig_mode_enabled())
logger.debug("ENV : %s" % "\n".join(list(map(str, sorted(os.environ.items())))))
##################################################################################
### Kills the specified processes. If murder is specified, then they are kill -9'ed
### instead of nicely killed.
##################################################################################
def kill_process_ids(process_ids, murder):
running = False
for pid in process_ids:
if not pid:
break
running = True
if murder:
cmd = 'kill -9 %s' % pid
else:
cmd = 'kill %s' % pid
runner = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, error = runner.communicate()
return running
##################################################################################
### Cleans up the hostengine if needed. If we can't clean it up, then we will
### abort the testing framework.
##################################################################################
def kill_hostengine_if_needed():
running = False
need_to_validate = False
for i in range(0,2):
process_ids = test_utils.check_for_running_hostengine_and_log_details(True)
running = kill_process_ids(process_ids, False)
if running == False:
break
need_to_validate = True
time.sleep(.5)
if running:
for i in range(0,2):
process_ids = test_utils.check_for_running_hostengine_and_log_details(True)
running = kill_process_ids(process_ids, True)
msg = "Cannot run test! An instance of nv-hostengine is running and cannot be killed."
msg += " Ensure nv-hostengine is stopped before running the tests."
pids = test_utils.check_for_running_hostengine_and_log_details(False)
assert not pids, msg
def run_tests():
'''
    Run every discovered test module against an embedded or standalone host engine,
    restoring default state between tests.
'''
with test_utils.SubTest("Main"):
log_environment_info()
test_utils.RestoreDefaultEnvironment.restore_env()
try:
dcgm_structs._dcgmInit(utils.get_testing_framework_library_path())
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_LIBRARY_NOT_FOUND):
logger.warning("DCGM Library hasn't been found in the system, is the driver correctly installed?")
if utils.is_linux() and utils.is_32bit() and utils.is_system_64bit():
# 32bit test on 64bit system
logger.warning("Make sure that you've installed driver with both 64bit and 32bit binaries (e.g. not -internal.run or -no-compact32.run)")
raise
if option_parser.options.use_running_hostengine:
with test_utils.RunStandaloneHostEngine() as handle:
dcgmGpuCount = test_utils.log_gpu_information(handle)
if dcgmGpuCount < 1:
logger.error("No GPUs on DCGM's GPU allowlist found. Skipping tests.")
return
else:
with test_utils.RunEmbeddedHostEngine() as handle:
dcgmGpuCount = test_utils.log_gpu_information(handle)
if dcgmGpuCount < 1:
logger.error("No GPUs on DCGM's GPU allowlist found. Skipping tests.")
return
# Persistence mode is required
(_, error) = nvidia_smi_utils.enable_persistence_mode()
if error:
logger.error(error)
return
with test_utils.SubTest("restore state", quiet=True):
test_utils.RestoreDefaultEnvironment.restore() # restore the nvml settings
test_content = test_utils.get_test_content()
nvswitchModuleCounter = 0
try:
for module in test_content:
# Attempt to clean up stranded processes instead of aborting
kill_hostengine_if_needed()
with test_utils.SubTest("module %s" % module[0].__name__):
for function in module[1]:
test_utils.run_subtest(function)
with test_utils.SubTest("%s - restore state" % (function.__name__), quiet=True):
test_utils.RestoreDefaultEnvironment.restore()
finally:
# SubTest might return KeyboardInterrupt exception. We should try to restore
# state before closing
with test_utils.SubTest("restore state", quiet=True):
test_utils.RestoreDefaultEnvironment.restore()
#dcgm_structs.dcgmShutdown()
_test_info_split_non_verbose = re.compile("\n *\n") # Matches empty line that separates short from long version of function_doc
_test_info_split_verbose_first_newlines = re.compile("^[\n ]*\n") # Matches empty lines at the beginning of string
_test_info_split_verbose_last_newlines = re.compile("[\n ]*$") # Matches empty lines at the end of the string
def print_test_info():
"""
    Print the name and description (docstring) of every discovered test function.
"""
#Convert module subdirectory into module dot path like tests/nvvs/x => tests.nvvs.x
testDirWithDots = test_utils.test_directory.replace("/", ".")
test_content = test_utils.get_test_content()
for module in test_content:
module_name = module[0].__name__
        module_name = module_name.replace("%s." % testDirWithDots, "", 1) # all tests live under the test directory module; there's no use in printing that prefix
for function in module[1]:
function_name = function.__name__
function_doc = function.__doc__
if function_doc is None:
# verbose output uses indentation of the original string
function_doc = " Missing doc"
if option_parser.options.verbose:
# remove new lines at the beginning and end of the function_doc
function_doc = _test_info_split_verbose_first_newlines.sub("", function_doc)
function_doc = _test_info_split_verbose_last_newlines.sub("", function_doc)
print("%s.%s:\n%s\n" % (module_name, function_name, function_doc))
else:
# It's non verbose output so just take the first part of the description (up to first double empty line)
function_doc = _test_info_split_non_verbose.split(function_doc)[0]
# remove spaces at beginning of each line (map strip), remove empty lines (filter bool) and make it one line (string join)
function_doc = " ".join(list(filter(bool, list(map(string.strip, function_doc.split("\n"))))))
print("%s.%s:\n\t%s" % (module_name, function_name, function_doc))
| DCGM-master | testing/python3/run_tests.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_agent
class DcgmHandle:
'''
Class to encapsulate a handle to DCGM and global methods to control + query the host engine
'''
def __init__(self, handle=None, ipAddress=None,
opMode=dcgm_structs.DCGM_OPERATION_MODE_AUTO, persistAfterDisconnect=False,
unixSocketPath=None, timeoutMs=0):
'''
Constructor
handle is an existing handle from dcgmInit(). Pass None if you want this object to handle DCGM initialization for you
ipAddress is the host to connect to. None = start embedded host engine
opMode is a dcgm_structs.DCGM_OPERATION_MODE_* constant for how the host engine should run (embedded mode only)
persistAfterDisconnect (TCP-IP connections only) is whether the host engine should persist all of our watches
after we disconnect. 1=persist our watches. 0=clean up after our connection
        unixSocketPath is a path on the local filesystem to a unix domain socket that the host engine is listening on.
This option is mutually exclusive with ipAddress
timeoutMs is how long to wait for TCP/IP or Unix domain connections to establish in ms. 0=Default timeout (5000ms)
'''
self._handleCreated = False
self._persistAfterDisconnect = persistAfterDisconnect
if handle is not None:
self.handle = handle
return
self._ipAddress = ipAddress
#Can't provide both unix socket and ip address
if ipAddress is not None and unixSocketPath is not None:
raise dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM)
#Initialize the DCGM client library
dcgm_structs._dcgmInit()
dcgm_agent.dcgmInit() #Not harmful to call this multiple times in a process
#If neither ipAddress nor unixSocketPath are present, start an embedded host engine
if ipAddress is None and unixSocketPath is None:
self.handle = dcgm_agent.dcgmStartEmbedded(opMode)
self.isEmbedded = True
self._handleCreated = True
return
#Set up connection parameters. We're connecting to something
connectParams = dcgm_structs.c_dcgmConnectV2Params_v2()
connectParams.version = dcgm_structs.c_dcgmConnectV2Params_version
connectParams.timeoutMs = timeoutMs
if self._persistAfterDisconnect:
connectParams.persistAfterDisconnect = 1
else:
connectParams.persistAfterDisconnect = 0
if ipAddress is not None:
connectToAddress = ipAddress
connectParams.addressIsUnixSocket = 0
else:
connectToAddress = unixSocketPath
connectParams.addressIsUnixSocket = 1
self.handle = dcgm_agent.dcgmConnect_v2(connectToAddress, connectParams)
self.isEmbedded = False
self._handleCreated = True
def __del__(self):
'''
Destructor
'''
if self._handleCreated:
self.Shutdown()
def GetSystem(self):
'''
Get a DcgmSystem instance for this handle
'''
return pydcgm.DcgmSystem(self)
def __StopDcgm__(self):
'''
Shuts down either the hostengine or the embedded server
'''
if self.isEmbedded:
dcgm_agent.dcgmStopEmbedded(self.handle)
else:
dcgm_agent.dcgmDisconnect(self.handle)
def Shutdown(self):
'''
Shutdown DCGM hostengine
'''
if not self._handleCreated:
return
try:
self.__StopDcgm__()
except AttributeError as e:
# Due to multi-threading, sometimes this is called after the modules have been unloaded, making
# dcgm_agent effectively NoneType and resulting in this error being thrown.
pass
self._handleCreated = False
self.handle = None
@staticmethod
def Unload():
'''
Unload DCGM, removing any memory it is pointing at. Use this if you really
want DCGM gone from your process. Shutdown() only closes the connection/embedded host engine
        that was created in __init__().
'''
dcgm_agent.dcgmShutdown()
def GetIpAddress(self):
'''
Returns the IP address associated with this handle. None=embedded connection
'''
return self._ipAddress
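# A minimal usage sketch (illustrative, not part of this module): connect to a
# remote host engine over TCP/IP, query the supported GPUs, then disconnect.
# The address and timeout below are assumptions for the example only.
#
#   handle = DcgmHandle(ipAddress='127.0.0.1', timeoutMs=5000)
#   system = handle.GetSystem()
#   gpuIds = system.discovery.GetAllSupportedGpuIds()
#   handle.Shutdown()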
| DCGM-master | testing/python3/DcgmHandle.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dcgm_fields import *
from dcgm_fields_internal import *
import sys
class CollectdMetadata:
'''
Constructor
@params:
    name: string identifying the dcgm field. The field_name as opposed to
    the field_id.
kind: collectd type string.
used: a bool indicating whether or not the field is to be defined in
a collectd types.db file when GenerateCollectdTypesDB() is called
(generally if this file is run as a python3 mainline). We enumerate
all the dcgm fields, but only generate types.db records for those
supported at the current time. Others may or may not have correct
    collectd type definitions (generally one might be a gauge where it
is more correctly a counter). The idea is that an intrepid user may
enable generation of additional dcgm fields that they wish to collect
but are not officially supported yet.
'''
def __init__(self, name, kind, used = False):
self.name = name
self.kind = kind
self.used = used
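# For orientation (hedged, based on the standard collectd types.db format): each
# 'kind' string used in the table below is a data-source specification of the form
#   <ds-name>:<ds-type>:<min>:<max>
# where 'U' means the bound is unspecified. GenerateCollectdTypesDB() is then
# expected to emit one types.db line per 'used' entry, roughly:
#   gpu_temp    value:GAUGE:U:U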
# collectd metadata definition table.
CollectdMetadataDict = { DCGM_FI_DRIVER_VERSION: None,
DCGM_FI_NVML_VERSION: None,
DCGM_FI_PROCESS_NAME: None,
DCGM_FI_CUDA_DRIVER_VERSION: CollectdMetadata("cuda_driver_version", "value:GAUGE:U:U"),
DCGM_FI_DEV_COUNT: CollectdMetadata("device_count", "value:GAUGE:U:U"),
DCGM_FI_DEV_NAME: None,
DCGM_FI_DEV_BRAND: None,
DCGM_FI_DEV_NVML_INDEX: CollectdMetadata("nvml_index", "value:GAUGE:U:U"),
DCGM_FI_DEV_SERIAL: None,
DCGM_FI_DEV_CPU_AFFINITY_0: CollectdMetadata("cpu_affinity_0", "value:GAUGE:U:U"),
DCGM_FI_DEV_CPU_AFFINITY_1: CollectdMetadata("cpu_affinity_1", "value:GAUGE:U:U"),
DCGM_FI_DEV_CPU_AFFINITY_2: CollectdMetadata("cpu_affinity_2", "value:GAUGE:U:U"),
DCGM_FI_DEV_CPU_AFFINITY_3: CollectdMetadata("cpu_affinity_3", "value:GAUGE:U:U"),
DCGM_FI_DEV_UUID: None,
DCGM_FI_DEV_MINOR_NUMBER: CollectdMetadata("minor_number", "value:GAUGE:U:U"),
DCGM_FI_DEV_OEM_INFOROM_VER: None,
DCGM_FI_DEV_ECC_INFOROM_VER: None,
DCGM_FI_DEV_POWER_INFOROM_VER: None,
DCGM_FI_DEV_INFOROM_IMAGE_VER: None,
DCGM_FI_DEV_INFOROM_CONFIG_CHECK: CollectdMetadata("inforom_config_checksum", "value:GAUGE:U:U"),
DCGM_FI_DEV_PCI_BUSID: None,
DCGM_FI_DEV_PCI_COMBINED_ID: CollectdMetadata("pci_combined_id", "value:GAUGE:U:U"),
DCGM_FI_DEV_PCI_SUBSYS_ID: CollectdMetadata("pci_subsys_id", "value:GAUGE:U:U"),
DCGM_FI_DEV_PCIE_TX_THROUGHPUT: CollectdMetadata("pcie_tx_throughput", "value:GAUGE:0:U", True),
DCGM_FI_DEV_PCIE_RX_THROUGHPUT: CollectdMetadata("pcie_rx_throughput", "value:GAUGE:0:U", True),
DCGM_FI_DEV_PCIE_REPLAY_COUNTER: CollectdMetadata("pcie_replay_counter", "value:COUNTER:0:U", True),
DCGM_FI_DEV_SM_CLOCK: CollectdMetadata("sm_clock", "value:GAUGE:0:U", True),
DCGM_FI_DEV_MEM_CLOCK: CollectdMetadata("memory_clock", "value:GAUGE:0:U", True),
DCGM_FI_DEV_VIDEO_CLOCK: CollectdMetadata("video_clock", "value:GAUGE:0:U", True),
DCGM_FI_DEV_APP_SM_CLOCK: CollectdMetadata("sm_app_clock", "value:GAUGE:0:U", True),
DCGM_FI_DEV_APP_MEM_CLOCK: CollectdMetadata("mem_app_clock", "value:GAUGE:0:U", True),
DCGM_FI_DEV_CLOCK_THROTTLE_REASONS: CollectdMetadata("current_clock_throttle_reasons", "value:GAUGE:U:U"),
DCGM_FI_DEV_MAX_SM_CLOCK: CollectdMetadata("sm_max_clock", "value:GAUGE:0:U", True),
DCGM_FI_DEV_MAX_MEM_CLOCK: CollectdMetadata("memory_max_clock", "value:GAUGE:0:U", True),
DCGM_FI_DEV_MAX_VIDEO_CLOCK: CollectdMetadata("video_max_clock", "value:GAUGE:0:U", True),
DCGM_FI_DEV_AUTOBOOST: CollectdMetadata("autoboost", "value:GAUGE:U:U"),
DCGM_FI_DEV_GPU_TEMP: CollectdMetadata("gpu_temp", "value:GAUGE:U:U", True),
DCGM_FI_DEV_MEM_MAX_OP_TEMP: CollectdMetadata("gpu_mem_max_op_temp", "value:GAUGE:U:U"),
DCGM_FI_DEV_GPU_MAX_OP_TEMP: CollectdMetadata("gpu_max_op_temp", "value:GAUGE:U:U"),
DCGM_FI_DEV_SLOWDOWN_TEMP: CollectdMetadata("slowdown_temp", "value:GAUGE:U:U"),
DCGM_FI_DEV_SHUTDOWN_TEMP: CollectdMetadata("shutdown_temp", "value:GAUGE:U:U"),
DCGM_FI_DEV_POWER_MGMT_LIMIT: CollectdMetadata("power_management_limit", "value:GAUGE:U:U"),
DCGM_FI_DEV_POWER_MGMT_LIMIT_MIN: CollectdMetadata("power_management_limit_min", "value:GAUGE:U:U"),
DCGM_FI_DEV_POWER_MGMT_LIMIT_MAX: CollectdMetadata("power_management_limit_max", "value:GAUGE:U:U"),
DCGM_FI_DEV_POWER_MGMT_LIMIT_DEF: CollectdMetadata("power_management_limit_default", "value:GAUGE:U:U"),
DCGM_FI_DEV_POWER_USAGE: CollectdMetadata("power_usage", "value:GAUGE:0:U", True),
                         DCGM_FI_DEV_TOTAL_ENERGY_CONSUMPTION: CollectdMetadata("total_energy_consumption", "value:GAUGE:0:U", True), # left as gauge since zeroed at driver reload
DCGM_FI_DEV_ENFORCED_POWER_LIMIT: CollectdMetadata("enforced_power_limit", "value:GAUGE:U:U"),
DCGM_FI_DEV_PSTATE: CollectdMetadata("pstate", "value:GAUGE:U:U"),
DCGM_FI_DEV_FAN_SPEED: CollectdMetadata("fan_speed", "value:GAUGE:U:U"),
DCGM_FI_DEV_COMPUTE_MODE: CollectdMetadata("compute_mode", "value:GAUGE:U:U"),
DCGM_FI_DEV_PERSISTENCE_MODE: CollectdMetadata("persistance_mode", "value:GAUGE:U:U"),
DCGM_FI_DEV_MIG_MODE: CollectdMetadata("mig_mode", "value:GAUGE:U:U"),
DCGM_FI_DEV_CUDA_VISIBLE_DEVICES_STR: None,
DCGM_FI_DEV_MIG_MAX_SLICES: CollectdMetadata("mig_max_slices", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_CURRENT: CollectdMetadata("ecc", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_PENDING: CollectdMetadata("ecc_pending", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_VOL_TOTAL: CollectdMetadata("ecc_sbe_volatile_total", "value:COUNTER:0:U", True),
DCGM_FI_DEV_ECC_DBE_VOL_TOTAL: CollectdMetadata("ecc_dbe_volatile_total", "value:COUNTER:0:U", True),
DCGM_FI_DEV_ECC_SBE_AGG_TOTAL: CollectdMetadata("ecc_sbe_aggregate_total", "value:COUNTER:0:U", True),
DCGM_FI_DEV_ECC_DBE_AGG_TOTAL: CollectdMetadata("ecc_dbe_aggregate_total", "value:COUNTER:0:U", True),
DCGM_FI_DEV_ECC_SBE_VOL_L1: CollectdMetadata("ecc_sbe_volatile_l1", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_VOL_L1: CollectdMetadata("ecc_dbe_volatile_l1", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_VOL_L2: CollectdMetadata("ecc_sbe_volatile_l2", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_VOL_L2: CollectdMetadata("ecc_dbe_volatile_l2", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_VOL_DEV: CollectdMetadata("ecc_sbe_volatile_device", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_VOL_DEV: CollectdMetadata("ecc_dbe_volatile_device", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_VOL_REG: CollectdMetadata("ecc_sbe_volatile_register", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_VOL_REG: CollectdMetadata("ecc_dbe_volatile_register", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_VOL_TEX: CollectdMetadata("ecc_sbe_volatile_texture", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_VOL_TEX: CollectdMetadata("ecc_dbe_volatile_texture", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_AGG_L1: CollectdMetadata("ecc_sbe_aggregate_l1", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_AGG_L1: CollectdMetadata("ecc_dbe_aggregate_l1", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_AGG_L2: CollectdMetadata("ecc_sbe_aggregate_l2", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_AGG_L2: CollectdMetadata("ecc_dbe_aggregate_l2", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_AGG_DEV: CollectdMetadata("ecc_sbe_aggregate_device", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_AGG_DEV: CollectdMetadata("ecc_dbe_aggregate_device", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_AGG_REG: CollectdMetadata("ecc_sbe_aggregate_register", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_AGG_REG: CollectdMetadata("ecc_dbe_aggregate_register", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_SBE_AGG_TEX: CollectdMetadata("ecc_sbe_aggregate_texture", "value:GAUGE:U:U"),
DCGM_FI_DEV_ECC_DBE_AGG_TEX: CollectdMetadata("ecc_dbe_aggregate_texture", "value:GAUGE:U:U"),
DCGM_FI_DEV_GPU_UTIL: CollectdMetadata("gpu_utilization", "value:GAUGE:0.0:1.0", True),
DCGM_FI_DEV_MEM_COPY_UTIL: CollectdMetadata("mem_copy_utilization", "value:GAUGE:0:100", True),
DCGM_FI_DEV_ENC_UTIL: CollectdMetadata("enc_utilization", "value:GAUGE:0:100"),
DCGM_FI_DEV_DEC_UTIL: CollectdMetadata("dec_utilization", "value:GAUGE:0:100"),
DCGM_FI_DEV_VBIOS_VERSION: None,
DCGM_FI_DEV_BAR1_TOTAL: CollectdMetadata("bar1_total", "value:GAUGE:U:U"),
DCGM_FI_DEV_BAR1_USED: CollectdMetadata("bar1_used", "value:GAUGE:U:U"),
DCGM_FI_DEV_BAR1_FREE: CollectdMetadata("bar1_free", "value:GAUGE:U:U"),
DCGM_FI_DEV_FB_TOTAL: CollectdMetadata("fb_total", "value:GAUGE:0.0:U", True),
DCGM_FI_DEV_FB_FREE: CollectdMetadata("fb_free", "value:GAUGE:0.0:U", True),
DCGM_FI_DEV_FB_USED: CollectdMetadata("fb_used", "value:GAUGE:0.0:U", True),
DCGM_FI_DEV_FB_RESERVED: CollectdMetadata("fb_resv", "value:GAUGE:0.0:U", True),
DCGM_FI_DEV_VIRTUAL_MODE: CollectdMetadata("virtualization_mode", "value:GAUGE:U:U"),
DCGM_FI_DEV_VGPU_INSTANCE_IDS: None,
DCGM_FI_DEV_VGPU_UTILIZATIONS: None,
DCGM_FI_DEV_VGPU_PER_PROCESS_UTILIZATION: None,
DCGM_FI_DEV_VGPU_VM_ID: None,
DCGM_FI_DEV_VGPU_VM_NAME: None,
DCGM_FI_DEV_VGPU_TYPE: CollectdMetadata("vgpu_instance_type", "value:GAUGE:U:U"),
DCGM_FI_DEV_VGPU_UUID: None,
DCGM_FI_DEV_VGPU_DRIVER_VERSION: None,
DCGM_FI_DEV_VGPU_MEMORY_USAGE: CollectdMetadata("vgpu_instance_memory_usage", "value:GAUGE:U:U"),
DCGM_FI_DEV_VGPU_INSTANCE_LICENSE_STATE: CollectdMetadata("vgpu_instance_license_state", "value:GAUGE:U:U"),
DCGM_FI_DEV_VGPU_LICENSE_STATUS: CollectdMetadata("vgpu_instance_license_status", "value:GAUGE:U:U"),
DCGM_FI_DEV_VGPU_FRAME_RATE_LIMIT: CollectdMetadata("vgpu_instance_frame_rate_limit", "value:GAUGE:U:U"),
DCGM_FI_DEV_VGPU_PCI_ID: CollectdMetadata("vgpu_instance_pci_id", "value:GAUGE:U:U"),
DCGM_FI_DEV_VGPU_ENC_STATS: None,
DCGM_FI_DEV_VGPU_ENC_SESSIONS_INFO: None,
DCGM_FI_DEV_VGPU_FBC_STATS: None,
DCGM_FI_DEV_VGPU_FBC_SESSIONS_INFO: None,
DCGM_FI_DEV_VGPU_VM_GPU_INSTANCE_ID: None,
DCGM_FI_DEV_SUPPORTED_TYPE_INFO: None,
DCGM_FI_DEV_SUPPORTED_VGPU_TYPE_IDS: None,
DCGM_FI_DEV_VGPU_TYPE_INFO: None,
DCGM_FI_DEV_VGPU_TYPE_NAME: None,
DCGM_FI_DEV_VGPU_TYPE_CLASS: None,
DCGM_FI_DEV_VGPU_TYPE_LICENSE: None,
DCGM_FI_DEV_CREATABLE_VGPU_TYPE_IDS: None,
DCGM_FI_DEV_ENC_STATS: None,
DCGM_FI_DEV_FBC_STATS: None,
DCGM_FI_DEV_FBC_SESSIONS_INFO: None,
DCGM_FI_DEV_ACCOUNTING_DATA: None,
DCGM_FI_DEV_RETIRED_SBE: CollectdMetadata("retired_pages_sbe", "value:COUNTER:0:U", True),
DCGM_FI_DEV_RETIRED_DBE: CollectdMetadata("retired_pages_dbe", "value:COUNTER:0:U", True),
DCGM_FI_DEV_GRAPHICS_PIDS: None,
DCGM_FI_DEV_COMPUTE_PIDS: None,
DCGM_FI_DEV_SUPPORTED_CLOCKS: None,
DCGM_FI_SYNC_BOOST: None,
DCGM_FI_DEV_RETIRED_PENDING: CollectdMetadata("retired_pages_pending", "value:GAUGE:0:1", True), # boolean 1 = yes, 0 = no
DCGM_FI_DEV_UNCORRECTABLE_REMAPPED_ROWS: CollectdMetadata("uncorrectable_remapped_rows", "value:GAUGE:U:U"),
DCGM_FI_DEV_CORRECTABLE_REMAPPED_ROWS: CollectdMetadata("correctable_remapped_rows", "value:GAUGE:U:U"),
DCGM_FI_DEV_ROW_REMAP_FAILURE: CollectdMetadata("row_remap_failure", "value:GAUGE:U:U"),
DCGM_FI_DEV_ROW_REMAP_PENDING: CollectdMetadata("row_remap_pending", "value:GAUGE:U:U"),
DCGM_FI_DEV_INFOROM_CONFIG_VALID: CollectdMetadata("inforom_config_valid", "value:GAUGE:U:U"),
DCGM_FI_DEV_XID_ERRORS: CollectdMetadata("xid_errors", "value:GAUGE:0:U", True),
DCGM_FI_DEV_PCIE_MAX_LINK_GEN: CollectdMetadata("pcie_max_link_gen", "value:GAUGE:U:U"),
DCGM_FI_DEV_PCIE_MAX_LINK_WIDTH: CollectdMetadata("pcie_max_link_width", "value:GAUGE:U:U"),
DCGM_FI_DEV_PCIE_LINK_GEN: CollectdMetadata("pcie_link_gen", "value:GAUGE:U:U"),
DCGM_FI_DEV_PCIE_LINK_WIDTH: CollectdMetadata("pcie_link_width", "value:GAUGE:U:U"),
DCGM_FI_DEV_POWER_VIOLATION: CollectdMetadata("power_violation", "value:COUNTER:0:U", True),
DCGM_FI_DEV_THERMAL_VIOLATION: CollectdMetadata("thermal_violation", "value:COUNTER:0:U", True),
DCGM_FI_GPU_TOPOLOGY_PCI: None,
DCGM_FI_GPU_TOPOLOGY_NVLINK: None,
DCGM_FI_GPU_TOPOLOGY_AFFINITY: None,
DCGM_FI_DEV_SYNC_BOOST_VIOLATION: CollectdMetadata("sync_boost_violation", "value:GAUGE:U:U"),
DCGM_FI_DEV_BOARD_LIMIT_VIOLATION: CollectdMetadata("board_limit_violation", "value:GAUGE:U:U"),
DCGM_FI_DEV_LOW_UTIL_VIOLATION: CollectdMetadata("low_util_violation", "value:GAUGE:U:U"),
DCGM_FI_DEV_RELIABILITY_VIOLATION: CollectdMetadata("reliability_violation", "value:GAUGE:U:U"),
DCGM_FI_DEV_TOTAL_APP_CLOCKS_VIOLATION: CollectdMetadata("app_clock_violation", "value:GAUGE:U:U"),
DCGM_FI_DEV_TOTAL_BASE_CLOCKS_VIOLATION: CollectdMetadata("base_clock_violation", "value:GAUGE:U:U"),
DCGM_FI_DEV_MEM_COPY_UTIL_SAMPLES: CollectdMetadata("mem_util_samples", "value:GAUGE:U:U"),
DCGM_FI_DEV_GPU_UTIL_SAMPLES: CollectdMetadata("gpu_util_samples", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0: CollectdMetadata("nvlink_flit_crc_error_count_l0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1: CollectdMetadata("nvlink_flit_crc_error_count_l1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2: CollectdMetadata("nvlink_flit_crc_error_count_l2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3: CollectdMetadata("nvlink_flit_crc_error_count_l3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4: CollectdMetadata("nvlink_flit_crc_error_count_l4", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5: CollectdMetadata("nvlink_flit_crc_error_count_l5", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL: CollectdMetadata("nvlink_flit_crc_error_count_total", "value:COUNTER:0:U", True),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0: CollectdMetadata("nvlink_data_crc_error_count_l0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1: CollectdMetadata("nvlink_data_crc_error_count_l1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2: CollectdMetadata("nvlink_data_crc_error_count_l2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3: CollectdMetadata("nvlink_data_crc_error_count_l3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4: CollectdMetadata("nvlink_data_crc_error_count_l4", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5: CollectdMetadata("nvlink_data_crc_error_count_l5", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL: CollectdMetadata("nvlink_data_crc_error_count_total", "value:COUNTER:0:U", True),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0: CollectdMetadata("nvlink_replay_error_count_l0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1: CollectdMetadata("nvlink_replay_error_count_l1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2: CollectdMetadata("nvlink_replay_error_count_l2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3: CollectdMetadata("nvlink_replay_error_count_l3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4: CollectdMetadata("nvlink_replay_error_count_l4", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5: CollectdMetadata("nvlink_replay_error_count_l5", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL: CollectdMetadata("nvlink_replay_error_count_total", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0: CollectdMetadata("nvlink_recovery_error_count_l0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1: CollectdMetadata("nvlink_recovery_error_count_l1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2: CollectdMetadata("nvlink_recovery_error_count_l2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3: CollectdMetadata("nvlink_recovery_error_count_l3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4: CollectdMetadata("nvlink_recovery_error_count_l4", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5: CollectdMetadata("nvlink_recovery_error_count_l5", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL: CollectdMetadata("nvlink_recovery_error_count_total", "value:COUNTER:0:U", True),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L0: CollectdMetadata("nvlink_bandwidth_l0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L1: CollectdMetadata("nvlink_bandwidth_l1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L2: CollectdMetadata("nvlink_bandwidth_l2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L3: CollectdMetadata("nvlink_bandwidth_l3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L4: CollectdMetadata("nvlink_bandwidth_l4", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L5: CollectdMetadata("nvlink_bandwidth_l5", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_TOTAL: CollectdMetadata("nvlink_bandwidth_total", "value:GAUGE:0:U", True),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6: CollectdMetadata("nvlink_flit_crc_error_count_l6", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7: CollectdMetadata("nvlink_flit_crc_error_count_l7", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8: CollectdMetadata("nvlink_flit_crc_error_count_l8", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9: CollectdMetadata("nvlink_flit_crc_error_count_l9", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10: CollectdMetadata("nvlink_flit_crc_error_count_l10", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11: CollectdMetadata("nvlink_flit_crc_error_count_l11", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6: CollectdMetadata("nvlink_data_crc_error_count_l6", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7: CollectdMetadata("nvlink_data_crc_error_count_l7", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8: CollectdMetadata("nvlink_data_crc_error_count_l8", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9: CollectdMetadata("nvlink_data_crc_error_count_l9", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10: CollectdMetadata("nvlink_data_crc_error_count_l10", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11: CollectdMetadata("nvlink_data_crc_error_count_l11", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6: CollectdMetadata("nvlink_replay_error_count_l6", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7: CollectdMetadata("nvlink_replay_error_count_l7", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8: CollectdMetadata("nvlink_replay_error_count_l8", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9: CollectdMetadata("nvlink_replay_error_count_l9", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10: CollectdMetadata("nvlink_replay_error_count_l10", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11: CollectdMetadata("nvlink_replay_error_count_l11", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6: CollectdMetadata("nvlink_recovery_error_count_l6", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7: CollectdMetadata("nvlink_recovery_error_count_l7", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8: CollectdMetadata("nvlink_recovery_error_count_l8", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9: CollectdMetadata("nvlink_recovery_error_count_l9", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10: CollectdMetadata("nvlink_recovery_error_count_l10", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11: CollectdMetadata("nvlink_recovery_error_count_l11", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L6: CollectdMetadata("nvlink_bandwidth_l6", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L7: CollectdMetadata("nvlink_bandwidth_l7", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L8: CollectdMetadata("nvlink_bandwidth_l8", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L9: CollectdMetadata("nvlink_bandwidth_l9", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L10: CollectdMetadata("nvlink_bandwidth_l10", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVLINK_BANDWIDTH_L11: CollectdMetadata("nvlink_bandwidth_l11", "value:GAUGE:U:U"),
DCGM_FI_DEV_MEMORY_TEMP: CollectdMetadata("memory_temp", "value:GAUGE:U:U", True),
DCGM_FI_DEV_GPU_NVLINK_ERRORS: CollectdMetadata("gpu_nvlink_errors", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_THROUGHPUT_TX: CollectdMetadata("nvswitch_link_bandwidth_tx", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_THROUGHPUT_RX: CollectdMetadata("nvswitch_link_bandwidth_rx", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_NON_FATAL_ERRORS: CollectdMetadata("nvswitch_link_fatal_errors", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_REPLAY_ERRORS: CollectdMetadata("nvswitch_link_non_fatal_errors", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_RECOVERY_ERRORS: CollectdMetadata("nvswitch_link_recovery_errors", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_FLIT_ERRORS: CollectdMetadata("nvswitch_link_flit_errors", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS:CollectdMetadata("nvswitch_link_crc_errors", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS:CollectdMetadata("nvswitch_link_ecc_errors", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC0:CollectdMetadata("nvswitch_link_latency_low_vc0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC1:CollectdMetadata("nvswitch_link_latency_low_vc1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC2:CollectdMetadata("nvswitch_link_latency_low_vc2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC3:CollectdMetadata("nvswitch_link_latency_low_vc3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC0:CollectdMetadata("nvswitch_link_latency_medium_vc0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC1:CollectdMetadata("nvswitch_link_latency_medium_vc1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC2:CollectdMetadata("nvswitch_link_latency_medium_vc2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC3:CollectdMetadata("nvswitch_link_latency_medium_vc3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC0:CollectdMetadata("nvswitch_link_latency_high_vc0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC1:CollectdMetadata("nvswitch_link_latency_high_vc1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC2:CollectdMetadata("nvswitch_link_latency_high_vc2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC3:CollectdMetadata("nvswitch_link_latency_high_vc3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC0:CollectdMetadata("nvswitch_link_latency_panic_vc0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC1:CollectdMetadata("nvswitch_link_latency_panic_vc1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC2:CollectdMetadata("nvswitch_link_latency_panic_vc2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC3:CollectdMetadata("nvswitch_link_latency_panic_vc3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC0:CollectdMetadata("nvswitch_link_latency_count_vc0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC1:CollectdMetadata("nvswitch_link_latency_count_vc1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC2:CollectdMetadata("nvswitch_link_latency_count_vc2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC3:CollectdMetadata("nvswitch_link_latency_count_vc3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE0:CollectdMetadata("nvswitch_link_crc_errors_lane0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE1:CollectdMetadata("nvswitch_link_crc_errors_lane1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE2:CollectdMetadata("nvswitch_link_crc_errors_lane2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE3:CollectdMetadata("nvswitch_link_crc_errors_lane3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE0:CollectdMetadata("nvswitch_link_ecc_errors_lane0", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE1:CollectdMetadata("nvswitch_link_ecc_errors_lane1", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE2:CollectdMetadata("nvswitch_link_ecc_errors_lane2", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE3:CollectdMetadata("nvswitch_link_ecc_errors_lane3", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_FATAL_ERRORS: CollectdMetadata("nvswitch_fatal_error", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_NON_FATAL_ERRORS: CollectdMetadata("nvswitch_non_fatal_error", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_TEMPERATURE_CURRENT: CollectdMetadata("nvswitch_temperature_current", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_TEMPERATURE_LIMIT_SLOWDOWN: CollectdMetadata("nvswitch_temperature_limit_slowdown", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_TEMPERATURE_LIMIT_SHUTDOWN: CollectdMetadata("nvswitch_temperature_limit_shutdown", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_THROUGHPUT_TX: CollectdMetadata("nvswitch_throughput_tx", "value:GAUGE:U:U"),
DCGM_FI_DEV_NVSWITCH_THROUGHPUT_RX: CollectdMetadata("nvswitch_throughput_rx", "value:GAUGE:U:U"),
DCGM_FI_DEV_CUDA_COMPUTE_CAPABILITY: CollectdMetadata("cuda_compute_capability", "value:GAUGE:U:U"),
DCGM_FI_PROF_GR_ENGINE_ACTIVE: CollectdMetadata("gr_engine_active", "value:GAUGE:0.0:1.0", True),
DCGM_FI_PROF_SM_ACTIVE: CollectdMetadata("sm_active", "value:GAUGE:0.0:1.0", True),
DCGM_FI_PROF_SM_OCCUPANCY: CollectdMetadata("sm_occupancy", "value:GAUGE:0:U", True),
DCGM_FI_PROF_PIPE_TENSOR_ACTIVE: CollectdMetadata("tensor_active", "value:GAUGE:0.0:1.0", True),
DCGM_FI_PROF_DRAM_ACTIVE: CollectdMetadata("dram_active", "value:GAUGE:0.0:1.0", True),
DCGM_FI_PROF_PIPE_FP64_ACTIVE: CollectdMetadata("fp64_active", "value:GAUGE:U:U"),
DCGM_FI_PROF_PIPE_FP32_ACTIVE: CollectdMetadata("fp32_active", "value:GAUGE:U:U"),
DCGM_FI_PROF_PIPE_FP16_ACTIVE: CollectdMetadata("fp16_active", "value:GAUGE:U:U"),
DCGM_FI_PROF_PCIE_TX_BYTES: CollectdMetadata("pcie_tx_bytes", "value:GAUGE:U:U"),
DCGM_FI_PROF_PCIE_RX_BYTES: CollectdMetadata("pcie_rx_bytes", "value:GAUGE:U:U"),
DCGM_FI_PROF_NVLINK_TX_BYTES: CollectdMetadata("nvlink_tx_bytes", "value:GAUGE:U:U"),
DCGM_FI_PROF_NVLINK_RX_BYTES: CollectdMetadata("nvlink_rx_bytes", "value:GAUGE:U:U"),
DCGM_FI_PROF_PIPE_TENSOR_IMMA_ACTIVE: CollectdMetadata("tensor_imma_active", "value:GAUGE:0.0:1.0", True),
DCGM_FI_PROF_PIPE_TENSOR_HMMA_ACTIVE: CollectdMetadata("tensor_hmma_active", "value:GAUGE:0.0:1.0", True),
}
__fieldDict = None
def GenerateCollectdTypesDB():
length = max(map(lambda x: len(x.name) if x else 0, CollectdMetadataDict.values()))
fmt = "{0:<" + str(length) + "}"
fail = False
for item in filter(None, CollectdMetadataDict.values()):
item_list = item.kind.split(':')
# Some rudimentary syntax checking.
if len(item_list) != 4:
sys.stderr.write('Item ' + item.name + ' has wrong number of collectd type fields - four required.\n')
fail = True
if item_list[1] not in ['GAUGE', 'COUNTER', 'DERIVE', 'ABSOLUTE']:
sys.stderr.write('Item ' + item.name + ' type should be one of GAUGE, COUNTER, DERIVE, ABSOLUTE.\n')
fail = True
# We check this so we can enumerate all dcgm fields for possible
# inclusion, even if some are not (yet) formally supported.
if item.used:
print(fmt.format(item.name), item.kind)
if fail:
exit("Failed on types.db table syntax errors.\n")
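# For reference, each line that GenerateCollectdTypesDB() prints for a "used" field is a
# collectd types.db entry: the metric name padded to the width of the longest name,
# followed by its data-source specification. Illustrative output line:
#   memory_temp    value:GAUGE:U:U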
def GetFieldByName(name):
global __fieldDict
if name.isnumeric():
item = CollectdMetadataDict.get(int(name))
if item is None:
return -1
if item.used:
return int(name)
return -1
if __fieldDict is None:
__fieldDict = {}
for key in CollectdMetadataDict:
item = CollectdMetadataDict[key]
if item is not None:
__fieldDict[item.name] = key
if name not in __fieldDict.keys():
return -1
return __fieldDict[name]
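# Minimal illustrative usage of GetFieldByName(). The helper below is an assumption added
# for documentation only and is not called anywhere in this module: a collectd metric name
# resolves to its DCGM field id, and an unknown name resolves to -1.
def _example_field_lookup():
    knownId = GetFieldByName("memory_temp")      # resolves to DCGM_FI_DEV_MEMORY_TEMP
    unknownId = GetFieldByName("no_such_field")  # resolves to -1
    return knownId, unknownId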
if __name__ == '__main__':
GenerateCollectdTypesDB()
| DCGM-master | testing/python3/dcgm_fields_collectd.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for the internal API of DCGM library (dcgm_fields_internal.hpp)
##
from ctypes import *
from ctypes.util import find_library
import dcgm_structs
# Provides access to functions
dcgmFP = dcgm_structs._dcgmGetFunctionPointer
#internal-only fields
DCGM_FI_DEV_MEM_COPY_UTIL_SAMPLES = 210 #Memory utilization samples
DCGM_FI_DEV_GPU_UTIL_SAMPLES = 211 #SM utilization samples
DCGM_FI_DEV_GRAPHICS_PIDS = 220 #Graphics processes running on the GPU.
DCGM_FI_DEV_COMPUTE_PIDS = 221 #Compute processes running on the GPU.
| DCGM-master | testing/python3/dcgm_fields_internal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for the internal API of DCGM library (dcgm_fields.h)
##
from ctypes import *
from ctypes.util import find_library
import dcgm_structs
# Provides access to functions
dcgmFP = dcgm_structs._dcgmGetFunctionPointer
# Field Types are a single byte. List these in ASCII order
DCGM_FT_BINARY = 'b' # Blob of binary data representing a structure
DCGM_FT_DOUBLE = 'd' # 8-byte double precision
DCGM_FT_INT64 = 'i' # 8-byte signed integer
DCGM_FT_STRING = 's' # Null-terminated ASCII Character string
DCGM_FT_TIMESTAMP = 't' # 8-byte signed integer usec since 1970
# Field scope. What are these fields associated with
DCGM_FS_GLOBAL = 0 # Field is global (ex: driver version)
DCGM_FS_ENTITY = 1 # Field is associated with an entity (GPU, VGPU, ..etc)
DCGM_FS_DEVICE = DCGM_FS_ENTITY # Field is associated with a device. Deprecated. Use DCGM_FS_ENTITY
# DCGM_FI_DEV_CLOCK_THROTTLE_REASONS is a bitmap of why the clock is throttled.
# These macros are masks for relevant throttling, and are a 1:1 map to the NVML
# reasons documented in nvml.h. The notes from the header are copied below:
# Nothing is running on the GPU and the clocks are dropping to Idle state
DCGM_CLOCKS_THROTTLE_REASON_GPU_IDLE = 0x0000000000000001
# GPU clocks are limited by current setting of applications clocks
DCGM_CLOCKS_THROTTLE_REASON_CLOCKS_SETTING = 0x0000000000000002
# SW Power Scaling algorithm is reducing the clocks below requested clocks
DCGM_CLOCKS_THROTTLE_REASON_SW_POWER_CAP = 0x0000000000000004
# HW Slowdown (reducing the core clocks by a factor of 2 or more) is engaged
#
# This is an indicator of:
# - temperature being too high
# - External Power Brake Assertion is triggered (e.g. by the system power supply)
# - Power draw is too high and Fast Trigger protection is reducing the clocks
# - May be also reported during PState or clock change
# - This behavior may be removed in a later release.
DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN = 0x0000000000000008
# Sync Boost
#
# This GPU has been added to a Sync boost group with nvidia-smi or DCGM in
# order to maximize performance per watt. All GPUs in the sync boost group
# will boost to the minimum possible clocks across the entire group. Look at
# the throttle reasons for other GPUs in the system to see why those GPUs are
# holding this one at lower clocks.
DCGM_CLOCKS_THROTTLE_REASON_SYNC_BOOST = 0x0000000000000010
# SW Thermal Slowdown
#
# This is an indicator of one or more of the following:
# - Current GPU temperature above the GPU Max Operating Temperature
# - Current memory temperature above the Memory Max Operating Temperature
DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL = 0x0000000000000020
# HW Thermal Slowdown (reducing the core clocks by a factor of 2 or more) is engaged
#
# This is an indicator of:
# - temperature being too high
DCGM_CLOCKS_THROTTLE_REASON_HW_THERMAL = 0x0000000000000040
# HW Power Brake Slowdown (reducing the core clocks by a factor of 2 or more) is engaged
#
# This is an indicator of:
# - External Power Brake Assertion being triggered (e.g. by the system power supply)
DCGM_CLOCKS_THROTTLE_REASON_HW_POWER_BRAKE = 0x0000000000000080
# GPU clocks are limited by current setting of Display clocks
DCGM_CLOCKS_THROTTLE_REASON_DISPLAY_CLOCKS = 0x0000000000000100
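# Illustrative helper (an assumption added for documentation; not part of the DCGM
# bindings): decode a DCGM_FI_DEV_CLOCK_THROTTLE_REASONS value into the list of reason
# names that are set in the bitmask, using only the masks defined above.
def _example_decode_clock_throttle_reasons(mask):
    names = {
        DCGM_CLOCKS_THROTTLE_REASON_GPU_IDLE: "GPU_IDLE",
        DCGM_CLOCKS_THROTTLE_REASON_CLOCKS_SETTING: "CLOCKS_SETTING",
        DCGM_CLOCKS_THROTTLE_REASON_SW_POWER_CAP: "SW_POWER_CAP",
        DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN: "HW_SLOWDOWN",
        DCGM_CLOCKS_THROTTLE_REASON_SYNC_BOOST: "SYNC_BOOST",
        DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL: "SW_THERMAL",
        DCGM_CLOCKS_THROTTLE_REASON_HW_THERMAL: "HW_THERMAL",
        DCGM_CLOCKS_THROTTLE_REASON_HW_POWER_BRAKE: "HW_POWER_BRAKE",
        DCGM_CLOCKS_THROTTLE_REASON_DISPLAY_CLOCKS: "DISPLAY_CLOCKS",
    }
    return [name for bit, name in names.items() if mask & bit]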
#Field entity groups. Which type of entity is this field or field value associated with
DCGM_FE_NONE = 0 # Field is not associated with an entity. Field scope should be DCGM_FS_GLOBAL
DCGM_FE_GPU = 1 # Field is associated with a GPU entity
DCGM_FE_VGPU = 2 # Field is associated with a VGPU entity
DCGM_FE_SWITCH = 3 # Field is associated with a Switch entity
DCGM_FE_GPU_I = 4 # Field is associated with a GPU Instance entity
DCGM_FE_GPU_CI = 5 # Field is associated with a GPU Compute Instance entity
DCGM_FE_LINK = 6 # Field is associated with an NVLINK
c_dcgm_field_eid_t = c_uint32 #Represents an identifier for an entity within a field entity. For instance, this is the gpuId for DCGM_FE_GPU.
#System attributes
DCGM_FI_UNKNOWN = 0
DCGM_FI_DRIVER_VERSION = 1 #Driver Version
DCGM_FI_NVML_VERSION = 2 #Underlying NVML version
DCGM_FI_PROCESS_NAME = 3 #Process Name. Will be nv-hostengine or your process's name in embedded mode
DCGM_FI_DEV_COUNT = 4 #Number of Devices on the node
DCGM_FI_CUDA_DRIVER_VERSION = 5 #Cuda Driver Version as an integer. CUDA 11.1 = 11100
#Device attributes
DCGM_FI_DEV_NAME = 50 #Name of the GPU device
DCGM_FI_DEV_BRAND = 51 #Device Brand
DCGM_FI_DEV_NVML_INDEX = 52 #NVML index of this GPU
DCGM_FI_DEV_SERIAL = 53 #Device Serial Number
DCGM_FI_DEV_UUID = 54 #UUID corresponding to the device
DCGM_FI_DEV_MINOR_NUMBER = 55 #Device node minor number /dev/nvidia#
DCGM_FI_DEV_OEM_INFOROM_VER = 56 #OEM inforom version
DCGM_FI_DEV_PCI_BUSID = 57 #PCI attributes for the device
DCGM_FI_DEV_PCI_COMBINED_ID = 58 #The combined 16-bit device id and 16-bit vendor id
DCGM_FI_DEV_PCI_SUBSYS_ID = 59 #The 32-bit Sub System Device ID
DCGM_FI_GPU_TOPOLOGY_PCI = 60 #Topology of all GPUs on the system via PCI (static)
DCGM_FI_GPU_TOPOLOGY_NVLINK = 61 #Topology of all GPUs on the system via NVLINK (static)
DCGM_FI_GPU_TOPOLOGY_AFFINITY = 62 #Affinity of all GPUs on the system (static)
DCGM_FI_DEV_CUDA_COMPUTE_CAPABILITY = 63 #Cuda compute capability for the device
DCGM_FI_DEV_COMPUTE_MODE = 65 #Compute mode for the device
DCGM_FI_DEV_PERSISTENCE_MODE = 66 #Persistence mode for the device
DCGM_FI_DEV_MIG_MODE = 67 #MIG mode for the device
DCGM_FI_DEV_CUDA_VISIBLE_DEVICES_STR = 68 #String value for CUDA_VISIBLE_DEVICES for the device
DCGM_FI_DEV_MIG_MAX_SLICES = 69 #The maximum number of slices this GPU supports
DCGM_FI_DEV_CPU_AFFINITY_0 = 70 #Device CPU affinity. part 1/8 = cpus 0 - 63
DCGM_FI_DEV_CPU_AFFINITY_1 = 71 #Device CPU affinity. part 2/8 = cpus 64 - 127
DCGM_FI_DEV_CPU_AFFINITY_2 = 72 #Device CPU affinity. part 3/8 = cpus 128 - 191
DCGM_FI_DEV_CPU_AFFINITY_3 = 73 #Device CPU affinity. part 4/8 = cpus 192 - 255
DCGM_FI_DEV_CC_MODE = 74 #Device CC/APM mode
DCGM_FI_DEV_MIG_ATTRIBUTES = 75 #MIG device attributes
DCGM_FI_DEV_MIG_GI_INFO = 76 #GPU instance profile information
DCGM_FI_DEV_MIG_CI_INFO = 77 #Compute instance profile information
DCGM_FI_DEV_ECC_INFOROM_VER = 80 #ECC inforom version
DCGM_FI_DEV_POWER_INFOROM_VER = 81 #Power management object inforom version
DCGM_FI_DEV_INFOROM_IMAGE_VER = 82 #Inforom image version
DCGM_FI_DEV_INFOROM_CONFIG_CHECK = 83 #Inforom configuration checksum
DCGM_FI_DEV_INFOROM_CONFIG_VALID = 84 #Reads the infoROM from the flash and verifies the checksums
DCGM_FI_DEV_VBIOS_VERSION = 85 #VBIOS version of the device
DCGM_FI_DEV_MEM_AFFINITY_0 = 86 #Device MEM affinity. part 1/4 = nodes 0 - 63
DCGM_FI_DEV_MEM_AFFINITY_1 = 87 #Device MEM affinity. part 2/4 = nodes 64 - 127
DCGM_FI_DEV_MEM_AFFINITY_2 = 88 #Device MEM affinity. part 3/4 = nodes 128 - 191
DCGM_FI_DEV_MEM_AFFINITY_3 = 89 #Device MEM affinity. part 4/4 = nodes 192 - 255
DCGM_FI_DEV_BAR1_TOTAL = 90 #Total BAR1 of the GPU
DCGM_FI_SYNC_BOOST = 91 #Deprecated - Sync boost settings on the node
DCGM_FI_DEV_BAR1_USED = 92 #Used BAR1 of the GPU in MB
DCGM_FI_DEV_BAR1_FREE = 93 #Free BAR1 of the GPU in MB
#Clocks and power
DCGM_FI_DEV_SM_CLOCK = 100 #SM clock for the device
DCGM_FI_DEV_MEM_CLOCK = 101 #Memory clock for the device
DCGM_FI_DEV_VIDEO_CLOCK = 102 #Video encoder/decoder clock for the device
DCGM_FI_DEV_APP_SM_CLOCK = 110 #SM Application clocks
DCGM_FI_DEV_APP_MEM_CLOCK = 111 #Memory Application clocks
DCGM_FI_DEV_CLOCK_THROTTLE_REASONS = 112 #Current clock throttle reasons (bitmask of DCGM_CLOCKS_THROTTLE_REASON_*)
DCGM_FI_DEV_MAX_SM_CLOCK = 113 #Maximum supported SM clock for the device
DCGM_FI_DEV_MAX_MEM_CLOCK = 114 #Maximum supported Memory clock for the device
DCGM_FI_DEV_MAX_VIDEO_CLOCK = 115 #Maximum supported Video encoder/decoder clock for the device
DCGM_FI_DEV_AUTOBOOST = 120 #Auto-boost for the device (1 = enabled. 0 = disabled)
DCGM_FI_DEV_SUPPORTED_CLOCKS = 130 #Supported clocks for the device
DCGM_FI_DEV_MEMORY_TEMP = 140 #Memory temperature for the device
DCGM_FI_DEV_GPU_TEMP = 150 #Current temperature readings for the device, in degrees C
DCGM_FI_DEV_MEM_MAX_OP_TEMP = 151 #Maximum operating temperature for the memory of this GPU
DCGM_FI_DEV_GPU_MAX_OP_TEMP = 152 #Maximum operating temperature for this GPU
DCGM_FI_DEV_POWER_USAGE = 155 #Power usage for the device in Watts
DCGM_FI_DEV_TOTAL_ENERGY_CONSUMPTION = 156 #Total energy consumption for the GPU in mJ since the driver was last reloaded
DCGM_FI_DEV_POWER_USAGE_INSTANT = 157 #Current instantaneous power usage of the device in Watts
DCGM_FI_DEV_SLOWDOWN_TEMP = 158 #Slowdown temperature for the device
DCGM_FI_DEV_SHUTDOWN_TEMP = 159 #Shutdown temperature for the device
DCGM_FI_DEV_POWER_MGMT_LIMIT = 160 #Current Power limit for the device
DCGM_FI_DEV_POWER_MGMT_LIMIT_MIN = 161 #Minimum power management limit for the device
DCGM_FI_DEV_POWER_MGMT_LIMIT_MAX = 162 #Maximum power management limit for the device
DCGM_FI_DEV_POWER_MGMT_LIMIT_DEF = 163 #Default power management limit for the device
DCGM_FI_DEV_ENFORCED_POWER_LIMIT = 164 #Effective power limit that the driver enforces after taking into account all limiters
DCGM_FI_DEV_PSTATE = 190 #Performance state (P-State) 0-15. 0=highest
DCGM_FI_DEV_FAN_SPEED = 191 #Fan speed for the device in percent 0-100
#Device utilization and telemetry
DCGM_FI_DEV_PCIE_TX_THROUGHPUT = 200 #Deprecated - PCIe Tx utilization information
DCGM_FI_DEV_PCIE_RX_THROUGHPUT = 201 #Deprecated - PCIe Rx utilization information
DCGM_FI_DEV_PCIE_REPLAY_COUNTER = 202 #PCIe replay counter
DCGM_FI_DEV_GPU_UTIL = 203 #GPU Utilization
DCGM_FI_DEV_MEM_COPY_UTIL = 204 #Memory Utilization
DCGM_FI_DEV_ACCOUNTING_DATA = 205 #Process accounting stats
DCGM_FI_DEV_ENC_UTIL = 206 #Encoder utilization
DCGM_FI_DEV_DEC_UTIL = 207 #Decoder utilization
# Fields 210, 211, 220, and 221 are internal-only. see dcgm_fields_internal.py
DCGM_FI_DEV_XID_ERRORS = 230 #XID errors. The value is the specific XID error
DCGM_FI_DEV_PCIE_MAX_LINK_GEN = 235 #PCIe Max Link Generation
DCGM_FI_DEV_PCIE_MAX_LINK_WIDTH = 236 #PCIe Max Link Width
DCGM_FI_DEV_PCIE_LINK_GEN = 237 #PCIe Current Link Generation
DCGM_FI_DEV_PCIE_LINK_WIDTH = 238 #PCIe Current Link Width
#Violation counters
DCGM_FI_DEV_POWER_VIOLATION = 240 #Power Violation time in usec
DCGM_FI_DEV_THERMAL_VIOLATION = 241 #Thermal Violation time in usec
DCGM_FI_DEV_SYNC_BOOST_VIOLATION = 242 #Sync Boost Violation time in usec
DCGM_FI_DEV_BOARD_LIMIT_VIOLATION = 243 #Board Limit Violation time in usec.
DCGM_FI_DEV_LOW_UTIL_VIOLATION = 244 #Low Utilization Violation time in usec.
DCGM_FI_DEV_RELIABILITY_VIOLATION = 245 #Reliability Violation time in usec.
DCGM_FI_DEV_TOTAL_APP_CLOCKS_VIOLATION = 246 #App Clocks Violation time in usec.
DCGM_FI_DEV_TOTAL_BASE_CLOCKS_VIOLATION = 247 #Base Clocks Violation time in usec.
#Framebuffer usage
DCGM_FI_DEV_FB_TOTAL = 250 #Total framebuffer memory in MB
DCGM_FI_DEV_FB_FREE = 251 #Total framebuffer free in MB
DCGM_FI_DEV_FB_USED = 252 #Total framebuffer used in MB
DCGM_FI_DEV_FB_RESERVED = 253 #Total framebuffer reserved in MB
#Device ECC Counters
DCGM_FI_DEV_ECC_CURRENT = 300 #Current ECC mode for the device
DCGM_FI_DEV_ECC_PENDING = 301 #Pending ECC mode for the device
DCGM_FI_DEV_ECC_SBE_VOL_TOTAL = 310 #Total single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_TOTAL = 311 #Total double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_TOTAL = 312 #Total single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_TOTAL = 313 #Total double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_L1 = 314 #L1 cache single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_L1 = 315 #L1 cache double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_L2 = 316 #L2 cache single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_L2 = 317 #L2 cache double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_DEV = 318 #Device memory single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_DEV = 319 #Device memory double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_REG = 320 #Register file single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_REG = 321 #Register file double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_VOL_TEX = 322 #Texture memory single bit volatile ecc errors
DCGM_FI_DEV_ECC_DBE_VOL_TEX = 323 #Texture memory double bit volatile ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_L1 = 324 #L1 cache single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_L1 = 325 #L1 cache double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_L2 = 326 #L2 cache single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_L2 = 327 #L2 cache double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_DEV = 328 #Device memory single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_DEV = 329 #Device memory double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_REG = 330 #Register File single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_REG = 331 #Register File double bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_SBE_AGG_TEX = 332 #Texture memory single bit aggregate (persistent) ecc errors
DCGM_FI_DEV_ECC_DBE_AGG_TEX = 333 #Texture memory double bit aggregate (persistent) ecc errors
# Remap availability histogram for each memory bank on the GPU.
DCGM_FI_DEV_BANKS_REMAP_ROWS_AVAIL_MAX = 385
DCGM_FI_DEV_BANKS_REMAP_ROWS_AVAIL_HIGH = 386
DCGM_FI_DEV_BANKS_REMAP_ROWS_AVAIL_PARTIAL = 387
DCGM_FI_DEV_BANKS_REMAP_ROWS_AVAIL_LOW = 388
DCGM_FI_DEV_BANKS_REMAP_ROWS_AVAIL_NONE = 389
DCGM_FI_DEV_RETIRED_SBE = 390 #Number of retired pages because of single bit errors
DCGM_FI_DEV_RETIRED_DBE = 391 #Number of retired pages because of double bit errors
DCGM_FI_DEV_RETIRED_PENDING = 392 #Number of pages pending retirement
#Row remapper fields (Ampere and newer)
DCGM_FI_DEV_UNCORRECTABLE_REMAPPED_ROWS = 393 #Number of remapped rows for uncorrectable errors
DCGM_FI_DEV_CORRECTABLE_REMAPPED_ROWS = 394 #Number of remapped rows for correctable errors
DCGM_FI_DEV_ROW_REMAP_FAILURE = 395 #Whether remapping of rows has failed
DCGM_FI_DEV_ROW_REMAP_PENDING = 396 #Whether remapping of rows is pending
#Device NvLink Bandwidth and Error Counters
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 = 400 #NV Link flow control CRC Error Counter for Lane 0
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 = 401 #NV Link flow control CRC Error Counter for Lane 1
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 = 402 #NV Link flow control CRC Error Counter for Lane 2
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 = 403 #NV Link flow control CRC Error Counter for Lane 3
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 = 404 #NV Link flow control CRC Error Counter for Lane 4
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 = 405 #NV Link flow control CRC Error Counter for Lane 5
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL = 409 #NV Link flow control CRC Error Counter total for all Lanes
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 = 410 #NV Link data CRC Error Counter for Lane 0
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 = 411 #NV Link data CRC Error Counter for Lane 1
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 = 412 #NV Link data CRC Error Counter for Lane 2
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 = 413 #NV Link data CRC Error Counter for Lane 3
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 = 414 #NV Link data CRC Error Counter for Lane 4
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 = 415 #NV Link data CRC Error Counter for Lane 5
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL = 419 #NV Link data CRC Error Counter total for all Lanes
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 = 420 #NV Link Replay Error Counter for Lane 0
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 = 421 #NV Link Replay Error Counter for Lane 1
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 = 422 #NV Link Replay Error Counter for Lane 2
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 = 423 #NV Link Replay Error Counter for Lane 3
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 = 424 #NV Link Replay Error Counter for Lane 4
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 = 425 #NV Link Replay Error Counter for Lane 5
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL = 429 #NV Link Replay Error Counter total for all Lanes
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 = 430 #NV Link Recovery Error Counter for Lane 0
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 = 431 #NV Link Recovery Error Counter for Lane 1
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 = 432 #NV Link Recovery Error Counter for Lane 2
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 = 433 #NV Link Recovery Error Counter for Lane 3
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 = 434 #NV Link Recovery Error Counter for Lane 4
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 = 435 #NV Link Recovery Error Counter for Lane 5
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL = 439 #NV Link Recovery Error Counter total for all Lanes
DCGM_FI_DEV_NVLINK_BANDWIDTH_L0 = 440 #NV Link Bandwidth Counter for Lane 0
DCGM_FI_DEV_NVLINK_BANDWIDTH_L1 = 441 #NV Link Bandwidth Counter for Lane 1
DCGM_FI_DEV_NVLINK_BANDWIDTH_L2 = 442 #NV Link Bandwidth Counter for Lane 2
DCGM_FI_DEV_NVLINK_BANDWIDTH_L3 = 443 #NV Link Bandwidth Counter for Lane 3
DCGM_FI_DEV_NVLINK_BANDWIDTH_L4 = 444 #NV Link Bandwidth Counter for Lane 4
DCGM_FI_DEV_NVLINK_BANDWIDTH_L5 = 445 #NV Link Bandwidth Counter for Lane 5
DCGM_FI_DEV_NVLINK_BANDWIDTH_TOTAL = 449 #NV Link Bandwidth Counter total for all Lanes
DCGM_FI_DEV_GPU_NVLINK_ERRORS = 450 #GPU NVLink error information
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 = 451
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 = 452
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 = 453
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 = 454
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 = 455
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 = 456
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L12 = 406
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L13 = 407
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L14 = 408
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L15 = 481
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L16 = 482
DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L17 = 483
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 = 457
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 = 458
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 = 459
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 = 460
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 = 461
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 = 462
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L12 = 416
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L13 = 417
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L14 = 418
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L15 = 484
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L16 = 485
DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L17 = 486
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 = 463
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 = 464
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 = 465
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 = 466
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 = 467
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 = 468
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L12 = 426
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L13 = 427
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L14 = 428
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L15 = 487
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L16 = 488
DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L17 = 489
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 = 469
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 = 470
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 = 471
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 = 472
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 = 473
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 = 474
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L12 = 436
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L13 = 437
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L14 = 438
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L15 = 491
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L16 = 492
DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L17 = 493
DCGM_FI_DEV_NVLINK_BANDWIDTH_L6 = 475
DCGM_FI_DEV_NVLINK_BANDWIDTH_L7 = 476
DCGM_FI_DEV_NVLINK_BANDWIDTH_L8 = 477
DCGM_FI_DEV_NVLINK_BANDWIDTH_L9 = 478
DCGM_FI_DEV_NVLINK_BANDWIDTH_L10 = 479
DCGM_FI_DEV_NVLINK_BANDWIDTH_L11 = 480
DCGM_FI_DEV_NVLINK_BANDWIDTH_L12 = 446
DCGM_FI_DEV_NVLINK_BANDWIDTH_L13 = 447
DCGM_FI_DEV_NVLINK_BANDWIDTH_L14 = 448
DCGM_FI_DEV_NVLINK_BANDWIDTH_L15 = 494
DCGM_FI_DEV_NVLINK_BANDWIDTH_L16 = 495
DCGM_FI_DEV_NVLINK_BANDWIDTH_L17 = 496
#Device Attributes associated with virtualization
DCGM_FI_DEV_VIRTUAL_MODE = 500 #Operating mode of the GPU
DCGM_FI_DEV_SUPPORTED_TYPE_INFO = 501 #Includes Count and Supported vGPU type information
DCGM_FI_DEV_CREATABLE_VGPU_TYPE_IDS = 502 #Includes Count and List of Creatable vGPU type IDs
DCGM_FI_DEV_VGPU_INSTANCE_IDS = 503 #Includes Count and List of vGPU instance IDs
DCGM_FI_DEV_VGPU_UTILIZATIONS = 504 #Utilization values for vGPUs running on the device
DCGM_FI_DEV_VGPU_PER_PROCESS_UTILIZATION = 505 #Utilization values for processes running within vGPU VMs using the device
DCGM_FI_DEV_ENC_STATS = 506 #Current encoder statistics for a given device
DCGM_FI_DEV_FBC_STATS = 507 #Statistics of current active frame buffer capture sessions on a given device
DCGM_FI_DEV_FBC_SESSIONS_INFO = 508 #Information about active frame buffer capture sessions on a target device
DCGM_FI_DEV_SUPPORTED_VGPU_TYPE_IDS = 509 #Includes Count and currently Supported vGPU types on a device
DCGM_FI_DEV_VGPU_TYPE_INFO = 510 #Includes Static info of vGPU types supported on a device
DCGM_FI_DEV_VGPU_TYPE_NAME = 511 #Includes the name of a vGPU type supported on a device
DCGM_FI_DEV_VGPU_TYPE_CLASS = 512 #Includes the class of a vGPU type supported on a device
DCGM_FI_DEV_VGPU_TYPE_LICENSE = 513 #Includes the license info for a vGPU type supported on a device
#Related to vGPU Instance IDs
DCGM_FI_DEV_VGPU_VM_ID = 520 #vGPU VM ID
DCGM_FI_DEV_VGPU_VM_NAME = 521 #vGPU VM name
DCGM_FI_DEV_VGPU_TYPE = 522 #vGPU type of the vGPU instance
DCGM_FI_DEV_VGPU_UUID = 523 #UUID of the vGPU instance
DCGM_FI_DEV_VGPU_DRIVER_VERSION = 524 #Driver version of the vGPU instance
DCGM_FI_DEV_VGPU_MEMORY_USAGE = 525 #Memory usage of the vGPU instance
DCGM_FI_DEV_VGPU_LICENSE_STATUS = 526 #License status of the vGPU
DCGM_FI_DEV_VGPU_FRAME_RATE_LIMIT = 527 #Frame rate limit of the vGPU instance
DCGM_FI_DEV_VGPU_ENC_STATS = 528 #Current encoder statistics of the vGPU instance
DCGM_FI_DEV_VGPU_ENC_SESSIONS_INFO = 529 #Information about all active encoder sessions on the vGPU instance
DCGM_FI_DEV_VGPU_FBC_STATS = 530 #Statistics of current active frame buffer capture sessions on the vGPU instance
DCGM_FI_DEV_VGPU_FBC_SESSIONS_INFO = 531 #Information about active frame buffer capture sessions on the vGPU instance
DCGM_FI_DEV_VGPU_INSTANCE_LICENSE_STATE = 532 #License state information of the vGPU instance
DCGM_FI_DEV_VGPU_PCI_ID = 533 #PCI Id of the vGPU instance
DCGM_FI_DEV_VGPU_VM_GPU_INSTANCE_ID = 534 #GPU Instance Id of the vGPU instance
#Internal fields reserve the range 600..699
#below fields related to NVSwitch
DCGM_FI_FIRST_NVSWITCH_FIELD_ID = 700 #Starting field ID of the NVSwitch instance
DCGM_FI_DEV_NVSWITCH_VOLTAGE_MVOLT = 701
DCGM_FI_DEV_NVSWITCH_CURRENT_IDDQ = 702
DCGM_FI_DEV_NVSWITCH_CURRENT_IDDQ_REV = 703
DCGM_FI_DEV_NVSWITCH_CURRENT_IDDQ_DVDD = 704
DCGM_FI_DEV_NVSWITCH_LINK_THROUGHPUT_TX = 780
DCGM_FI_DEV_NVSWITCH_LINK_THROUGHPUT_RX = 781
DCGM_FI_DEV_NVSWITCH_LINK_FATAL_ERRORS = 782
DCGM_FI_DEV_NVSWITCH_LINK_NON_FATAL_ERRORS = 783
DCGM_FI_DEV_NVSWITCH_LINK_REPLAY_ERRORS = 784
DCGM_FI_DEV_NVSWITCH_LINK_RECOVERY_ERRORS = 785
DCGM_FI_DEV_NVSWITCH_LINK_FLIT_ERRORS = 786
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS = 787
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS = 788
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC0 = 789
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC1 = 790
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC2 = 791
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_LOW_VC3 = 792
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC0 = 793
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC1 = 794
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC2 = 795
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_MEDIUM_VC3 = 796
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC0 = 797
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC1 = 798
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC2 = 799
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_HIGH_VC3 = 800
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC0 = 801
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC1 = 802
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC2 = 803
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_PANIC_VC3 = 804
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC0 = 805
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC1 = 806
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC2 = 807
DCGM_FI_DEV_NVSWITCH_LINK_LATENCY_COUNT_VC3 = 808
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE0 = 809
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE1 = 810
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE2 = 811
DCGM_FI_DEV_NVSWITCH_LINK_CRC_ERRORS_LANE3 = 812
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE0 = 813
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE1 = 814
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE2 = 815
DCGM_FI_DEV_NVSWITCH_LINK_ECC_ERRORS_LANE3 = 816
DCGM_FI_DEV_NVSWITCH_FATAL_ERRORS = 856
DCGM_FI_DEV_NVSWITCH_NON_FATAL_ERRORS = 857
DCGM_FI_DEV_NVSWITCH_TEMPERATURE_CURRENT = 858
DCGM_FI_DEV_NVSWITCH_TEMPERATURE_LIMIT_SLOWDOWN = 859
DCGM_FI_DEV_NVSWITCH_TEMPERATURE_LIMIT_SHUTDOWN = 860
DCGM_FI_DEV_NVSWITCH_THROUGHPUT_TX = 861
DCGM_FI_DEV_NVSWITCH_THROUGHPUT_RX = 862
DCGM_FI_LAST_NVSWITCH_FIELD_ID = 899 #Last field ID of the NVSwitch instance
'''
Profiling Fields
'''
DCGM_FI_PROF_GR_ENGINE_ACTIVE = 1001 #Ratio of time the graphics engine is active. The graphics engine is
#active if a graphics/compute context is bound and the graphics pipe or
#compute pipe is busy.
DCGM_FI_PROF_SM_ACTIVE = 1002 #The ratio of cycles an SM has at least 1 warp assigned
#(computed from the number of cycles and elapsed cycles)
DCGM_FI_PROF_SM_OCCUPANCY = 1003 #The ratio of number of warps resident on an SM.
#(number of resident as a ratio of the theoretical
#maximum number of warps per elapsed cycle)
DCGM_FI_PROF_PIPE_TENSOR_ACTIVE = 1004 #The ratio of cycles any tensor pipe is active
#(off the peak sustained elapsed cycles)
DCGM_FI_PROF_DRAM_ACTIVE = 1005 #The ratio of cycles the device memory interface is active sending or receiving data.
DCGM_FI_PROF_PIPE_FP64_ACTIVE = 1006 #Ratio of cycles the fp64 pipe is active.
DCGM_FI_PROF_PIPE_FP32_ACTIVE = 1007 #Ratio of cycles the fp32 pipe is active.
DCGM_FI_PROF_PIPE_FP16_ACTIVE = 1008 #Ratio of cycles the fp16 pipe is active. This does not include HMMA.
DCGM_FI_PROF_PCIE_TX_BYTES = 1009 #The number of bytes of active PCIe tx (transmit) data including both header and payload.
DCGM_FI_PROF_PCIE_RX_BYTES = 1010 #The number of bytes of active PCIe rx (read) data including both header and payload.
DCGM_FI_PROF_NVLINK_TX_BYTES = 1011 #The number of bytes of active NvLink tx (transmit) data including both header and payload.
DCGM_FI_PROF_NVLINK_RX_BYTES = 1012 #The number of bytes of active NvLink rx (receive) data including both header and payload.
DCGM_FI_PROF_PIPE_TENSOR_IMMA_ACTIVE = 1013 #The ratio of cycles the IMMA tensor pipe is active (off the peak sustained elapsed cycles)
DCGM_FI_PROF_PIPE_TENSOR_HMMA_ACTIVE = 1014 #The ratio of cycles the HMMA tensor pipe is active (off the peak sustained elapsed cycles)
DCGM_FI_PROF_PIPE_TENSOR_DFMA_ACTIVE = 1015 #The ratio of cycles the tensor (DFMA) pipe is active (off the peak sustained elapsed cycles)
DCGM_FI_PROF_PIPE_INT_ACTIVE = 1016 #Ratio of cycles the integer pipe is active.
#Ratio of cycles each of the NVDEC engines are active.
DCGM_FI_PROF_NVDEC0_ACTIVE = 1017
DCGM_FI_PROF_NVDEC1_ACTIVE = 1018
DCGM_FI_PROF_NVDEC2_ACTIVE = 1019
DCGM_FI_PROF_NVDEC3_ACTIVE = 1020
DCGM_FI_PROF_NVDEC4_ACTIVE = 1021
DCGM_FI_PROF_NVDEC5_ACTIVE = 1022
DCGM_FI_PROF_NVDEC6_ACTIVE = 1023
DCGM_FI_PROF_NVDEC7_ACTIVE = 1024
#Ratio of cycles each of the NVJPG engines are active.
DCGM_FI_PROF_NVJPG0_ACTIVE = 1025
DCGM_FI_PROF_NVJPG1_ACTIVE = 1026
DCGM_FI_PROF_NVJPG2_ACTIVE = 1027
DCGM_FI_PROF_NVJPG3_ACTIVE = 1028
DCGM_FI_PROF_NVJPG4_ACTIVE = 1029
DCGM_FI_PROF_NVJPG5_ACTIVE = 1030
DCGM_FI_PROF_NVJPG6_ACTIVE = 1031
DCGM_FI_PROF_NVJPG7_ACTIVE = 1032
#Ratio of cycles each of the NVOFA engines are active.
DCGM_FI_PROF_NVOFA0_ACTIVE = 1033
'''
The per-link number of bytes of active NvLink TX (transmit) or RX (receive) data including both header and payload.
For example: DCGM_FI_PROF_NVLINK_L0_TX_BYTES -> L0 TX
To get the bandwidth for a link, add the RX and TX value together like
total = DCGM_FI_PROF_NVLINK_L0_TX_BYTES + DCGM_FI_PROF_NVLINK_L0_RX_BYTES
'''
DCGM_FI_PROF_NVLINK_L0_TX_BYTES = 1040
DCGM_FI_PROF_NVLINK_L0_RX_BYTES = 1041
DCGM_FI_PROF_NVLINK_L1_TX_BYTES = 1042
DCGM_FI_PROF_NVLINK_L1_RX_BYTES = 1043
DCGM_FI_PROF_NVLINK_L2_TX_BYTES = 1044
DCGM_FI_PROF_NVLINK_L2_RX_BYTES = 1045
DCGM_FI_PROF_NVLINK_L3_TX_BYTES = 1046
DCGM_FI_PROF_NVLINK_L3_RX_BYTES = 1047
DCGM_FI_PROF_NVLINK_L4_TX_BYTES = 1048
DCGM_FI_PROF_NVLINK_L4_RX_BYTES = 1049
DCGM_FI_PROF_NVLINK_L5_TX_BYTES = 1050
DCGM_FI_PROF_NVLINK_L5_RX_BYTES = 1051
DCGM_FI_PROF_NVLINK_L6_TX_BYTES = 1052
DCGM_FI_PROF_NVLINK_L6_RX_BYTES = 1053
DCGM_FI_PROF_NVLINK_L7_TX_BYTES = 1054
DCGM_FI_PROF_NVLINK_L7_RX_BYTES = 1055
DCGM_FI_PROF_NVLINK_L8_TX_BYTES = 1056
DCGM_FI_PROF_NVLINK_L8_RX_BYTES = 1057
DCGM_FI_PROF_NVLINK_L9_TX_BYTES = 1058
DCGM_FI_PROF_NVLINK_L9_RX_BYTES = 1059
DCGM_FI_PROF_NVLINK_L10_TX_BYTES = 1060
DCGM_FI_PROF_NVLINK_L10_RX_BYTES = 1061
DCGM_FI_PROF_NVLINK_L11_TX_BYTES = 1062
DCGM_FI_PROF_NVLINK_L11_RX_BYTES = 1063
DCGM_FI_PROF_NVLINK_L12_TX_BYTES = 1064
DCGM_FI_PROF_NVLINK_L12_RX_BYTES = 1065
DCGM_FI_PROF_NVLINK_L13_TX_BYTES = 1066
DCGM_FI_PROF_NVLINK_L13_RX_BYTES = 1067
DCGM_FI_PROF_NVLINK_L14_TX_BYTES = 1068
DCGM_FI_PROF_NVLINK_L14_RX_BYTES = 1069
DCGM_FI_PROF_NVLINK_L15_TX_BYTES = 1070
DCGM_FI_PROF_NVLINK_L15_RX_BYTES = 1071
DCGM_FI_PROF_NVLINK_L16_TX_BYTES = 1072
DCGM_FI_PROF_NVLINK_L16_RX_BYTES = 1073
DCGM_FI_PROF_NVLINK_L17_TX_BYTES = 1074
DCGM_FI_PROF_NVLINK_L17_RX_BYTES = 1075
DCGM_FI_PROF_NVLINK_THROUGHPUT_FIRST = DCGM_FI_PROF_NVLINK_L0_TX_BYTES
DCGM_FI_PROF_NVLINK_THROUGHPUT_LAST = DCGM_FI_PROF_NVLINK_L17_RX_BYTES
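# Illustrative helper (an assumption added for documentation; not part of the DCGM
# bindings): the per-link profiling fields above are laid out contiguously, with the TX
# field immediately followed by the RX field for each link. Given a link index, this
# returns the (TX, RX) field id pair; summing the two values gives the total bandwidth
# for that link, as described in the comment block above.
def _example_nvlink_link_field_ids(link):
    txFieldId = DCGM_FI_PROF_NVLINK_THROUGHPUT_FIRST + (2 * link)
    rxFieldId = txFieldId + 1
    return txFieldId, rxFieldId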
#greater than maximum fields above. This value can increase in the future
DCGM_FI_MAX_FIELDS = 1076
class struct_c_dcgm_field_meta_t(dcgm_structs._DcgmStructure):
# struct_c_dcgm_field_meta_t structure
pass # opaque handle
dcgm_field_meta_t = POINTER(struct_c_dcgm_field_meta_t)
class _PrintableStructure(dcgm_structs._DcgmStructure):
"""
Abstract class that produces nicer __str__ output than ctypes.Structure.
e.g. instead of:
>>> print(str(obj))
<class_name object at 0x7fdf82fef9e0>
this class will print
class_name(field_name: formatted_value, field_name: formatted_value)
_fmt_ dictionary of <str _field_ name> -> <str format>
e.g. class that has _field_ 'hex_value', c_uint could be formatted with
_fmt_ = {"hex_value" : "%08X"}
to produce nicer output.
Default formatting string for all fields can be set with key "<default>" like:
_fmt_ = {"<default>" : "%d MHz"} # e.g. all values are numbers in MHz.
If not set it's assumed to be just "%s"
Exact format of returned str from this class is subject to change in the future.
"""
_fmt_ = {}
def __str__(self):
result = []
for x in self._fields_:
key = x[0]
value = getattr(self, key)
fmt = "%s"
if key in self._fmt_:
fmt = self._fmt_[key]
elif "<default>" in self._fmt_:
fmt = self._fmt_["<default>"]
result.append(("%s: " + fmt) % (key, value))
return self.__class__.__name__ + "(" + ', '.join(result) + ")"
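# Minimal illustrative subclass (an assumption added for documentation; not part of the
# DCGM bindings) showing the _fmt_ mechanism described in the docstring above:
# str(_ExamplePrintable(hex_value=255)) yields "_ExamplePrintable(hex_value: 000000FF)".
class _ExamplePrintable(_PrintableStructure):
    _fields_ = [('hex_value', c_uint)]
    _fmt_ = {'hex_value': '%08X'}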
# Provides access to functions from dcgm_agent_internal
dcgmFP = dcgm_structs._dcgmGetFunctionPointer
SHORTNAME_LENGTH = 10
UNIT_LENGTH = 4
# Structure to hold formatting information for values
class c_dcgm_field_output_format_t(_PrintableStructure):
_fields_ = [
('shortName', c_char * SHORTNAME_LENGTH),
('unit' , c_char * UNIT_LENGTH),
('width' , c_short)
]
TAG_LENGTH = 48
# Structure to represent field metadata
class c_dcgm_field_meta_t(_PrintableStructure):
_fields_ = [
# version must always be first
('fieldId', c_short),
('fieldType', c_char),
('size', c_ubyte),
('tag', c_char * TAG_LENGTH),
('scope', c_int),
('valueFormat', c_dcgm_field_output_format_t)
]
# Class for maintaining properties for each sampling type like Power, Utilization and Clock.
class pySamplingProperties:
'''
The instance of this class is used to hold information related to each sampling event type.
'''
def __init__(self, name, sampling_type, sample_val_type, timeIntervalIdle, timeIntervalBoost, min_value, max_value):
self.name = name
self.sampling_type = sampling_type
self.timeIntervalIdle = timeIntervalIdle
self.timeIntervalBoost = timeIntervalBoost
self.min_value = min_value
self.max_value = max_value
self.sample_val_type = sample_val_type
def DcgmFieldsInit():
fn = dcgmFP("DcgmFieldsInit")
ret = fn()
assert ret == 0, "Got return %d from DcgmFieldsInit" % ret
def DcgmFieldGetById(fieldId):
'''
Get metadata for a field, given its fieldId
:param fieldId: Field ID to get metadata for
:return: c_dcgm_field_meta_t struct on success. None on error.
'''
DcgmFieldsInit()
fn = dcgmFP("DcgmFieldGetById")
fn.restype = POINTER(c_dcgm_field_meta_t)
c_field_meta_ptr = fn(fieldId)
if not c_field_meta_ptr:
return None
retVal = c_dcgm_field_meta_t()
memmove(addressof(retVal), c_field_meta_ptr, sizeof(retVal))
return retVal
def DcgmFieldGetByTag(tag):
'''
Get metadata for a field, given its string tag
:param tag: Field tag to get metadata for. Example 'brand'
:return: c_dcgm_field_meta_t struct on success. None on error.
'''
DcgmFieldsInit()
fn = dcgmFP("DcgmFieldGetByTag")
fn.restype = POINTER(c_dcgm_field_meta_t)
c_field_meta_ptr = fn(c_char_p(tag.encode('utf-8')))
if not c_field_meta_ptr:
return None
retVal = c_dcgm_field_meta_t()
memmove(addressof(retVal), c_field_meta_ptr, sizeof(retVal))
return retVal
def DcgmFieldGetTagById(fieldId):
field = DcgmFieldGetById(fieldId)
if field:
return field.tag
else:
return None
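# Illustrative usage (an assumption added for documentation): look up field metadata by
# tag and by id. This requires the DCGM shared library to be loadable at runtime, so it
# is guarded behind __main__ and has no effect when the module is imported.
if __name__ == '__main__':
    fieldMeta = DcgmFieldGetByTag('brand')
    if fieldMeta is not None:
        print("Field id %d has tag %s" % (fieldMeta.fieldId, fieldMeta.tag.decode('utf-8')))
    print(DcgmFieldGetTagById(DCGM_FI_DEV_GPU_TEMP))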
| DCGM-master | testing/python3/dcgm_fields.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_field_injection_helpers
import dcgm_fields
import dcgm_structs
import pydcgm
import argparse
import sys
import time
##############################################################################
# NOTE: Although DCGM supports injecting floating point field values, the argument parser currently only accepts
# integer values for the fieldValue argument
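# Example invocations (illustrative; field ids are defined in dcgm_fields.py):
#   Inject a single value of 95 for field 150 (DCGM_FI_DEV_GPU_TEMP) on GPU 0:
#     python inject_field_value.py -f 150 -i 0 -v 95
#   Inject that value every 0.5 seconds, 10 times:
#     python inject_field_value.py -f 150 -i 0 -v 95 --loop --interval 0.5 --iterations 10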
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--fieldId', dest='fieldId', type=int, required=True)
parser.add_argument('-i', dest='gpuId', type=int, default=0)
parser.add_argument('-o', '--offset', dest='offset', type=float, default=0)
parser.add_argument('-v', '--value', dest='fieldValue', type=int, required=True)
parser.add_argument('-l', '--loop', dest='loop', action='store_true')
parser.add_argument('--interval', dest='interval', type=float)
parser.add_argument('--iterations', dest='iterations', type=int,
help='Set to 0 to insert the given value until stopped via SIGINT')
args = parser.parse_args()
if args.loop and (args.interval is None or args.iterations is None):
print("Must specify interval and iterations when looping")
sys.exit(-1)
if args.iterations is not None and args.iterations < 0:
print("iterations must be >= 0")
sys.exit(-1)
handle = pydcgm.DcgmHandle(None, 'localhost', dcgm_structs.DCGM_OPERATION_MODE_AUTO)
if not args.loop:
dcgm_field_injection_helpers.inject_value(handle.handle, args.gpuId, args.fieldId, args.fieldValue, args.offset)
sys.exit(0)
# loop
try:
i = 0
while args.iterations == 0 or i < args.iterations:
dcgm_field_injection_helpers.inject_value(handle.handle, args.gpuId, args.fieldId, args.fieldValue, args.offset)
time.sleep(args.interval)
i += 1
except KeyboardInterrupt:
print("Exiting")
sys.exit(0)
| DCGM-master | testing/python3/inject_field_value.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from DcgmReader import DcgmReader
import dcgm_fields
import argparse
import sys
import time
##############################################################################
# Parse arguments
parser = argparse.ArgumentParser(
description="""
Verifies that DCGM reports the expected value for the specified field id and GPU.
Waits a maximum of maxWait seconds to see the expectedValue for the given field.
The expectedValue must be reported by DCGM at least numMatches times for the check to be considered successful.
Returns 0 on success and prints "Passed" to stdout.
Returns 20 on failure and prints "Failed" to stdout.
"""
)
parser.add_argument('-f', '--fieldId', type=int, required=True)
parser.add_argument('-v', '--expectedValue', type=int, required=True, help='The expected value for the field')
parser.add_argument('-i', '--gpuId', type=int, required=True)
parser.add_argument('-w', '--maxWait', type=float, required=True,
help='The maximum number of seconds the script should wait for the expected value before failing')
parser.add_argument('--checkInterval', type=float, required=False, default=0.5,
help='How often the field value should be updated in seconds')
parser.add_argument('-n', '--numMatches', type=int, required=False, default=3,
help='The number of occurrences of the expected value to look for before treating it as a success')
args = parser.parse_args()
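# Example invocation (illustrative): wait up to 10 seconds for GPU 0 to report the value
# 95 for field 150 (DCGM_FI_DEV_GPU_TEMP) at least 3 times (the default):
#   python verify_field_value.py -f 150 -v 95 -i 0 -w 10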
##############################################################################
# Constants
RET_VALUE_PASS = 0
RET_VALUE_FAILED = 20
# Global vars
MOST_RECENT_TS = 0
NUM_MATCHES = 0
PASSED = False
class FieldReader(DcgmReader):
def CustomFieldHandler(self, gpuId, fieldId, fieldTag, val):
'''
This method is called once for each field for each GPU each
time that its Process() method is invoked, and it will be skipped
for blank values and fields in the ignore list.
fieldTag is the field name, and val is a dcgm_field_helpers.DcgmFieldValue instance.
'''
global MOST_RECENT_TS
global NUM_MATCHES
global PASSED
if val.ts > MOST_RECENT_TS:
MOST_RECENT_TS = val.ts
else:
return
if val.value == args.expectedValue:
NUM_MATCHES += 1
if NUM_MATCHES == args.numMatches:
PASSED = True
return
def main():
interval_in_usec = int(args.checkInterval * 1000000)
fr = FieldReader(fieldIds=[args.fieldId], updateFrequency=interval_in_usec, gpuIds=[args.gpuId])
start = time.time()
while True:
fr.Process()
if PASSED:
print("Passed")
return RET_VALUE_PASS
if (time.time() - start > args.maxWait):
print("Failed")
return RET_VALUE_FAILED
time.sleep(args.checkInterval)
if __name__ == "__main__":
sys.exit(main())
| DCGM-master | testing/python3/verify_field_value.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_fields
import argparse
import sys
def create_fake_gpus(handle, gpuCount):
cfe = dcgm_structs_internal.c_dcgmCreateFakeEntities_v2()
cfe.numToCreate = 0
fakeGpuList = []
for i in range(0, gpuCount):
cfe.entityList[cfe.numToCreate].entity.entityGroupId = dcgm_fields.DCGM_FE_GPU
cfe.numToCreate += 1
updated = dcgm_agent_internal.dcgmCreateFakeEntities(handle, cfe)
for i in range(0, updated.numToCreate):
if updated.entityList[i].entity.entityGroupId == dcgm_fields.DCGM_FE_GPU:
fakeGpuList.append(updated.entityList[i].entity.entityId)
return fakeGpuList
def create_fake_gpu_instances(handle, gpuIds, instanceCount):
cfe = dcgm_structs_internal.c_dcgmCreateFakeEntities_v2()
cfe.numToCreate = 0
fakeInstanceMap = {}
if instanceCount > 0:
for i in range(0, instanceCount):
cfe.entityList[cfe.numToCreate].parent.entityGroupId = dcgm_fields.DCGM_FE_GPU
gpuListIndex = cfe.numToCreate % len(gpuIds)
cfe.entityList[cfe.numToCreate].parent.entityId = gpuIds[gpuListIndex]
cfe.entityList[cfe.numToCreate].entity.entityGroupId = dcgm_fields.DCGM_FE_GPU_I
cfe.numToCreate += 1
# Create the instances first so we can control which GPU the compute instances are placed on
updated = dcgm_agent_internal.dcgmCreateFakeEntities(handle, cfe)
for i in range(0, updated.numToCreate):
if updated.entityList[i].entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_I:
fakeInstanceMap[updated.entityList[i].entity.entityId] = updated.entityList[i].parent.entityId
return fakeInstanceMap
def create_fake_compute_instances(handle, parentIds, ciCount):
fakeCIMap = {}
if ciCount > 0:
cfe = dcgm_structs_internal.c_dcgmCreateFakeEntities_v2()
instanceIndex = 0
for i in range(0, ciCount):
cfe.entityList[cfe.numToCreate].parent.entityGroupId = dcgm_fields.DCGM_FE_GPU_I
if instanceIndex >= len(parentIds):
instanceIndex = 0
cfe.entityList[cfe.numToCreate].parent.entityId = parentIds[instanceIndex]
instanceIndex = instanceIndex + 1
cfe.entityList[cfe.numToCreate].entity.entityGroupId = dcgm_fields.DCGM_FE_GPU_CI
cfe.numToCreate += 1
updated = dcgm_agent_internal.dcgmCreateFakeEntities(handle, cfe)
for i in range(0, updated.numToCreate):
if updated.entityList[i].entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_CI:
fakeCIMap[updated.entityList[i].entity.entityId] = updated.entityList[i].parent.entityId
return fakeCIMap
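# Example invocation (illustrative): create 2 fake GPUs, 4 fake GPU instances spread
# across them, and 8 fake compute instances spread across those instances:
#   python make_fake_instances.py -g 2 -i 4 -c 8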
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gpu-count', type=int, default=1, dest='gpuCount',
help='Specify the number of fake GPUs to create')
parser.add_argument('-i', '--gpu-instance-count', type=int, default=2, dest='gpuInstanceCount',
help='Specify the number of fake GPU instances to create')
parser.add_argument('-c', '--compute-instance-count', type=int, default=2, dest='ciCount',
help='Specify the number of fake compute instances to create')
args = parser.parse_args()
if args.gpuCount < 1:
print("GPU count must be 1 or larger.")
sys.exit(1)
if args.ciCount > 0 and args.gpuInstanceCount < 1:
print("GPU instance count must be greater than 1 if compute instance count is greater than 1")
sys.exit(1)
handle = pydcgm.DcgmHandle(None, "localhost", dcgm_structs.DCGM_OPERATION_MODE_AUTO)
gpuIds = create_fake_gpus(handle.handle, args.gpuCount)
if args.gpuInstanceCount > 0:
instanceMap = create_fake_gpu_instances(handle.handle, gpuIds, args.gpuInstanceCount)
if args.ciCount > 0:
create_fake_compute_instances(handle.handle, list(instanceMap.keys()), args.ciCount)
print("Created {} fake GPUs, {} fake GPU instances, and {} fake compute instances".format(args.gpuCount, args.gpuInstanceCount, args.ciCount))
if __name__ == "__main__":
main()
| DCGM-master | testing/python3/make_fake_instances.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dcgm_fields
import time
import logging
import os
import argparse
import sys
import signal
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)
from DcgmReader import DcgmReader
from common import dcgm_client_cli_parser as cli
if 'DCGM_TESTING_FRAMEWORK' in os.environ:
try:
from prometheus_tester_api import start_http_server, Gauge
except:
logging.critical("prometheus_tester_api missing, reinstall test framework.")
sys.exit(3)
else:
try:
from prometheus_client import start_http_server, Gauge
except ImportError:
pass
logging.critical("prometheus_client not installed, please run: \"pip install prometheus_client\"")
sys.exit(3)
DEFAULT_FIELDS = [
dcgm_fields.DCGM_FI_DEV_PCI_BUSID, #Needed for plugin_instance
dcgm_fields.DCGM_FI_DEV_POWER_USAGE,
dcgm_fields.DCGM_FI_DEV_GPU_TEMP,
dcgm_fields.DCGM_FI_DEV_SM_CLOCK,
dcgm_fields.DCGM_FI_DEV_GPU_UTIL,
dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING,
dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_FREE,
dcgm_fields.DCGM_FI_DEV_FB_USED,
dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION,
dcgm_fields.DCGM_FI_DEV_XID_ERRORS,
dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL,
]
class DcgmPrometheus(DcgmReader):
###########################################################################
def __init__(self):
#Have DCGM update its watches twice as fast as our publish interval so we don't fall out of phase with it
updateIntervalUsec = int((1000000 * g_settings['prometheusPublishInterval']) / 2)
#Add our PID to our field group name so we can have multiple instances running
fieldGroupName = 'dcgm_prometheus_' + str(os.getpid())
DcgmReader.__init__(self, ignoreList=g_settings['ignoreList'], fieldIds=g_settings['publishFieldIds'],
updateFrequency=updateIntervalUsec,
fieldGroupName=fieldGroupName, hostname=g_settings['dcgmHostName'])
self.m_existingGauge = {}
###########################################################################
'''
This function is implemented from the base class : DcgmReader. It converts each
field / value from the fvs dictionary to a gauge and publishes the gauge to the
prometheus client server.
@params:
fvs : The fieldvalue dictionary that contains info about the values of field Ids for each gpuId.
'''
def CustomDataHandler(self,fvs):
if not self.m_existingGauge:
self.SetupGauges()
for _, fieldIds in self.m_publishFields.items():
if fieldIds is None:
continue;
for fieldId in fieldIds:
if fieldId in self.m_dcgmIgnoreFields:
continue
g = self.m_existingGauge[fieldId]
for gpuId in list(fvs.keys()):
gpuFv = fvs[gpuId]
val = gpuFv[fieldId][-1]
#Skip blank values. Otherwise, we'd have to insert a placeholder blank value based on the fieldId
if val.isBlank:
continue
gpuUuid = self.m_gpuIdToUUId[gpuId]
gpuBusId = self.m_gpuIdToBusId[gpuId]
gpuUniqueId = gpuUuid if g_settings['sendUuid'] else gpuBusId
# pylint doesn't find the labels member for Gauge, but it exists. Ignore the warning
g.labels(gpuId, gpuUniqueId).set(val.value) # pylint: disable=no-member
logging.debug('Sent GPU %d %s %s = %s' % (gpuId, gpuUniqueId, self.m_fieldIdToInfo[fieldId].tag, str(val.value)))
###############################################################################
'''
NOTE: even though some fields are monotonically increasing and therefore fit the mold to be
counters, all are published as gauges so that DCGM is the sole authority on the state of the
system, preventing problems around down times, driver reboots, and the unlikely event of
flashing the inforom.
For specific information about which fields monotonically increase, see the API guide or
dcgm_fields.h
'''
def SetupGauges(self):
for _, fieldIds in self.m_publishFields.items():
if fieldIds is None:
continue;
for fieldId in fieldIds:
if fieldId in self.m_dcgmIgnoreFields:
continue
uniqueIdName = 'GpuUuid' if g_settings['sendUuid'] else 'GpuBusID'
fieldTag = self.m_fieldIdToInfo[fieldId].tag
self.m_existingGauge[fieldId] = Gauge("dcgm_"+fieldTag,'DCGM_PROMETHEUS',['GpuID', uniqueIdName])
###############################################################################
'''
Scrape the fieldvalue data and publish. This function calls the process function of
the base class DcgmReader.
'''
def Scrape(self, data=None):
return self.Process()
###############################################################################
def LogBasicInformation(self):
# Reconnect causes everything to get initialized
self.Reconnect()
logging.info('Started prometheus client')
fieldTagList = ''
for _, fieldIds in self.m_publishFields.items():
if fieldIds is None:
continue
for fieldId in fieldIds:
if fieldId in self.m_dcgmIgnoreFields:
continue
if fieldTagList == '':
fieldTagList = self.m_fieldIdToInfo[fieldId].tag
else:
fieldTagList = fieldTagList + ", %s" % (self.m_fieldIdToInfo[fieldId].tag)
logging.info("Publishing fields: '%s'" % (fieldTagList))
###############################################################################
def LogError(self, msg):
logging.error(msg)
###############################################################################
def LogInfo(self, msg):
logging.info(msg)
###############################################################################
def exit_handler(signum, frame):
g_settings['shouldExit'] = True
###############################################################################
def main_loop(prometheus_obj, publish_interval):
try:
while True:
prometheus_obj.Scrape(prometheus_obj)
time.sleep(publish_interval)
if g_settings['shouldExit'] == True:
prometheus_obj.LogInfo('Received a signal...shutting down')
break
except KeyboardInterrupt:
print("Caught CTRL-C. Exiting")
###############################################################################
def initialize_globals():
'''
Initialize the global settings dictionary used by this script. CLI parsing fills in the rest.
'''
global g_settings
g_settings = {}
g_settings['shouldExit'] = False
'''
List of the ids that are present in g_settings['publishFieldIds'] but ignored for watch.
'''
g_settings['ignoreList'] = [dcgm_fields.DCGM_FI_DEV_PCI_BUSID, ]
'''
Those are initialized by the CLI parser. We only list them here for clarity.
'''
for key in [
'dcgmHostName',
'prometheusPort',
'prometheusPublishInterval',
'publishFieldIds',
]:
g_settings[key] = None
###############################################################################
def parse_command_line():
parser = cli.create_parser(
name='Prometheus',
field_ids=DEFAULT_FIELDS,
)
cli.add_custom_argument(parser, '--send-uuid', dest='send_uuid', default=False,
action='store_true', help='Send GPU UUID instead of bus id')
args = cli.run_parser(parser)
field_ids = cli.get_field_ids(args)
numeric_log_level = cli.get_log_level(args)
# Defaults to localhost, so we need to set it to None
if args.embedded:
g_settings['dcgmHostName'] = None
else:
g_settings['dcgmHostName'] = args.hostname
g_settings['prometheusPort'] = args.publish_port
g_settings['prometheusPublishInterval'] = args.interval
logfile = args.logfile
g_settings['publishFieldIds'] = field_ids
g_settings['sendUuid'] = args.send_uuid
if logfile is not None:
logging.basicConfig(level=numeric_log_level, filename=logfile, filemode='w+', format='%(asctime)s %(levelname)s: %(message)s')
else:
logging.basicConfig(level=numeric_log_level, stream=sys.stdout, filemode='w+', format='%(asctime)s %(levelname)s: %(message)s')
###############################################################################
def initialize_signal_handlers():
signal.signal(signal.SIGINT, exit_handler)
signal.signal(signal.SIGTERM, exit_handler)
###############################################################################
def main():
initialize_globals()
initialize_signal_handlers()
parse_command_line()
prometheus_obj = DcgmPrometheus()
logging.critical("dcgm_prometheus has been deprecated and will be removed in a future release. Please use DCGM exporter for Prometheus integration")
logging.info("Starting Prometheus server on port " + str(g_settings['prometheusPort']))
#start prometheus client server.
start_http_server(g_settings['prometheusPort'])
prometheus_obj.LogBasicInformation()
main_loop(prometheus_obj, g_settings['prometheusPublishInterval'])
prometheus_obj.Shutdown()
if __name__ == '__main__':
main()
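# --------------------------------------------------------------------------- #
# Illustrative sketch (comments only, not part of the original script) of the
# prometheus_client Gauge pattern used by SetupGauges()/CustomDataHandler()
# above. The metric name, port, and label values below are made up for the
# example.
#
#   from prometheus_client import Gauge, start_http_server
#
#   g = Gauge('dcgm_gpu_temp', 'DCGM_PROMETHEUS', ['GpuID', 'GpuUuid'])
#   start_http_server(8000)
#   g.labels(0, 'GPU-00000000-0000-0000-0000-000000000000').set(47)
# --------------------------------------------------------------------------- #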
| DCGM-master | testing/python3/dcgm_prometheus.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from DcgmReader import DcgmReader
from json import dumps as toJson
from os import environ
from socket import socket, AF_INET, SOCK_DGRAM
from time import sleep
import dcgm_fields
import logging
class DcgmJsonReader(DcgmReader):
###########################################################################
def ConvertFieldIdToTag(self, fieldId):
return self.m_fieldIdToInfo[fieldId].tag
###########################################################################
def PrepareJson(self, gpuId, obj):
'''
Receive an object with measurements and turn it into an equivalent JSON string. We
add the GPU UUID first.
'''
uuid = self.m_gpuIdToUUId[gpuId]
# This mutates the original object, but it shouldn't be a problem here
obj['gpu_uuid'] = uuid
return toJson(obj)
###########################################################################
def CustomDataHandler(self, fvs):
for gpuId in list(fvs.keys()):
# We don't need the keys because each value has a `fieldId`
# So just get the values
gpuData = list(fvs[gpuId].values())
# Get the values from FV (which is a list of values)
valuesListOfLists = [datum.values for datum in gpuData]
# We only want the last measurement
lastValueList = [l[-1] for l in valuesListOfLists]
# Turn FV into a conventional Python Object which can be converted to JSON
outObject = {self.ConvertFieldIdToTag(i.fieldId): i.value for i in lastValueList}
outJson = self.PrepareJson(gpuId, outObject)
self.CustomJsonHandler(outJson)
###########################################################################
def CustomJsonHandler(self, outJson):
'''
This method should be overridden by subclasses to handle the JSON objects
received.
'''
logging.warning('CustomJsonHandler has not been overridden')
logging.info(outJson)
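# Illustrative usage sketch (not part of the original module). The "fieldIds"
# keyword passed to the constructor is an assumption about the DcgmReader base
# class; adjust it if your DcgmReader version differs.
if __name__ == '__main__':
    class PrintingJsonReader(DcgmJsonReader):
        def CustomJsonHandler(self, outJson):
            # A real consumer might POST the record to a collector instead.
            print(outJson)

    reader = PrintingJsonReader(fieldIds=[dcgm_fields.DCGM_FI_DEV_GPU_TEMP])
    reader.Process()  # reads the latest values once and emits JSON per GPU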
| DCGM-master | testing/python3/DcgmJsonReader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_agent
import dcgm_structs
import dcgm_fields
import ctypes
class DcgmSystemDiscovery:
'''
Constructor
'''
def __init__(self, dcgmHandle):
self._dcgmHandle = dcgmHandle
'''
Get all IDs of the GPUs that DCGM knows about. To get only GPUs that DCGM supports,
use GetAllSupportedGpuIds().
Returns an array of GPU IDs. Each of these can be passed to DcgmGroup::AddGpu()
'''
def GetAllGpuIds(self):
gpuIds = dcgm_agent.dcgmGetAllDevices(self._dcgmHandle.handle)
return gpuIds
'''
Get all IDs of the GPUs that DCGM supports. This will exclude unsupported
GPUs
Returns an array of GPU IDs. Each of these can be passed to DcgmGroup::AddGpu()
'''
def GetAllSupportedGpuIds(self):
gpuIds = dcgm_agent.dcgmGetAllSupportedDevices(self._dcgmHandle.handle)
return gpuIds
'''
Get some basic GPU attributes for a given GPU ID.
Returns a dcgm_structs.c_dcgmDeviceAttributes_v3() object for the given GPU
'''
def GetGpuAttributes(self, gpuId):
return dcgm_agent.dcgmGetDeviceAttributes(self._dcgmHandle.handle, gpuId)
'''
Get topology information for a given GPU ID
Returns a dcgm_structs.c_dcgmDeviceTopology_v1 structure representing the topology for the given GPU
'''
def GetGpuTopology(self, gpuId):
return dcgm_agent.dcgmGetDeviceTopology(self._dcgmHandle.handle, gpuId)
'''
Get all entityIds of the entities that DCGM knows about.
entityGroupId IN: DCGM_FE_? constant of the entity group to fetch the entities of
onlySupported IN: Boolean as to whether to fetch only entities that are supported by DCGM (True)
or all entity IDs (False)
Returns an array of entity IDs. Each of these can be passed to DcgmGroup::AddEntity()
'''
def GetEntityGroupEntities(self, entityGroupId, onlySupported):
flags = 0
if onlySupported:
flags |= dcgm_structs.DCGM_GEGE_FLAG_ONLY_SUPPORTED
entityIds = dcgm_agent.dcgmGetEntityGroupEntities(self._dcgmHandle.handle, entityGroupId, flags)
return entityIds
'''
Get the status of all of the NvLink links in the system.
Returns a dcgm_structs.c_dcgmNvLinkStatus_v3 object.
'''
def GetNvLinkLinkStatus(self):
return dcgm_agent.dcgmGetNvLinkLinkStatus(self._dcgmHandle.handle)
'''
From a bitmask of input gpu ids, return a bitmask of numGpus GPUs which identifies the topologically
closest GPUs to use for a single job. DCGM will consider CPU affinities and NVLink connection speeds
to determine the closest.
hintFlags can instruct DCGM to consider GPU health or not. By default, unhealthy GPUs are excluded from
consideration.
'''
def SelectGpusByTopology(self, inputGpuIds, numGpus, hintFlags):
return dcgm_agent.dcgmSelectGpusByTopology(self._dcgmHandle.handle, inputGpuIds, numGpus, hintFlags)
class DcgmSystemIntrospect:
'''
Class to access the system-wide introspection modules of DCGM
'''
def __init__(self, dcgmHandle):
self._handle = dcgmHandle
self.memory = DcgmSystemIntrospectMemory(dcgmHandle)
self.cpuUtil = DcgmSystemIntrospectCpuUtil(dcgmHandle)
def UpdateAll(self, waitForUpdate=True):
dcgm_agent.dcgmIntrospectUpdateAll(self._handle.handle, waitForUpdate)
class DcgmSystemIntrospectMemory:
'''
Class to access information about the memory usage of DCGM itself
'''
def __init__(self, dcgmHandle):
self._dcgmHandle = dcgmHandle
def GetForHostengine(self, waitIfNoData=True):
'''
Retrieve the total amount of virtual memory that the hostengine process is currently using.
This measurement represents both the resident set size (what is currently in RAM) and
the swapped memory that belongs to the process.
waitIfNoData: wait for metadata to be updated if it's not available
Returns a dcgm_structs.c_dcgmIntrospectMemory_v1 object
Raises an exception for DCGM_ST_NO_DATA if no data is available yet and \ref waitIfNoData is False
'''
return dcgm_agent.dcgmIntrospectGetHostengineMemoryUsage(self._dcgmHandle.handle, waitIfNoData)
class DcgmSystemIntrospectCpuUtil:
'''
Class to access information about the CPU Utilization of DCGM
'''
def __init__(self, dcgmHandle):
self._dcgmHandle = dcgmHandle
def GetForHostengine(self, waitIfNoData=True):
'''
Get the current CPU Utilization of the hostengine process.
waitIfNoData: wait for metadata to be updated if it's not available
Returns a dcgm_structs.c_dcgmIntrospectCpuUtil_v1 object
Raises an exception for DCGM_ST_NO_DATA if no data is available yet and \ref waitIfNoData is False
'''
return dcgm_agent.dcgmIntrospectGetHostengineCpuUtilization(self._dcgmHandle.handle, waitIfNoData)
'''
Class to encapsulate DCGM field-metadata requests
'''
class DcgmSystemFields:
def GetFieldById(self, fieldId):
'''
Get a field's metadata by its dcgm_fields.DCGM_FI_* field ID
fieldId: dcgm_fields.DCGM_FI_* field ID of the field
Returns a dcgm_fields.c_dcgm_field_meta_t struct on success or None on error.
'''
return dcgm_fields.DcgmFieldGetById(fieldId)
def GetFieldByTag(self, tag):
'''
Get a field's metadata by its tag name. Ex: 'brand'
tag: Tag name of the field
Returns a dcgm_fields.c_dcgm_field_meta_t struct on success or None on error.
'''
return dcgm_fields.DcgmFieldGetByTag(tag)
'''
Class to encapsulate DCGM module management and introspection
'''
class DcgmSystemModules:
'''
Constructor
'''
def __init__(self, dcgmHandle):
self._dcgmHandle = dcgmHandle
'''
Denylist a module from being loaded by DCGM.
moduleId a dcgm_structs.dcgmModuleId* ID of the module to denylist
Returns: Nothing.
Raises a DCGM_ST_IN_USE exception if the module was already loaded
'''
def Denylist(self, moduleId):
dcgm_agent.dcgmModuleDenylist(self._dcgmHandle.handle, moduleId)
'''
Get the statuses of all of the modules in DCGM
Returns: a dcgm_structs.c_dcgmModuleGetStatuses_v1 structure.
'''
def GetStatuses(self):
return dcgm_agent.dcgmModuleGetStatuses(self._dcgmHandle.handle)
'''
Class to encapsulate DCGM profiling
'''
class DcgmSystemProfiling:
'''
Constructor
'''
def __init__(self, dcgmHandle):
self._dcgmHandle = dcgmHandle
'''
Pause profiling activities in DCGM. This should be used when you are monitoring profiling fields
from DCGM but want to be able to still run developer tools like nvprof, nsight systems, and nsight compute.
Profiling fields start with DCGM_PROF_ and are in the field ID range 1001-1012.
Call this API before you launch one of those tools and Resume() after the tool has completed.
DCGM will save BLANK values while profiling is paused.
Calling this while profiling activities are already paused is fine and will be treated as a no-op.
'''
def Pause(self):
return dcgm_agent.dcgmProfPause(self._dcgmHandle.handle)
'''
Resume profiling activities in DCGM that were previously paused with Pause().
Call this API after you have completed running other NVIDIA developer tools to reenable DCGM
profiling metrics.
DCGM will save BLANK values while profiling is paused.
Calling this while profiling activities have already been resumed is fine and will be treated as a no-op.
'''
def Resume(self):
return dcgm_agent.dcgmProfResume(self._dcgmHandle.handle)
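# Illustrative sketch (comments only, not part of the original module) of the
# pause/resume pattern described above, wrapped around an external profiler:
#
#   system.profiling.Pause()
#   # ... launch nvprof / Nsight Systems / Nsight Compute here ...
#   system.profiling.Resume()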
'''
Class to encapsulate global DCGM methods. These apply to a single DcgmHandle, provided to the constructor
'''
class DcgmSystem:
'''
Constructor
dcgmHandle is a pydcgm.DcgmHandle instance of the connection that will be used by all methods of this class
'''
def __init__(self, dcgmHandle):
self._dcgmHandle = dcgmHandle
#Child classes
self.discovery = DcgmSystemDiscovery(self._dcgmHandle)
self.introspect = DcgmSystemIntrospect(self._dcgmHandle)
self.fields = DcgmSystemFields()
self.modules = DcgmSystemModules(self._dcgmHandle)
self.profiling = DcgmSystemProfiling(self._dcgmHandle)
'''
Request that the host engine perform a field value update cycle. If the host
engine was started in DCGM_OPERATION_MODE_MANUAL, calling this method is
the only way that field values will be updated.
Note that performing a field value update cycle does not update every field.
It only updates fields that are newly watched or fields that haven't updated
in enough time to warrant updating again, based on their update frequency.
waitForUpdate specifies whether this function call should block until the
field value update loop is complete or not. Use True if you intend to query
values immediately after calling this.
'''
def UpdateAllFields(self, waitForUpdate):
ret = dcgm_agent.dcgmUpdateAllFields(self._dcgmHandle.handle, waitForUpdate)
#Throw an exception on error
dcgm_structs._dcgmCheckReturn(ret)
'''
Get a DcgmGroup instance for the default all-GPUs group. This object is used to
perform operations on a group of GPUs. See DcgmGroup.py for details.
AddGpu() and RemoveGpu() operations are not allowed on the default group
'''
def GetDefaultGroup(self):
return pydcgm.DcgmGroup(self._dcgmHandle, groupId=dcgm_structs.DCGM_GROUP_ALL_GPUS)
'''
Get an instance of DcgmGroup with no GPUs. Call AddGpu() on the returned
object with GPU IDs from GetAllGpuIds() before performing actions on
the returned DcgmGroup instance.
groupName is the name of the group to create in the host engine. This name must be
unique.
Note: The group will be deleted from the host engine when the returned object goes out of scope
'''
def GetEmptyGroup(self, groupName):
return pydcgm.DcgmGroup(self._dcgmHandle, groupName=groupName)
'''
Get an instance of DcgmGroup populated with the gpuIds provided
groupName is the name of the group to create in the host engine. This name must be
unique.
gpuIds is the list of GPU IDs to add to the group
Note: The group will be deleted from the host engine when the returned object goes out of scope
'''
def GetGroupWithGpuIds(self, groupName, gpuIds):
newGroup = pydcgm.DcgmGroup(self._dcgmHandle, groupName=groupName)
for gpuId in gpuIds:
newGroup.AddGpu(gpuId)
return newGroup
'''
Get an instance of DcgmGroup populated with the provided entities
groupName is the name of the group to create in the host engine. This name must be
unique.
entities is the list of entity pairs (type and id) to add to the group
Note: The group will be deleted from the host engine when the returned object goes out of scope
'''
def GetGroupWithEntities(self, groupName, entities):
group = pydcgm.DcgmGroup(self._dcgmHandle, groupName=groupName)
for entity in entities:
group.AddEntity(entity.entityGroupId, entity.entityId)
return group
'''
Get ids of all DcgmGroups of GPUs. This returns a list containing the ids of the DcgmGroups.
'''
def GetAllGroupIds(self):
return dcgm_agent.dcgmGroupGetAllIds(self._dcgmHandle.handle)
'''
Get all of the field groups in the system
'''
def GetAllFieldGroups(self):
return dcgm_agent.dcgmFieldGroupGetAll(self._dcgmHandle.handle)
'''
Get a field group's id by its name.
Returns: Field group ID if found
None if not found
'''
def GetFieldGroupIdByName(self, name):
allGroups = self.GetAllFieldGroups()
for i in range(0, allGroups.numFieldGroups):
if allGroups.fieldGroups[i].fieldGroupName == name:
return ctypes.c_void_p(allGroups.fieldGroups[i].fieldGroupId)
return None
def PauseTelemetryForDiag(self):
"""Pause DCGM modules from updating field values."""
import dcgm_agent_internal
dcgm_agent_internal.dcgmPauseTelemetryForDiag(self._dcgmHandle.handle)
def ResumeTelemetryForDiag(self):
"""Resume previously paused DCGM modules so that they can update field values."""
import dcgm_agent_internal
dcgm_agent_internal.dcgmResumeTelemetryForDiag(self._dcgmHandle.handle)
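if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): connect to a
    # standalone host engine on localhost and print the supported GPUs. The
    # DcgmHandle "ipAddress" keyword, DcgmHandle.Shutdown(), and the attribute
    # path "identifiers.deviceName" are assumptions about pydcgm/dcgm_structs.
    handle = pydcgm.DcgmHandle(ipAddress='localhost')
    system = DcgmSystem(handle)
    for gpuId in system.discovery.GetAllSupportedGpuIds():
        attrs = system.discovery.GetGpuAttributes(gpuId)
        print('GPU %d: %s' % (gpuId, attrs.identifiers.deviceName))
    handle.Shutdown()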
| DCGM-master | testing/python3/DcgmSystem.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Library for executing processes
from apps.app_runner import *
# Libraries that wrap common command line applications
# and provide easier to use python interface
from apps.dcgm_stub_runner_app import *
from apps.xid_app import *
from apps.cuda_ctx_create_app import *
from apps.nvidia_smi_app import *
from apps.lsof_app import *
from apps.lspci_app import *
from cuda.cuda_utils import *
# Once you import the apps module you can refer
# to all classes as apps.ClassName, e.g. apps.XidApp instead of apps.xid_app.XidApp (a bit shorter)
| DCGM-master | testing/python3/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def _python_version_check():
import sys
python_version = sys.version.split(None, 1)[0]
if python_version < '3':
print('[ERROR] Detected Python version {}. These bindings are for Python 3.5+. Please load the Python 2 bindings found at /usr/local/dcgm/bindings'.format(python_version))
sys.exit(1)
_python_version_check()
#Bring classes into this namespace
from DcgmHandle import *
from DcgmGroup import *
from DcgmStatus import *
from DcgmSystem import *
from DcgmFieldGroup import *
import os
if '__DCGM_TESTING_FRAMEWORK_ACTIVE' in os.environ and os.environ['__DCGM_TESTING_FRAMEWORK_ACTIVE'] == '1':
import utils
import dcgm_structs
dcgm_structs._dcgmInit(utils.get_testing_framework_library_path())
'''
Define a unique exception type we will return so that callers can distinguish our exceptions from python standard ones
'''
class DcgmException(Exception):
pass
| DCGM-master | testing/python3/pydcgm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import test_utils
import logger
import option_parser
import datetime
_erisTestNumber = 0
class ProgressPrinter(object):
def subtest_start(self, subtest):
pass
def subtest_finish(self, subtest):
pass
class DefaultProgressPrinter(ProgressPrinter):
def subtest_start(self, subtest):
global _erisTestNumber
# defer the quiet tests. If they don't fail there's no need to print their name,
# but print all non-quiet tests right away
if option_parser.options.eris:
if subtest.depth == 3:
self.childrenTest = ""
_erisTestNumber += 1
subtest._DefaultProgressPrinter_header_log_id = logger.info("\n&&&& RUNNING %s - %d\n" % (subtest.name, _erisTestNumber), defer=subtest.quiet)
else:
subtest._DefaultProgressPrinter_header_log_id = logger.info("", defer=subtest.quiet)
else:
subtest._DefaultProgressPrinter_header_log_id = logger.info("- Test %s" % (subtest.name), defer=subtest.quiet)
logger.indent_icrement()
if subtest.name.startswith("test_") and not subtest.name.endswith("restore state"):
logger.info("Test %s start time: %s" % (subtest.name, datetime.datetime.now()))
def subtest_finish(self, subtest):
global _erisTestNumber
if subtest.name.startswith("test_") and not subtest.name.endswith("restore state"):
logger.info("Test %s end time: %s" % (subtest.name, datetime.datetime.now()))
if subtest.result == test_utils.SubTest.FAILED and test_utils.reRunning == True:
subtest.result = test_utils.SubTest.FAILURE_LOGGED
logger.error(subtest.result_details)
if subtest.result == test_utils.SubTest.FAILED:
logger.error(subtest.result_details)
logger.indent_decrement()
logger.pop_defered(subtest._DefaultProgressPrinter_header_log_id)
if subtest.result == test_utils.SubTest.SKIPPED:
with logger.IndentBlock():
logger.info("SKIPPED: " + str(subtest.result_details_raw.exception))
elif subtest.result != test_utils.SubTest.SUCCESS:
logger.info("<< %s" % (subtest))
if option_parser.options.eris:
# Validating results of subtest with depth bigger than 3
if subtest.depth > 3 and not subtest.name.endswith("restore state"):
if subtest.result == test_utils.SubTest.FAILED:
self.childrenTest = "F"
if subtest.depth == 3 and subtest.name.startswith("test_") and not subtest.name.endswith("restore state"):
if subtest.result == test_utils.SubTest.SKIPPED:
logger.info("\n&&&& WAIVED %s - %d\n" % (subtest.name, _erisTestNumber))
elif subtest.result == test_utils.SubTest.SUCCESS and not self.childrenTest == "F":
logger.info("\n&&&& PASSED %s - %d\n" % (subtest.name, _erisTestNumber))
elif subtest.result == test_utils.SubTest.FAILURE_LOGGED:
logger.info("\n&&&& FAILURE_LOGGED %s - %d\n" % (subtest.name, _erisTestNumber))
elif subtest.result == test_utils.SubTest.FAILED or self.childrenTest == "F":
logger.info("\n&&&& FAILED %s - %d\n" % (subtest.name, _erisTestNumber))
elif subtest.result == test_utils.SubTest.NOT_CONNECTED:
logger.info("\n&&&& RETRY %s - %d\n" % (subtest.name, _erisTestNumber))
progress_printer = DefaultProgressPrinter()
| DCGM-master | testing/python3/progress_printer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import wraps
# pylint: disable=no-name-in-module
from test_utils import skip_test
import logger
MOCK_MSG = "mock not installed. Please run \"pip install mock\""
try:
import mock
MOCK_INSTALLED = True
except ImportError:
logger.warning(MOCK_MSG)
MOCK_INSTALLED = False
def skip_test_if_no_mock():
'''
Returns a decorator for functions. The decorator skips
the test in the provided function if mock is not installed
'''
def decorator(fn):
@wraps(fn)
def wrapper(*args, **kwds):
if MOCK_INSTALLED:
fn(*args, **kwds)
else:
skip_test(MOCK_MSG)
return wrapper
return decorator
# Do not use this class directly
class _MaybeMock:
def __call__(self, *args, **kwds):
return skip_test_if_no_mock()
def __getattr__(self, attr):
if (MOCK_INSTALLED):
return getattr(mock, attr)
return self
maybemock = _MaybeMock()
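if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): a test body that
    # is skipped when mock is unavailable and otherwise uses maybemock exactly
    # like the mock package.
    @skip_test_if_no_mock()
    def _example_test():
        fake = maybemock.MagicMock(return_value=42)
        assert fake() == 42

    _example_test()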
| DCGM-master | testing/python3/_test_helpers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import shutil
import datetime
import threading
import trace
import zipfile
import string
import re
import libs_3rdparty.colorama as colorama
import dcgm_structs
import dcgm_agent_internal
import option_parser
import utils
import test_utils
log_dir = None
have_recreated_log_dir = False #Have we removed and recreated the log dir from previous runs?
default_log_dir = '_out_runLogs'
_log_file = None
_log_file_counter = None
summary_filename = "terminal_main.py_stdout.txt"
_summary_file = None
log_archive_filename = "all_results.zip"
dcgm_trace_log_filename = None
nvml_trace_log_filename = None
_indent_lvl = 0
_coloring_enabled = True # coloring is applied even if there is no file descriptor connected to a tty-like device
_message_levels = (FATAL, ERROR, INFO, WARNING, DEBUG) = list(range(5))
messages_level_counts = [0] * len(_message_levels)
level_names = ("FATAL", "ERROR", "INFO", "WARNING", "DEBUG")
stdout_loglevel = WARNING
def caller_function_details(depth=1):
"""
Returns tuple with details of function up the call stack
returns (file_name, func_name, first_line_no)
"""
import inspect
# Get up the stack of functions
func = inspect.currentframe().f_back
for i in range(depth):
func = func.f_back
func = func.f_code
return (os.path.relpath(func.co_filename), func.co_name, func.co_firstlineno)
def addtrace_logging(module, filter_fns=lambda name, fn: True):
'''
Find all functions in module and add logging before and after each call.
'''
from functools import wraps
import inspect
for name in dir(module):
if name.startswith("_"):
continue
fn = getattr(module, name)
if not inspect.isfunction(fn):
continue
if not filter_fns(name, fn):
continue
def genfunc(fn):
@wraps(fn)
def tmpfn(*args, **kwargs):
debug("Call %s(args: %s kwargs: %s)" % (fn.__name__, list(zip(inspect.getfullargspec(fn).args, args)), kwargs), caller_depth=1)
try:
res = fn(*args, **kwargs)
debug("Call %s returned: %s" % (fn.__name__, res), caller_depth=1)
return res
except Exception as e:
debug("Call %s raised: %s" % (fn.__name__, e), caller_depth=1)
raise
return tmpfn
setattr(module, name, genfunc(fn))
def setup_environment():
global _log_file
global _summary_file
global log_dir
global dcgm_trace_log_filename
global nvml_trace_log_filename
global have_recreated_log_dir
curr_log_dir = None
# Users can specify a non-default logging base path via the command line
if option_parser.options.log_dir:
assert os.path.exists(option_parser.options.log_dir)
curr_log_dir = os.path.normpath(os.path.join(option_parser.options.log_dir, default_log_dir))
else:
curr_log_dir = os.path.join(os.getcwd(), default_log_dir)
log_dir = os.path.realpath(curr_log_dir)
dcgm_trace_log_filename = os.path.join(log_dir, "dcgm_trace.log")
nvml_trace_log_filename = os.path.join(log_dir, "nvml_trace.log")
#We clean up the log dir from previous runs once per test run in order to prevent the constant accumulation of logs
if not have_recreated_log_dir:
have_recreated_log_dir = True #We set this boolean here so we don't incorrectly remove this directory if this function is called again
if os.path.isdir(log_dir):
shutil.rmtree(log_dir, ignore_errors=True)
try:
os.mkdir(log_dir)
except OSError:
pass
os.chmod(log_dir, 0o777) # so that non-root tests could write to this directory
if _log_file is None:
_log_file = open(os.path.join(log_dir, "log.txt"), "a")
if _summary_file is None:
_summary_file = open(os.path.join(log_dir, summary_filename), "a")
# We always want the dcgm_trace log to be set to log within our test framework folder
# so we don't fill up /var/log with errors.
os.environ['__DCGM_DBG_FILE'] = dcgm_trace_log_filename
os.environ['__DCGM_DBG_APPEND'] = '1'
# create the file upfront and set proper chmod
# so that non-root tests could dcgmInit and write to this log
with open(dcgm_trace_log_filename, "a"):
pass
os.chmod(dcgm_trace_log_filename, 0o777)
if not test_utils.noLogging:
#Log both NVML and DCGM so we can see the underlying NVML calls for the DCGM data
os.environ['__NVML_DBG_FILE'] = nvml_trace_log_filename
os.environ['__NVML_DBG_LVL'] = test_utils.loggingLevel
os.environ['__NVML_DBG_APPEND'] = '1'
os.environ['__DCGM_DBG_LVL'] = test_utils.loggingLevel
# create the file upfront and set proper chmod
# so that non-root tests could dcgmInit and write to this log
with open(nvml_trace_log_filename, "a"):
pass
os.chmod(nvml_trace_log_filename, 0o777)
addtrace_logging(dcgm_structs,
# Don't attach trace logging to some functions
lambda name, fn: name not in ["dcgmErrorString", "dcgmStructToFriendlyObject"])
else:
#Not logging. Clear the environmental variables
envVars = ['__NVML_DBG_FILE', '__NVML_DBG_LVL', '__NVML_DBG_APPEND',
'__DCGM_DBG_LVL']
for envVar in envVars:
if envVar in os.environ:
del(os.environ[envVar])
global _coloring_enabled
if sys.stdout.isatty():
colorama.init()
_coloring_enabled = True
else:
_coloring_enabled = False
if not test_utils.SubTest.FAILED:
info("Package version information:")
with IndentBlock():
try:
version_file = open(os.path.join(utils.script_dir, "data/version.txt"), "r")
info("".join(version_file.readlines()))
version_file.close()
except IOError:
warning("No build version information")
if os.path.exists(log_archive_filename):
info("Removing old %s" % log_archive_filename)
try:
os.remove(log_archive_filename)
except IOError:
pass
def close():
"""
Closes all the debug file streams and archives all logs
into single zip file logger.log_archive_filename
"""
debug("Storing all logs in " + log_archive_filename)
if _coloring_enabled:
colorama.deinit()
if log_dir is not None and os.path.isdir(log_dir):
try:
zip = zipfile.ZipFile(log_archive_filename, 'w', compression=zipfile.ZIP_DEFLATED)
for root, dirs, files in os.walk(log_dir):
for file in files:
zip.write(os.path.join(root, file))
zip.close()
os.chmod(log_archive_filename, 0o777)
except Exception:
pass
def run_with_coverage(fn):
"""
Runs the function (that shouldn't take any arguments!) with coverage tool.
Stores the results in a log and returns the results.
"""
coverage_trace = trace.Trace(trace=0, ignoredirs=[sys.prefix, sys.exec_prefix])
try:
coverage_trace.runfunc(lambda ignore1, ignore2: fn(), [], {})
finally:
results = coverage_trace.results()
coverdir = os.path.join(log_dir, "pycoverage")
if not os.path.exists(coverdir):
os.mkdir(coverdir)
os.chmod(coverdir, 0o777) # so that non-root tests could write to this directory
results.write_results(show_missing=False, coverdir=coverdir)
return results
_defered_lines = []
log_lock = threading.Lock()
_log_id = 0
def log(level, msg, caller_depth=0, defer=False):
def apply_coloring(level, line):
if option_parser.options.eris:
return line
if not _coloring_enabled:
return line
if level == FATAL or level == ERROR:
return colorama.Fore.RED + line + colorama.Fore.RESET
elif level == WARNING:
return colorama.Fore.YELLOW + line + colorama.Fore.RESET
coloring = [
("SKIPPED", colorama.Fore.CYAN),
("Reason: .*", colorama.Fore.CYAN),
("FAILED", colorama.Fore.RED),
("DEBUG", colorama.Fore.MAGENTA),
("FAILURE_LOGGED", colorama.Fore.CYAN),
]
for (what, color) in coloring:
coloring_match = re.match("(.*)(%s)(.*)" % what, line)
if coloring_match:
groups = coloring_match.groups()
line = "".join([groups[0], color, groups[1], colorama.Fore.RESET, groups[2]])
return line
global _log_id
with log_lock:
_log_id += 1
messages_level_counts[level] += 1
if option_parser.options.eris:
indent = ""
else:
indent = " " * _indent_lvl
timestamp = datetime.datetime.now()
if not defer and level <= stdout_loglevel:
# will be printing this message to stdout so we need to flush all deferred lines first
for (log_id, defered_level, line) in _defered_lines:
print(apply_coloring(defered_level, line))
if _summary_file is not None:
_summary_file.write(line)
_summary_file.write("\n")
del _defered_lines[:]
for msg in msg.splitlines():
if level <= stdout_loglevel:
if level != INFO:
level_name = level_names[level] + ": "
else:
level_name = ""
line = "%s%s%s" % (indent, level_name, msg)
if defer:
_defered_lines.append((_log_id, level, line))
else:
print(apply_coloring(level, line))
if _summary_file is not None:
_summary_file.write(line)
_summary_file.write("\n")
if _log_file and (not test_utils.noLogging):
_log_file.write("%-8s: [%s] %s%s\n" % (level_names[level], timestamp, indent, msg))
try:
fndetails = caller_function_details(caller_depth + 1)
#dcgm_agent_internal.dcgmTraceLogPrintLine("<testing %s> [%s - %s:%s:%d] %s%s" %
# (level_names[level], timestamp, fndetails[0], fndetails[1], fndetails[2], indent, msg))
except dcgm_structs.DCGMError:
pass
# Every 100 messages force OS to write to log file to HD in case OS kernel panics
if _log_id % 100 == 0:
os.fsync(_log_file)
if level == FATAL:
close()
os._exit(1)
return _log_id
def pop_defered(log_id):
"""
Removes the message from the deferred-lines buffer and returns True. The removed log_id must be the last log_id on the list.
If the log_id is not found, returns False.
Note: Messages added to the deferred log need to be removed in the reverse order that they were added (like unrolling a stack).
"""
result = False
while _defered_lines and _defered_lines[-1][0] == log_id:
_defered_lines.pop()
result = True
return result
def fatal(msg="\n", caller_depth = 0, defer=False):
"""
Calls sys.exit at the end
"""
return log(FATAL, msg, caller_depth + 1, defer)
def error(msg="\n", caller_depth = 0, defer=False):
return log(ERROR, msg, caller_depth + 1, defer)
def info(msg="\n", caller_depth = 0, defer=False):
return log(INFO, msg, caller_depth + 1, defer)
def warning(msg="\n", caller_depth = 0, defer=False):
return log(WARNING, msg, caller_depth + 1, defer)
def debug(msg="\n", caller_depth = 0, defer=False):
return log(DEBUG, msg, caller_depth + 1, defer)
def indent_icrement(val=1):
global _indent_lvl
_indent_lvl += val
def indent_decrement(val=1):
global _indent_lvl
_indent_lvl -= val
class IndentBlock(object):
def __init__(self, val=1):
self._old_indent = _indent_lvl
self._val = val
def __enter__(self):
indent_icrement(self._val)
def __exit__(self, exception_type, exception, trace):
indent_decrement(self._val)
# Sample usage
if __name__ == "__main__":
option_parser.parse_options()
setup_environment()
info("This gets printed to stdout")
debug("This by default gets printed only to debug log in %s dir" % (log_dir))
log_id1 = info("This message is deferred. It can be removed from stdout but not from debug log", defer=True)
debug("Even when one prints to debug log, deferred line can be removed")
assert pop_defered(log_id1) == True
log_id2 = info("This message is also deferred. But will get printed as soon as error message requested", defer=True)
with IndentBlock(): # indent this message
error("This causes the deferred message to be printed")
assert pop_defered(log_id2) == False
close()
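# A further sketch (comments only): addtrace_logging() can wrap every public
# function of a module so each call and its result are written to the debug
# log. dcgm_agent is just an example target module.
#
#   import dcgm_agent
#   addtrace_logging(dcgm_agent,
#                    filter_fns=lambda name, fn: name.startswith("dcgm"))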
| DCGM-master | testing/python3/logger.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_agent
import dcgm_structs
import dcgm_fields
import dcgm_field_helpers
from DcgmHandle import DcgmHandle
class DcgmGroupConfig:
def __init__(self, dcgmHandle, groupId, dcgmGroup):
self._dcgmHandle = dcgmHandle
self._groupId = groupId
self._dcgmGroup = dcgmGroup
'''
Set configuration for this group
config should be an instance of dcgm_structs.c_dcgmDeviceConfig_v1
Will throw an exception on error
'''
def Set(self, config):
status = pydcgm.DcgmStatus()
ret = dcgm_structs.DCGM_ST_OK
try:
ret = dcgm_agent.dcgmConfigSet(self._dcgmHandle.handle, self._groupId, config, status.handle)
except dcgm_structs.DCGMError as e:
pass
#Throw specific errors before return error
status.ThrowExceptionOnErrors()
#Throw an appropriate exception on error
dcgm_structs._dcgmCheckReturn(ret)
'''
Get configuration for this group
configType is a DCGM_CONFIG_? constant
Returns an array of dcgm_structs.c_dcgmDeviceConfig_v1 objects
Throws an exception on error
'''
def Get(self, configType):
status = pydcgm.DcgmStatus()
gpuIds = self._dcgmGroup.GetGpuIds()
configList = dcgm_agent.dcgmConfigGet(self._dcgmHandle.handle, self._groupId, configType, len(gpuIds), status.handle)
#Throw specific errors before return error
status.ThrowExceptionOnErrors()
return configList
'''
Enforce the configuration that has been set with Set()
Throws an exception on error
'''
def Enforce(self):
status = pydcgm.DcgmStatus()
ret = dcgm_structs.DCGM_ST_OK
try:
ret = dcgm_agent.dcgmConfigEnforce(self._dcgmHandle.handle, self._groupId, status.handle)
except dcgm_structs.DCGMError as e:
pass
#Throw specific errors before return error
status.ThrowExceptionOnErrors()
#Throw an appropriate exception on error
dcgm_structs._dcgmCheckReturn(ret)
class DcgmGroupSamples:
def __init__(self, dcgmHandle, groupId, dcgmGroup):
self._dcgmHandle = dcgmHandle
self._groupId = groupId
self._dcgmGroup = dcgmGroup
'''
Tell DCGM to start recording samples for the given field group
fieldGroup: DcgmFieldGroup() instance tracking the fields we want to watch.
updateFreq: How often to update these fields in usec
maxKeepAge: How long to keep data for these fields in seconds
maxKeepSamples: Maximum number of samples to keep per field. 0=no limit
Once the field collection is watched, it will update whenever the next update
loop occurs. If you want to query these values immediately, use
handle.UpdateAllFields(True) to make sure that the fields have updated at least once.
'''
def WatchFields(self, fieldGroup, updateFreq, maxKeepAge, maxKeepSamples):
ret = dcgm_agent.dcgmWatchFields(self._dcgmHandle.handle, self._groupId, fieldGroup.fieldGroupId, updateFreq, maxKeepAge, maxKeepSamples)
dcgm_structs._dcgmCheckReturn(ret)
'''
tell DCGM to stop recording samples for a given field group
fieldGroup: DcgmFieldGroup() instance tracking the fields we want to unwatch.
'''
def UnwatchFields(self, fieldGroup):
ret = dcgm_agent.dcgmUnwatchFields(self._dcgmHandle.handle, self._groupId, fieldGroup.fieldGroupId)
dcgm_structs._dcgmCheckReturn(ret)
'''
Get the most recent values for each field in a field collection
fieldGroup: DcgmFieldGroup() instance tracking the fields we want to watch.
Returns DcgmFieldValueCollection object. Use its .values[gpuId][fieldId][0].value to access values
'''
def GetLatest(self, fieldGroup):
dfvc = dcgm_field_helpers.DcgmFieldValueCollection(self._dcgmHandle.handle, self._groupId)
dfvc.GetLatestValues(fieldGroup)
return dfvc
'''
Get the most recent values for each field in a field collection
fieldGroup: DcgmFieldGroup() instance tracking the fields we want to watch.
Returns DcgmFieldValueEntityCollection object. Use its .values[entityGroupId][entityId][fieldId][0].value to access values
'''
def GetLatest_v2(self, fieldGroup):
dfvec = dcgm_field_helpers.DcgmFieldValueEntityCollection(self._dcgmHandle.handle, self._groupId)
dfvec.GetLatestValues(fieldGroup)
return dfvec
'''
Get the new values for each field in a field collection since the last
collection.
dfvc: DcgmFieldValueCollection() instance. Will return a
DcgmFieldValueCollection with values since the one passed in.
Pass None for the first call to get one for subsequent calls.
On subsequent calls, pass what was returned.
fieldGroup: DcgmFieldGroup() instance tracking the fields we want to watch.
Returns DcgmFieldValueCollection object. Use its .values[gpuId][fieldId][*].value to access values
'''
def GetAllSinceLastCall(self, dfvc, fieldGroup):
if dfvc == None:
dfvc = dcgm_field_helpers.DcgmFieldValueCollection(self._dcgmHandle.handle, self._groupId)
dfvc.GetLatestValues(fieldGroup)
else:
# We used to expect at least one value (GetLatestValues), so this
# ensures we provide one at the risk of repetition. This should not
# happen if we call this function infrequently enough (slower than
# the sampling rate).
dfvc.GetAllSinceLastCall(fieldGroup)
if len(dfvc.values) == 0:
dfvc.GetLatestValues(fieldGroup)
return dfvc
'''
Gets more values for each field in a field entity collection
dfvec: DcgmFieldValueEntityCollection() instance. Will return a
DcgmFieldValueEntityCollection with values since the one passed
in. Pass None for the first call to get one for subsequent
calls. On subsequent calls, pass what was returned.
fieldGroup: DcgmFieldGroup() instance tracking the fields we want to watch.
Returns DcgmFieldValueEntityCollection object. Use its .values[entityGroupId][entityId][fieldId][*].value to access values
'''
def GetAllSinceLastCall_v2(self, dfvec, fieldGroup):
if dfvec == None:
dfvec = dcgm_field_helpers.DcgmFieldValueEntityCollection(self._dcgmHandle.handle, self._groupId)
dfvec.GetLatestValues_v2(fieldGroup)
else:
dfvec.GetAllSinceLastCall_v2(fieldGroup)
# We used to expect at least one value (GetLatestValues), so this
# ensures we provide one at the risk of repetition. This should not
# happen if we call this function infrequently enough (slower than
# the sampling rate).
if len(dfvec.values) == 0:
dfvec.GetLatestValues_v2(fieldGroup)
return dfvec
'''
Convenience alias for DcgmHandle.UpdateAllFields(). All fields on the system will be updated, not
just this group's.
'''
def UpdateAllFields(self, waitForUpdate):
self._dcgmHandle.UpdateAllFields(waitForUpdate)
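# --------------------------------------------------------------------------- #
# Illustrative sketch (comments only, not part of the original module) of the
# typical watch/read flow built on DcgmGroupSamples. The DcgmFieldGroup
# constructor arguments shown are assumptions about that class's signature.
#
#   handle = pydcgm.DcgmHandle()
#   group = pydcgm.DcgmGroup(handle, groupId=dcgm_structs.DCGM_GROUP_ALL_GPUS)
#   fieldGroup = pydcgm.DcgmFieldGroup(handle, "demoFields",
#                                      [dcgm_fields.DCGM_FI_DEV_GPU_TEMP])
#   group.samples.WatchFields(fieldGroup, updateFreq=1000000,
#                             maxKeepAge=3600.0, maxKeepSamples=0)
#   group.samples.UpdateAllFields(True)   # force one update cycle
#   values = group.samples.GetLatest(fieldGroup)
#   # values.values[gpuId][fieldId][0].value holds the latest reading
# --------------------------------------------------------------------------- #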
class DcgmGroupHealth:
def __init__(self, dcgmHandle, groupId, dcgmGroup):
self._dcgmHandle = dcgmHandle
self._groupId = groupId
self._dcgmGroup = dcgmGroup
'''
Enable health checks for this group
systems: A bitmask of dcgm_structs.DCGM_HEALTH_WATCH_? definitions of which health checks to enable
updateInterval: How often DCGM should request new health data from the driver in usec
maxKeepAge: How long DCGM should keep health data around once it has been retrieved from the driver in seconds
'''
def Set(self, systems, updateInterval = None, maxKeepAge = None):
if updateInterval is None or maxKeepAge is None:
ret = dcgm_agent.dcgmHealthSet(self._dcgmHandle.handle, self._groupId, systems)
else:
ret = dcgm_agent.dcgmHealthSet_v2(self._dcgmHandle.handle, self._groupId, systems, updateInterval, maxKeepAge)
dcgm_structs._dcgmCheckReturn(ret)
'''
Retrieve the current state of the DCGM health check system
Returns a bitmask of dcgm_structs.DCGM_HEALTH_WATCH_? definitions of which health checks are currently enabled
'''
def Get(self):
systems = dcgm_agent.dcgmHealthGet(self._dcgmHandle.handle, self._groupId)
return systems
'''
Check the configured watches for any errors/failures/warnings that have occurred
since the last time this check was invoked. On the first call, stateful information
about all of the enabled watches within a group is created but no error results are
provided. On subsequent calls, any error information will be returned.
@param version IN: Allows the caller to use an older version of this request. Should be
dcgm_structs.dcgmHealthResponse_version4
Returns a dcgm_structs.c_dcgmHealthResponse_* object that contains results for each GPU/entity
'''
def Check(self, version = dcgm_structs.dcgmHealthResponse_version4):
resp = dcgm_agent.dcgmHealthCheck(self._dcgmHandle.handle, self._groupId, version)
return resp
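# Illustrative sketch (comments only, not part of the original module) of the
# health-watch flow. The response field names in the last comment are based on
# dcgmHealthResponse_version4 and may differ between DCGM versions.
#
#   systems = (dcgm_structs.DCGM_HEALTH_WATCH_PCIE
#              | dcgm_structs.DCGM_HEALTH_WATCH_MEM)
#   group.health.Set(systems)
#   response = group.health.Check()
#   # response.incidentCount / response.incidents[] describe detected issues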
class DcgmGroupPolicy:
def __init__(self, dcgmHandle, groupId, dcgmGroup):
self._dcgmHandle = dcgmHandle
self._groupId = groupId
self._dcgmGroup = dcgmGroup
'''
Get the current violation policy inside the policy manager. Given a groupId, a number of
policy structures are retrieved.
@param statusHandle IN/OUT: pydcgm.DcgmStatus for the resulting status of the operation. Pass it as None
if the detailed error information for the operation is not needed (default).
Returns a list of dcgm_structs.c_dcgmPolicy_v1 with the same length as the number of GPUs in the group.
The index of an entry corresponds to a given GPU ID in the group. Throws an exception on error.
'''
def Get(self, statusHandle=None):
if statusHandle:
statusHandle = statusHandle.handle
count = len(self._dcgmGroup.GetGpuIds())
if count <= 0:
raise pydcgm.DcgmException("This group has no GPUs, cannot retrieve policies")
return dcgm_agent.dcgmPolicyGet(self._dcgmHandle.handle, self._groupId, count, statusHandle)
'''
Set the current violation policy inside the policy manager. Given the conditions within "policy",
if a violation has occurred, subsequent action(s) may be performed to either
report or contain the failure.
This API is only supported on Tesla GPUs and will throw DCGMError_NotSupported if called on non-Tesla GPUs.
@param policy IN: dcgm_structs.c_dcgmPolicy_v1 that will be applied to all GPUs in the group
@param statusHandle IN/OUT: pydcgm.DcgmStatus for the resulting status for the operation. Pass it as
None if the detailed error information for the operation is not needed (default).
Returns Nothing. Throws an exception on error
'''
def Set(self, policy, statusHandle=None):
if statusHandle:
statusHandle = statusHandle.handle
dcgm_agent.dcgmPolicySet(self._dcgmHandle.handle, self._groupId, policy, statusHandle)
'''
Register a function to be called when a specific policy condition (see dcgm_structs.c_dcgmPolicy_v1.condition)
has been violated. This callback(s) will be called automatically when in DCGM_OPERATION_MODE_AUTO mode and only after
DcgmPolicy.Trigger when in DCGM_OPERATION_MODE_MANUAL mode.
All callbacks are made within a separate thread.
This API is only supported on Tesla GPUs and will throw DCGMError_NotSupported if called on non-Tesla GPUs.
@param condition IN: The set of conditions specified as an OR'd list
(see dcgm_structs.DCGM_POLICY_COND_*)
for which to register a callback function
@param beginCallback IN: A function that should be called should a violation occur. This
function will be called before any actions specified by the policy are taken.
@param finishCallback IN: A reference to a function that should be called should a violation occur.
This function will be called after any action specified by the policy are completed.
At least one callback must be provided that is not None.
Returns Nothing. Throws an exception on error.
'''
def Register(self, condition, beginCallback=None, finishCallback=None):
if beginCallback is None and finishCallback is None:
raise pydcgm.DcgmException("At least 1 callback must be provided to register that is not None")
dcgm_agent.dcgmPolicyRegister(self._dcgmHandle.handle, self._groupId, condition, beginCallback, finishCallback)
'''
Unregister a function to be called for a specific policy condition (see dcgm_structs.c_dcgmPolicy_v1.condition).
This function will unregister all callbacks for a given condition.
@param condition IN: The set of conditions specified as an OR'd list
(see dcgm_structs.DCGM_POLICY_COND_*)
for which to unregister a callback function
Returns Nothing. Throws an exception on error.
'''
def Unregister(self, condition):
dcgm_agent.dcgmPolicyUnregister(self._dcgmHandle.handle, self._groupId, condition)
'''
Inform the policy manager loop to perform an iteration and trigger the callbacks of any
registered functions. Callback functions will be called from a separate thread as the calling function.
Note: The GPU monitoring and management agent must call this method periodically if the operation
mode is set to manual mode (DCGM_OPERATION_MODE_MANUAL) during initialization
(\ref DcgmHandle.__init__).
Returns Nothing. Throws an exception if there is a generic error that the
policy manager was unable to perform another iteration.
'''
def Trigger(self):
dcgm_agent.dcgmPolicyTrigger(self._dcgmHandle.handle)
class DcgmGroupDiscovery:
def __init__(self, dcgmHandle, groupId, dcgmGroup):
self._dcgmHandle = dcgmHandle
self._groupId = groupId
self._dcgmGroup = dcgmGroup
'''
Get the topology for this group
Returns a c_dcgmGroupTopology_v1 object representing the topology for this group
'''
def GetTopology(self):
return dcgm_agent.dcgmGetGroupTopology(self._dcgmHandle.handle, self._groupId)
class DcgmGroupStats:
def __init__(self, dcgmHandle, groupId, dcgmGroup):
self._dcgmHandle = dcgmHandle
self._groupId = groupId
self._dcgmGroup = dcgmGroup
'''
Tell DCGM to start recording samples for fields returned from GetPidInfo()
updateFreq: How often to update these fields in usec
maxKeepAge: How long to keep data for these fields in seconds
maxKeepSamples: Maximum number of samples to keep per field. 0=no limit
Once the field collection is watched, it will update whenever the next update
loop occurs. If you want to query these values immediately, use
handle.UpdateAllFields(True) to make sure that the fields have updated at least once.
'''
def WatchPidFields(self, updateFreq, maxKeepAge, maxKeepSamples):
ret = dcgm_agent.dcgmWatchPidFields(self._dcgmHandle.handle, self._groupId, updateFreq, maxKeepAge, maxKeepSamples)
dcgm_structs._dcgmCheckReturn(ret)
'''
Get process stats for a given PID on this GPU group
You must call WatchPidFields() before this query for this method to return any results
Returns a dcgm_structs.c_dcgmPidInfo_v2 structure
'''
def GetPidInfo(self, pid):
return dcgm_agent.dcgmGetPidInfo(self._dcgmHandle.handle, self._groupId, pid)
'''
Tell DCGM to start recording samples for fields returned from GetJobStats()
updateFreq: How often to update these fields in usec
maxKeepAge: How long to keep data for these fields in seconds
maxKeepSamples: Maximum number of samples to keep per field. 0=no limit
Once the fields are watched, they will update whenever the next update
loop occurs. If you want to query these values immediately, use
handle.UpdateAllFields(True) to make sure that the fields have updated at least once.
'''
def WatchJobFields(self, updateFreq, maxKeepAge, maxKeepSamples):
ret = dcgm_agent.dcgmWatchJobFields(self._dcgmHandle.handle, self._groupId, updateFreq, maxKeepAge, maxKeepSamples)
dcgm_structs._dcgmCheckReturn(ret)
'''
Start collecting stats for a named job for this GPU group
Calling this will tell DCGM to start tracking stats for the given jobId. Stats tracking
will end when StopJobStats() is called
You must call WatchJobFields() before this call to tell DCGM to start sampling the fields
that are returned from GetJobStats().
jobId is a unique string identifier for this job. An exception will be thrown if this is not unique
Returns Nothing (Will throw exception on error)
'''
def StartJobStats(self, jobId):
ret = dcgm_agent.dcgmJobStartStats(self._dcgmHandle.handle, self._groupId, jobId)
dcgm_structs._dcgmCheckReturn(ret)
'''
Stop collecting stats for a named job
Calling this will tell DCGM to stop collecting stats for a job that was previously started
with StartJobStats().
jobId is the unique string that was passed as jobId to StartJobStats.
Returns Nothing (Will throw exception on error)
'''
def StopJobStats(self, jobId):
ret = dcgm_agent.dcgmJobStopStats(self._dcgmHandle.handle, jobId)
dcgm_structs._dcgmCheckReturn(ret)
'''
Get stats for a job that was started with StartJobStats. If StopJobStats has not been called yet,
this will get stats from when the job started until now. If StopJob was called prior to
this, the returned Stats will go from when StartJobStats was called to when StopJobStats was called.
jobId is the unique string that was passed as jobId to StartJobStats and StopJobStats
Returns a dcgm_structs.c_dcgmJobInfo_v3 structure. Throws an exception on error
'''
def GetJobStats(self, jobId):
ret = dcgm_agent.dcgmJobGetStats(self._dcgmHandle.handle, jobId)
return ret
'''
This API tells DCGM to stop tracking the job given by jobId. After this call, you will no longer
be able to call GetJobStats() on this jobId. However, you will be able to reuse jobId after
this call.
jobId is the unique string that was passed as jobId to StartJobStats and StopJobStats
Returns Nothing (Will throw exception on error)
'''
def RemoveJob(self, jobId):
ret = dcgm_agent.dcgmJobRemove(self._dcgmHandle.handle, jobId)
return ret
'''
This API tells DCGM to stop tracking all jobs. After this call, you will no longer
be able to call GetJobStats() on any jobs until you call StartJobStats() again.
You will be able to reuse any previously-used jobIds after this call.
Returns Nothing (Will throw exception on error)
'''
def RemoveAllJobs(self):
ret = dcgm_agent.dcgmJobRemoveAll(self._dcgmHandle.handle)
return ret
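# Illustrative sketch (comments only, not part of the original module) of the
# job-stats flow documented above; "my-job-001" is a hypothetical job id.
#
#   group.stats.WatchJobFields(updateFreq=1000000, maxKeepAge=3600.0,
#                              maxKeepSamples=0)
#   group.stats.StartJobStats("my-job-001")
#   # ... run the workload ...
#   group.stats.StopJobStats("my-job-001")
#   jobInfo = group.stats.GetJobStats("my-job-001")
#   group.stats.RemoveJob("my-job-001")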
class DcgmGroupAction:
def __init__(self, dcgmHandle, groupId, dcgmGroup):
self._dcgmHandle = dcgmHandle
self._groupId = groupId
self._dcgmGroup = dcgmGroup
'''
Inform the action manager to perform a manual validation of a group of GPUs on the system
validate is what sort of validation to do. See dcgm_structs.DCGM_POLICY_VALID_* defines.
Returns a dcgm_structs.c_dcgmDiagResponse_v5 instance
'''
def Validate(self, validate):
runDiagInfo = dcgm_structs.c_dcgmRunDiag_v7()
runDiagInfo.version = dcgm_structs.dcgmRunDiag_version7
runDiagInfo.validate = validate
runDiagInfo.groupId = self._groupId
ret = dcgm_agent.dcgmActionValidate_v2(self._dcgmHandle.handle, runDiagInfo)
return ret
'''
Run a diagnostic on this group of GPUs.
diagLevel is the level of diagnostic desired. See dcgm_structs.DCGM_DIAG_LVL_* constants.
Returns a dcgm_structs.c_dcgmDiagResponse_v5 instance
'''
def RunDiagnostic(self, diagLevel):
ret = dcgm_agent.dcgmRunDiagnostic(self._dcgmHandle.handle, self._groupId, diagLevel)
return ret
'''
Run a specific diagnostic test on this group of GPUs.
testName is the name of the specific test that should be invoked.
Returns a dcgm_structs.c_dcgmDiagResponse_v5 instance
'''
def RunSpecificTest(self, testName):
runDiagInfo = dcgm_structs.c_dcgmRunDiag_v7()
runDiagInfo.version = dcgm_structs.dcgmRunDiag_version7
for i in range(len(testName)):
runDiagInfo.testNames[0][i] = testName[i]
runDiagInfo.groupId = self._groupId
runDiagInfo.validate = dcgm_structs.DCGM_POLICY_VALID_NONE
response = dcgm_agent.dcgmActionValidate_v2(self._dcgmHandle.handle, runDiagInfo)
return response
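# Illustrative sketch (comments only, not part of the original module): run the
# short system validation on the group and inspect the result.
#
#   response = group.action.Validate(dcgm_structs.DCGM_POLICY_VALID_SV_SHORT)
#   # response is a dcgm_structs.c_dcgmDiagResponse_* structure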
class DcgmGroupProfiling:
def __init__(self, dcgmHandle, groupId, dcgmGroup):
"""
Parameters
----------
dcgmHandle : DcgmHandle
groupId : int
dcgmGroup : DcgmGroup
"""
self._dcgmHandle = dcgmHandle
self._groupId = groupId
self._dcgmGroup = dcgmGroup
def GetSupportedMetricGroups(self):
"""
Get a list of the profiling metric groups available for this group of entities
:return: dcgm_structs.c_dcgmProfGetMetricGroups_v3
:throws: dcgm_structs.DCGMError on error
"""
gpuIds = self._dcgmGroup.GetGpuIds()
if len(gpuIds) < 1:
raise dcgm_structs.DCGMError_ProfilingNotSupported
ret = dcgm_agent.dcgmProfGetSupportedMetricGroups(self._dcgmHandle.handle, gpuIds[0])
return ret
class DcgmGroup:
'''
Constructor.
Either groupId OR groupName must be provided as a parameter.
This will set which GPU group this object is bound to
groupId=DCGM_GROUP_ALL_GPUS creates a group with all GPUs. Passing an existing groupId will
not create an additional group.
If groupName is provided, an empty group (No GPUs) of name groupName will be created. This group
will be destroyed when this object goes out of scope or is deleted with del().
groupType is the type of group to create. See dcgm_structs.DCGM_GROUP_? constants.
'''
def __init__(self, dcgmHandle, groupId=None, groupName=None, groupType=dcgm_structs.DCGM_GROUP_EMPTY):
self._dcgmHandle = dcgmHandle
if groupId is None and groupName is None:
raise pydcgm.DcgmException("Either groupId or groupName is required")
if groupId is not None:
self._groupId = groupId
else:
self._groupId = dcgm_agent.dcgmGroupCreate(self._dcgmHandle.handle, groupType, groupName)
#Create namespace classes
self.config = DcgmGroupConfig(self._dcgmHandle, self._groupId, self)
self.samples = DcgmGroupSamples(self._dcgmHandle, self._groupId, self)
self.health = DcgmGroupHealth(self._dcgmHandle, self._groupId, self)
self.policy = DcgmGroupPolicy(self._dcgmHandle, self._groupId, self)
self.discovery = DcgmGroupDiscovery(self._dcgmHandle, self._groupId, self)
self.stats = DcgmGroupStats(self._dcgmHandle, self._groupId, self)
self.action = DcgmGroupAction(self._dcgmHandle, self._groupId, self)
self.profiling = DcgmGroupProfiling(self._dcgmHandle, self._groupId, self)
'''
Remove this group from DCGM. This object will no longer be valid after this call.
'''
def Delete(self):
del self.config
self.config = None
del self.samples
self.samples = None
del self.health
self.health = None
del self.policy
self.policy = None
del self.discovery
self.discovery = None
del self.stats
self.stats = None
del self.action
self.action = None
del self.profiling
self.profiling = None
#Delete the group we created if we're not using the special all-GPU group
if self._groupId is not None and not self._IsGroupIdStatic():
ret = dcgm_agent.dcgmGroupDestroy(self._dcgmHandle.handle, self._groupId)
dcgm_structs._dcgmCheckReturn(ret)
self._groupId = None
'''
Private method to determine if our groupId is a predefined one
'''
def _IsGroupIdStatic(self):
if self._groupId == dcgm_structs.DCGM_GROUP_ALL_GPUS or \
self._groupId == dcgm_structs.DCGM_GROUP_ALL_NVSWITCHES:
return True
return False
'''
Add a GPU to this group
gpuId is the GPU ID to add to our group
Returns Nothing. Throws an exception on error
'''
def AddGpu(self, gpuId):
if self._IsGroupIdStatic():
raise pydcgm.DcgmException("Can't add a GPU to a static group")
ret = dcgm_agent.dcgmGroupAddDevice(self._dcgmHandle.handle, self._groupId, gpuId)
dcgm_structs._dcgmCheckReturn(ret)
'''
Add an entity to this group
entityGroupId is DCGM_FE_? constant of the entity group this entity belongs to
entityId is the entity to add to this group
Returns Nothing. Throws an exception on error
'''
def AddEntity(self, entityGroupId, entityId):
if self._IsGroupIdStatic():
raise pydcgm.DcgmException("Can't add an entity to a static group")
ret = dcgm_agent.dcgmGroupAddEntity(self._dcgmHandle.handle, self._groupId, entityGroupId, entityId)
dcgm_structs._dcgmCheckReturn(ret)
'''
Remove a GPU from this group
gpuId is the GPU ID to remove from our group
Returns Nothing. Throws an exception on error
'''
def RemoveGpu(self, gpuId):
if self._IsGroupIdStatic():
raise pydcgm.DcgmException("Can't remove a GPU from a static group")
ret = dcgm_agent.dcgmGroupRemoveDevice(self._dcgmHandle.handle, self._groupId, gpuId)
dcgm_structs._dcgmCheckReturn(ret)
'''
Remove an entity from this group
entityGroupId is DCGM_FE_? constant of the entity group this entity belongs to
entityId is the entity to remove from this group
Returns Nothing. Throws an exception on error
'''
def RemoveEntity(self, entityGroupId, entityId):
if self._IsGroupIdStatic():
raise pydcgm.DcgmException("Can't remove an entity from a static group")
ret = dcgm_agent.dcgmGroupRemoveEntity(self._dcgmHandle.handle, self._groupId, entityGroupId, entityId)
dcgm_structs._dcgmCheckReturn(ret)
'''
Get an array of GPU ids that are part of this group
Note: this ignores non-GPU members of the group
Returns a list of GPU ids. Throws an exception on error
'''
def GetGpuIds(self):
groupInfo = dcgm_agent.dcgmGroupGetInfo(self._dcgmHandle.handle, self._groupId)
groupGpuIds = []
for i in range(groupInfo.count):
if groupInfo.entityList[i].entityGroupId != dcgm_fields.DCGM_FE_GPU:
continue
groupGpuIds.append(groupInfo.entityList[i].entityId)
return groupGpuIds
'''
Get an array of entities that are part of this group
Returns a list of c_dcgmGroupEntityPair_t structs. Throws an exception on error
'''
def GetEntities(self):
groupInfo = dcgm_agent.dcgmGroupGetInfo(self._dcgmHandle.handle, self._groupId)
entities = groupInfo.entityList[0:groupInfo.count]
return entities
'''
Get the groupId of this object
Returns our groupId
'''
def GetId(self):
return self._groupId
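# The block below is an illustrative usage sketch only and is not executed when this module is
# imported. It assumes a standalone nv-hostengine reachable at 127.0.0.1 and at least one GPU;
# the group name "example_group" is an arbitrary placeholder.
if __name__ == "__main__":
    import pydcgm
    handleObj = pydcgm.DcgmHandle(ipAddress="127.0.0.1")
    #Bind to the predefined group that always contains every GPU
    allGpusGroup = pydcgm.DcgmGroup(handleObj, groupId=dcgm_structs.DCGM_GROUP_ALL_GPUS)
    print("All GPU ids: %s" % str(allGpusGroup.GetGpuIds()))
    #Create an empty custom group and populate it
    customGroup = pydcgm.DcgmGroup(handleObj, groupName="example_group")
    for gpuId in allGpusGroup.GetGpuIds():
        customGroup.AddGpu(gpuId)
    print("Custom group entities: %s" % str(customGroup.GetEntities()))
    #Destroy the custom group when done
    customGroup.Delete()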
| DCGM-master | testing/python3/DcgmGroup.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_agent
import dcgm_structs
class DcgmStatus:
def __init__(self):
self.handle = dcgm_agent.dcgmStatusCreate()
self.errors = []
def __del__(self):
dcgm_agent.dcgmStatusDestroy(self.handle)
'''
Take any errors stored in our handle and update self.errors with them
'''
def UpdateErrors(self):
errorCount = dcgm_agent.dcgmStatusGetCount(self.handle)
if errorCount < 1:
return
for i in range(errorCount):
self.errors.append(dcgm_agent.dcgmStatusPopError(self.handle))
'''
Throw an exception if any errors are stored in our status handle
The exception text will contain all of the errors
'''
def ThrowExceptionOnErrors(self):
#Make sure we've captured all errors before looking at them
self.UpdateErrors()
if len(self.errors) < 1:
return
errorString = "Errors: "
for value in self.errors:
errorString += "\"%s\"" % value
raise dcgm_structs.DCGMError(value.status)
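# Minimal usage sketch (not executed on import). It assumes a standalone nv-hostengine reachable
# at 127.0.0.1. A DcgmStatus handle is typically passed to batch APIs such as
# dcgm_agent.dcgmConfigSet(), which record per-GPU errors into it instead of failing the whole call.
if __name__ == "__main__":
    handleObj = pydcgm.DcgmHandle(ipAddress="127.0.0.1")
    status = DcgmStatus()
    # ... status.handle would be passed to a batch API call here ...
    status.UpdateErrors()
    status.ThrowExceptionOnErrors()
    print("No errors recorded in the status handle")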
| DCGM-master | testing/python3/DcgmStatus.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import dcgm_fields
import dcgm_fields_internal
import dcgm_structs
import dcgm_agent
import ctypes
import dcgmvalue
import pydcgm
import json
'''
Helper class that makes a python-friendly field value from one returned from the python bindings
'''
class DcgmFieldValue():
'''
Constructor
rawValue is the latest dcgm_structs.c_dcgmFieldValue_v? structure of a field value returned from the raw APIs
'''
def __init__(self, rawValue):
#Make sure the class passed in is an expected type
if not type(rawValue) == dcgm_structs.c_dcgmFieldValue_v1:
raise Exception("Unexpected rawValue type %s" % str(type(rawValue)))
self.ts = rawValue.ts
self.fieldId = rawValue.fieldId
self.fieldType = chr(rawValue.fieldType)
self.isBlank = False
self.value = None
if rawValue.status != dcgm_structs.DCGM_ST_OK:
self.isBlank = True
return
if self.fieldType == dcgm_fields.DCGM_FT_DOUBLE:
self.value = float(rawValue.value.dbl)
self.isBlank = dcgmvalue.DCGM_FP64_IS_BLANK(self.value)
elif self.fieldType == dcgm_fields.DCGM_FT_INT64 or self.fieldType == dcgm_fields.DCGM_FT_TIMESTAMP:
self.value = int(rawValue.value.i64)
self.isBlank = dcgmvalue.DCGM_INT64_IS_BLANK(self.value)
elif self.fieldType == dcgm_fields.DCGM_FT_STRING:
self.value = str(rawValue.value.str)
self.isBlank = dcgmvalue.DCGM_STR_IS_BLANK(self.value)
elif self.fieldType == dcgm_fields.DCGM_FT_BINARY:
if self.fieldId == dcgm_fields.DCGM_FI_DEV_ACCOUNTING_DATA:
accStats = dcgm_structs.c_dcgmDevicePidAccountingStats_v1()
ctypes.memmove(ctypes.addressof(accStats), rawValue.value.blob, accStats.FieldsSizeof())
if self.fieldId in [dcgm_fields_internal.DCGM_FI_DEV_COMPUTE_PIDS, dcgm_fields_internal.DCGM_FI_DEV_GRAPHICS_PIDS]:
processStats = dcgm_structs.c_dcgmRunningProcess_t()
ctypes.memmove(ctypes.addressof(processStats), rawValue.value.blob, processStats.FieldsSizeof())
self.value = processStats
self.fieldType = dcgm_fields.DCGM_FT_BINARY
# This should always be false
self.isBlank = dcgmvalue.DCGM_INT64_IS_BLANK(processStats.pid)
elif self.fieldId == dcgm_fields.DCGM_FI_SYNC_BOOST:
#Not exposed publicly for now
self.value = None
else:
raise Exception("Blobs not handled yet for fieldId %d" % self.fieldId)
else:
raise Exception("Unhandled fieldType: %s" % self.fieldType)
class DcgmFieldValueTimeSeries:
def __init__(self):
self.values = [] #Values in timestamp order
def __len__(self):
return len(self.values)
def __getitem__(self, key):
return self.values[key]
def InsertValue(self, value):
if len(self.values) < 1 or value.ts >= self.values[-1].ts:
self.values.append(value)
return
#Otherwise, we need to insert the value in the correct place. Find the place
for i, existingValue in enumerate(self.values):
if value.ts < existingValue.ts:
self.values.insert(i, value)
return
raise Exception("Unexpected no place to insert ts %d" % value.ts)
class FieldValueEncoder(json.JSONEncoder):
    # Pylint does not like overloading the default method, so the comment below is a workaround (WAR) for the linting problem
def default(self, obj): # pylint: disable=E0202
nested_json = []
i=0
for key in obj:
if isinstance(key, DcgmFieldValue):
if(key.isBlank):
continue
nested_json.append({'Timestamp' : key.ts, 'FieldId': key.fieldId, 'Value' : key.value})
else:
return json.JSONEncoder.default(self, obj) # Let default encoder throw exception
return nested_json
def py_helper_dcgm_field_values_since_callback(gpuId, values, numValues, userData):
userData = ctypes.cast(userData, ctypes.py_object).value
userData._ProcessValues(gpuId, values[0:numValues])
return 0
helper_dcgm_field_values_since_callback = dcgm_agent.dcgmFieldValueEnumeration_f(py_helper_dcgm_field_values_since_callback)
def py_helper_dcgm_field_values_since_callback_v2(entityGroupId, entityId, values, numValues, userData):
userData = ctypes.cast(userData, ctypes.py_object).value
userData._ProcessValuesV2(entityGroupId, entityId, values[0:numValues])
return 0
helper_dcgm_field_values_since_callback_v2 = dcgm_agent.dcgmFieldValueEntityEnumeration_f(py_helper_dcgm_field_values_since_callback_v2)
'''
Helper class for handling field value update callbacks and storing them in a .values member variable
'''
class DcgmFieldValueCollection:
def __init__(self, handle, groupId):
self.values = {} #2D dictionary of [gpuId][fieldId](DcgmFieldValueTimeSeries)
self.entityValues = {} #3D dictionary of [entityGroupId][entityId][fieldId](DcgmFieldValueTimeSeries)
self._handle = handle
self._groupId = groupId
self._numValuesSeen = 0
self._nextSinceTimestamp = 0
'''
Helper function called by the callback of dcgm_agent.dcgmGetValuesSince to process individual field values
'''
def _ProcessValues(self, gpuId, values):
self._numValuesSeen += len(values)
if gpuId not in self.values:
self.values[gpuId] = {}
for rawValue in values:
#Convert to python-friendly value
value = DcgmFieldValue(rawValue)
if value.fieldId not in self.values[gpuId]:
self.values[gpuId][value.fieldId] = DcgmFieldValueTimeSeries()
self.values[gpuId][value.fieldId].InsertValue(value)
'''
Helper function called by the callback py_helper_dcgm_field_values_since_callback_v2 to process individual field values
'''
def _ProcessValuesV2(self, entityGroupId, entityId, values):
self._numValuesSeen += len(values)
if entityGroupId not in self.entityValues:
self.entityValues[entityGroupId] = {}
if entityId not in self.entityValues[entityGroupId]:
self.entityValues[entityGroupId][entityId] = {}
for rawValue in values:
#Convert to python-friendly value
value = DcgmFieldValue(rawValue)
if value.fieldId not in self.entityValues[entityGroupId][entityId]:
self.entityValues[entityGroupId][entityId][value.fieldId] = DcgmFieldValueTimeSeries()
self.entityValues[entityGroupId][entityId][value.fieldId].InsertValue(value)
'''
Get the latest values for a fieldGroup and store them to the .values member variable
Note: This class does not automatically watch fieldGroup. You must do that ahead of time with dcgmGroup.samples.WatchFields()
'''
def GetLatestValues(self, fieldGroup):
ret = dcgm_agent.dcgmGetLatestValues(self._handle, self._groupId, fieldGroup.fieldGroupId, helper_dcgm_field_values_since_callback, self)
#Will throw exception on error
dcgm_structs._dcgmCheckReturn(ret)
'''
Method to cause more field values to be retrieved from DCGM. Returns the
number of field values that were retrieved.
'''
def GetAllSinceLastCall(self, fieldGroup):
beforeCount = self._numValuesSeen
self._nextSinceTimestamp = dcgm_agent.dcgmGetValuesSince(self._handle, self._groupId, fieldGroup.fieldGroupId, self._nextSinceTimestamp, helper_dcgm_field_values_since_callback, self)
afterCount = self._numValuesSeen
return afterCount - beforeCount
def GetLatestValues_v2(self, fieldGroup):
ret = dcgm_agent.dcgmGetLatestValues_v2(self._handle, self._groupId, fieldGroup.fieldGroupId, helper_dcgm_field_values_since_callback_v2, self)
#Will throw exception on error
dcgm_structs._dcgmCheckReturn(ret)
'''
Method to cause more field values to be retrieved from DCGM. Returns the number of field values that were retrieved
'''
def GetAllSinceLastCall_v2(self, fieldGroup):
beforeCount = self._numValuesSeen
        #Use the v2 (entity-aware) callback so values are routed to _ProcessValuesV2 and stored in .entityValues
        self._nextSinceTimestamp = dcgm_agent.dcgmGetValuesSince_v2(self._handle, self._groupId, fieldGroup.fieldGroupId, self._nextSinceTimestamp, helper_dcgm_field_values_since_callback_v2, self)
afterCount = self._numValuesSeen
return afterCount - beforeCount
'''
Empty .values{} so that old data is no longer present in this structure.
This can be used to prevent .values from growing over time
'''
def EmptyValues(self):
self.values = {}
self._numValuesSeen = 0
'''
Helper class for watching a field group and storing fields values returned from it
'''
class DcgmFieldGroupWatcher(DcgmFieldValueCollection):
'''
Constructor
handle is a DCGM handle from dcgm_agent.dcgmInit()
groupId is a valid DCGM group ID returned from dcgm_agent.dcgmGroupCreate
fieldGroup is the DcgmFieldGroup() instance to watch fields for
    operationMode is a dcgm_structs.DCGM_OPERATION_MODE_? constant indicating whether the host engine is running in lock-step or auto mode
updateFreq is how often to update each field in usec
maxKeepAge is how long DCGM should keep values for in seconds
maxKeepSamples is the maximum number of samples DCGM should ever cache for each field
startTimestamp is a base timestamp we should start from when first reading values. This can be used to resume a
previous instance of a DcgmFieldGroupWatcher by using its _nextSinceTimestamp.
0=start with all cached data
'''
def __init__(self, handle, groupId, fieldGroup, operationMode, updateFreq, maxKeepAge, maxKeepSamples, startTimestamp):
self._fieldGroup = fieldGroup
self._operationMode = operationMode
self._updateFreq = updateFreq
self._maxKeepAge = maxKeepAge
self._maxKeepSamples = maxKeepSamples
DcgmFieldValueCollection.__init__(self, handle, groupId)
self._nextSinceTimestamp = 0 #Start from beginning of time
if startTimestamp > 0:
self._nextSinceTimestamp = startTimestamp
self._numValuesSeen = 0
#Start watches
self._WatchFieldGroup()
'''
Initiate the host engine watch on the fields
'''
def _WatchFieldGroup(self):
ret = dcgm_agent.dcgmWatchFields(self._handle, self._groupId, self._fieldGroup.fieldGroupId, self._updateFreq, self._maxKeepAge, self._maxKeepSamples)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
# Force an update of the fields so that we can fetch initial values.
ret = dcgm_agent.dcgmUpdateAllFields(self._handle, 1)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
# Initial update will fetch from startTimestamp.
self.GetAllSinceLastCall()
'''
Method to cause more field values to be retrieved from DCGM. Returns the
number of field values that were retrieved
'''
def GetAllSinceLastCall(self):
#If we're in manual mode, force an update
if self._operationMode == dcgm_structs.DCGM_OPERATION_MODE_MANUAL:
ret = dcgm_agent.dcgmUpdateAllFields(self._handle, 1)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
return super().GetAllSinceLastCall(self._fieldGroup)
def py_helper_dcgm_field_values_since_entity_callback(entityGroupId, entityId, values, numValues, userData):
userData = ctypes.cast(userData, ctypes.py_object).value
userData._ProcessValues(entityGroupId, entityId, values[0:numValues])
return 0
helper_dcgm_field_values_since_entity_callback = dcgm_agent.dcgmFieldValueEntityEnumeration_f(py_helper_dcgm_field_values_since_entity_callback)
'''
Helper class for handling field value update callbacks and storing them in a .values member variable
'''
class DcgmFieldValueEntityCollection:
def __init__(self, handle, groupId):
self.values = {} #3D dictionary of [entityGroupId][entityId][fieldId](DcgmFieldValueTimeSeries)
self._handle = handle
self._groupId = groupId
self._numValuesSeen = 0
self._nextSinceTimestamp = 0
'''
Helper function called by the callback of dcgm_agent.dcgmGetValuesSince to process individual field values
'''
def _ProcessValues(self, entityGroupId, entityId, values):
self._numValuesSeen += len(values)
if entityGroupId not in self.values:
self.values[entityGroupId] = {}
if entityId not in self.values[entityGroupId]:
self.values[entityGroupId][entityId] = {}
for rawValue in values:
#Convert to python-friendly value
value = DcgmFieldValue(rawValue)
if value.fieldId not in self.values[entityGroupId][entityId]:
self.values[entityGroupId][entityId][value.fieldId] = DcgmFieldValueTimeSeries()
self.values[entityGroupId][entityId][value.fieldId].InsertValue(value)
'''
Get the latest values for a fieldGroup and store them to the .values member variable
Note: This class does not automatically watch fieldGroup. You must do that ahead of time with dcgmGroup.samples.WatchFields()
'''
def GetLatestValues(self, fieldGroup):
ret = dcgm_agent.dcgmGetLatestValues_v2(self._handle, self._groupId, fieldGroup.fieldGroupId, helper_dcgm_field_values_since_entity_callback, self)
#Will throw exception on error
dcgm_structs._dcgmCheckReturn(ret)
'''
Method to cause more field values to be retrieved from DCGM. Returns the
number of field values that were retrieved.
'''
def GetAllSinceLastCall(self, fieldGroup):
beforeCount = self._numValuesSeen
self._nextSinceTimestamp = dcgm_agent.dcgmGetValuesSince_v2(self._handle, self._groupId, fieldGroup.fieldGroupId, self._nextSinceTimestamp, helper_dcgm_field_values_since_entity_callback, self)
afterCount = self._numValuesSeen
return afterCount - beforeCount
'''
Empty .values{} so that old data is no longer present in this structure.
This can be used to prevent .values from growing over time
'''
def EmptyValues(self):
self.values = {}
self._numValuesSeen = 0
'''
Helper class for watching a field group and storing fields values returned from it
'''
class DcgmFieldGroupEntityWatcher(DcgmFieldValueEntityCollection):
'''
Constructor
handle is a DCGM handle from dcgm_agent.dcgmInit()
groupId is a valid DCGM group ID returned from dcgm_agent.dcgmGroupCreate
fieldGroup is the DcgmFieldGroup() instance to watch fields for
    operationMode is a dcgm_structs.DCGM_OPERATION_MODE_? constant indicating whether the host engine is running in lock-step or auto mode
updateFreq is how often to update each field in usec
maxKeepAge is how long DCGM should keep values for in seconds
maxKeepSamples is the maximum number of samples DCGM should ever cache for each field
startTimestamp is a base timestamp we should start from when first reading values. This can be used to resume a
    previous instance of a DcgmFieldGroupEntityWatcher by using its _nextSinceTimestamp.
0=start with all cached data
'''
def __init__(self, handle, groupId, fieldGroup, operationMode, updateFreq, maxKeepAge, maxKeepSamples, startTimestamp):
self._fieldGroup = fieldGroup
self._operationMode = operationMode
self._updateFreq = updateFreq
self._maxKeepAge = maxKeepAge
self._maxKeepSamples = maxKeepSamples
DcgmFieldValueEntityCollection.__init__(self, handle, groupId)
self._nextSinceTimestamp = 0 #Start from beginning of time
if startTimestamp > 0:
self._nextSinceTimestamp = startTimestamp
self._numValuesSeen = 0
#Start watches
self._WatchFieldGroup()
'''
Initiate the host engine watch on the fields
'''
def _WatchFieldGroup(self):
ret = dcgm_agent.dcgmWatchFields(self._handle, self._groupId, self._fieldGroup.fieldGroupId, self._updateFreq, self._maxKeepAge, self._maxKeepSamples)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
# Force an update of the fields so that we can fetch initial values.
ret = dcgm_agent.dcgmUpdateAllFields(self._handle, 1)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
# Initial update will fetch from startTimestamp.
self.GetAllSinceLastCall()
'''
Method to cause more field values to be retrieved from DCGM. Returns the
number of field values that were retrieved
'''
def GetAllSinceLastCall(self):
#If we're in manual mode, force an update
if self._operationMode == dcgm_structs.DCGM_OPERATION_MODE_MANUAL:
ret = dcgm_agent.dcgmUpdateAllFields(self._handle, 1)
dcgm_structs._dcgmCheckReturn(ret) #Will throw exception on error
return super().GetAllSinceLastCall(self._fieldGroup)
#Test program for demonstrating how this module works
def main():
operationMode = dcgm_structs.DCGM_OPERATION_MODE_AUTO
timeStep = 1.0
dcgm_structs._dcgmInit()
dcgm_agent.dcgmInit() #Will throw an exception on error
handle = dcgm_agent.dcgmStartEmbedded(operationMode)
handleObj = pydcgm.DcgmHandle(handle=handle)
groupId = dcgm_structs.DCGM_GROUP_ALL_GPUS
fieldIds = [dcgm_fields.DCGM_FI_DEV_SM_CLOCK, dcgm_fields.DCGM_FI_DEV_MEM_CLOCK]
fieldGroup = pydcgm.DcgmFieldGroup(handleObj, "my_field_group", fieldIds)
updateFreq = int(timeStep * 1000000.0)
maxKeepAge = 3600.0 #1 hour
maxKeepSamples = 0 #unlimited. maxKeepAge will enforce quota
startTimestamp = 0 #beginning of time
dfcw = DcgmFieldGroupWatcher(handle, groupId, fieldGroup, operationMode, updateFreq, maxKeepAge, maxKeepSamples, startTimestamp)
dfcw2 = DcgmFieldGroupEntityWatcher(handle, groupId, fieldGroup, operationMode, updateFreq, maxKeepAge, maxKeepSamples, startTimestamp)
while(True):
newUpdateCount = dfcw.GetAllSinceLastCall()
newUpdateCount2 = dfcw2.GetAllSinceLastCall()
print("Got %d and %d new field value updates" % (newUpdateCount, newUpdateCount2))
for gpuId in list(dfcw.values.keys()):
print("gpuId %d" % gpuId)
for fieldId in list(dfcw.values[gpuId].keys()):
print(" fieldId %d: %d values. latest timestamp %d" % \
(fieldId, len(dfcw.values[gpuId][fieldId]), dfcw.values[gpuId][fieldId][-1].ts))
for entityGroupId in list(dfcw2.values.keys()):
print("entityGroupId %d" % entityGroupId)
for entityId in list(dfcw2.values[entityGroupId].keys()):
print(" entityId %d" % entityId)
for fieldId in list(dfcw2.values[entityGroupId][entityId].keys()):
print(" fieldId %d: %d values. latest timestamp %d" % \
(fieldId, len(dfcw2.values[entityGroupId][entityId][fieldId]), dfcw2.values[entityGroupId][entityId][fieldId][-1].ts))
time.sleep(timeStep)
if __name__ == "__main__":
main()
| DCGM-master | testing/python3/dcgm_field_helpers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## {{{ http://code.activestate.com/recipes/577479/ (r1)
from collections import namedtuple
from functools import wraps
import ctypes
import os
import sys
import fnmatch
import logger
import platform
import subprocess
import string
import stat
import getpass
import signal
import socket
try:
from distro import linux_distribution
except ImportError:
try:
from platform import linux_distribution
except ImportError:
print("Please install the distro package")
raise
import option_parser
import test_utils
from subprocess import check_output
_CacheInfo = namedtuple("CacheInfo", "hits misses maxsize currsize")
def cache():
"""Memorizing cache decorator.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses maxsize, size) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
"""
def decorating_function(user_function,
tuple=tuple, sorted=sorted, len=len, KeyError=KeyError):
cache = dict()
hits_misses = [0, 0]
kwd_mark = object() # separates positional and keyword args
@wraps(user_function)
def wrapper(*args, **kwds):
key = args
if kwds:
key += (kwd_mark,) + tuple(sorted(kwds.items()))
try:
result = cache[key]
hits_misses[0] += 1
except KeyError:
result = user_function(*args, **kwds)
cache[key] = result
hits_misses[1] += 1
return result
def cache_info():
"""Report cache statistics"""
return _CacheInfo(hits_misses[0], hits_misses[1], None, len(cache))
def cache_clear():
"""Clear the cache and cache statistics"""
cache.clear()
            #Mutate the list in place; rebinding the name would only create a local variable and leave the stats unchanged
            hits_misses[0] = 0
            hits_misses[1] = 0
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return wrapper
return decorating_function
# ----- Example ----------------------------------------------------------------
if __name__ == '__main__':
@cache()
def fib(n):
if n < 2:
return 1
return fib(n-1) + fib(n-2)
from random import shuffle
inputs = list(range(30))
shuffle(inputs)
results = sorted(fib(n) for n in inputs)
print(results)
print((fib.cache_info()))
expected_output = '''[1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144,
233, 377, 610, 987, 1597, 2584, 4181, 6765, 10946, 17711, 28657,
46368, 75025, 121393, 196418, 317811, 514229, 832040]
CacheInfo(hits=56, misses=30, maxsize=None, currsize=30)
'''
## end of http://code.activestate.com/recipes/577479/ }}}
# Log an exception message before raising it.
def logException(msg):
logger.error("Exception. " + msg);
raise Exception(msg)
def is_root():
if is_linux():
return os.geteuid() == 0
else:
return ctypes.windll.shell32.IsUserAnAdmin()
def is_real_user_root():
"""
Effective user can be changed. But real user is always the same and can't be changed
"""
if is_linux():
return os.getuid() == 0
else:
# Windows can't change user so implementation is the same as in is_root()
return ctypes.windll.shell32.IsUserAnAdmin()
_UserInfo = namedtuple("UserInfo", "uid, gid, name")
@cache()
def get_user_idinfo(username):
from pwd import getpwnam
info = getpwnam(username)
return _UserInfo(info.pw_uid, info.pw_gid, info.pw_name)
@cache()
def get_name_by_uid(uid):
from pwd import getpwuid
return getpwuid(uid).pw_name
script_dir = os.path.realpath(sys.path[0])
def find_files(path = script_dir, mask = "*", skipdirs=None, recurse=True):
skipdirs = skipdirs or []
#Recurse subdirectories?
if recurse:
for root, dirnames, filenames in os.walk(path):
if skipdirs is not None:
[dirnames.remove(skip) for skip in skipdirs if skip in dirnames] # don't visit directories in skipdirs list
for filename in fnmatch.filter(filenames, mask):
yield os.path.abspath(os.path.join(root, filename))
else:
#Just list files inside "path"
filenames = os.listdir(path)
for filename in fnmatch.filter(filenames, mask):
yield os.path.abspath(os.path.join(path, filename))
def which(name):
"""
Returns True if command line application is in the PATH.
"""
if is_linux():
return 0 == os.system('which "%s" 1> /dev/null 2> /dev/null' % name)
else:
# TODO on windows os.system pops (for a brief moment) cmd console
# this function should be reimplemented so that it wouldn't happen
return 0 == os.system('where "%s" &> NUL' % name)
"""
stores string representing current platform
expected returned values
"Linux_64bit"
"Linux_32bit"
"Windows_64bit"
"Linux_ppc64le"
"Linux_aarch64"
treats VMkernel platform as Linux
"""
current_os = platform.system()
if current_os == "VMkernel":
current_os = "Linux" # Treat VMkernel as normal Linux.
def is_windows(os=current_os):
return os == "Windows"
def is_linux(os=current_os):
return os == "Linux"
def is_cuda_supported_system():
# CUDA is supported everywhere except in virtualization environments
return is_bare_metal_system()
def is_healthmon_supported_system():
return is_linux() and is_cuda_supported_system()
def is_esx_hypervisor_system():
return platform.system() == "VMkernel"
def is_microsoft_hyper_v():
    """
    Returns True if we appear to be running inside a Microsoft Hyper-V virtual machine
    """
    try:
        dmi = check_output(["which", "dmidecode"])
    except Exception:
        # dmidecode is not installed, so we cannot detect Hyper-V; assume bare metal
        return False
    if is_root() and os.path.isfile(dmi.strip()):
        systemType = check_output(["dmidecode", "-s", "system-product-name"])
        # check_output returns bytes under Python 3, so compare against a bytes literal
        if systemType.strip() == b"Virtual Machine":
            return True
    return False
# Util method to check if QEMU VM is running
# DGX-2 VM uses QEMU
def is_qemu_vm():
"""
Returns True if QEMU VM is running on the system()
"""
cmd = 'lshw -c system | grep QEMU | wc -l'
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if int(out) > 0:
return True
else:
return False
def is_bare_metal_system():
if is_esx_hypervisor_system():
return False
elif is_linux() and linux_distribution()[0] == "XenServer":
return False
elif is_linux() and is_microsoft_hyper_v():
return False
elif is_linux() and is_qemu_vm():
return False
else:
return True
def is_64bit():
if os.name == 'nt':
if platform.uname()[4] == 'AMD64':
return True
return platform.architecture()[0] == "64bit"
def is_32bit():
if os.name == 'nt':
if platform.uname()[4] == 'x86':
return True
return platform.architecture()[0] == "32bit"
def is_system_64bit():
return platform.machine() in ["x86_64", "AMD64"]
# 32-bit Python on 64-bit Windows reports incorrect architecture, therefore not using platform.architecture() directly
platform_identifier = current_os + "_" + ("64bit" if is_64bit() else "32bit")
if platform.machine() == "ppc64le":
platform_identifier = "Linux_ppc64le"
if platform.machine() == "aarch64":
platform_identifier = "Linux_aarch64"
assert platform_identifier in ["Linux_32bit", "Linux_64bit", "Windows_64bit", "Linux_ppc64le", "Linux_aarch64"], "Result %s is not of expected platform" % platform_identifier
valid_file_name_characters = "-_.() " + string.ascii_letters + string.digits
def string_to_valid_file_name(s):
"""
Replaces invalid characters from string and replaces with dot '.'
"""
result = []
for ch in s:
if ch in valid_file_name_characters:
result.append(ch)
else:
result.append(".")
return "".join(result)
def gen_diff(left, right):
import difflib
s = difflib.SequenceMatcher(None, left, right)
for tag, i1, i2, j1, j2 in s.get_opcodes():
if tag == "equal":
for k in range(i1,i2):
l = left[k]
r = right[k-i1+j1]
yield (" ", l, r)
elif tag == "insert":
for k in range(j1,j2):
r = right[k]
yield ("+", "", r)
elif tag == "delete":
for k in range(i1,i2):
l = left[k]
yield ("-", l, "")
elif tag == "replace":
for k in range(i1,i2):
l = left[k]
r = right[k-i1+j1]
# difflib combines blocks for some reason and returns "replace" tag
# for lines that are the same. Let's fix that
if l == r:
yield (" ", l ,r)
else:
yield ("|", l ,r)
def plural_s(val):
"""
returns "s" if val > 1 or "" otherwise.
Can be used in strings to have proper plural form.
"""
if val > 1:
return "s"
return ""
def chunks(l, n):
"""
returns list of list of length n.
E.g. chunks([1, 2, 3, 4, 5], 2) returns [[1, 2], [3, 4], [5]]
"""
return [l[i:i+n] for i in range(0, len(l), n)]
def format_dev_sub_dev_id(pciIdPair):
"""
pciIdPair (int pci device id, int pci sub device id or None)
"""
if pciIdPair[1] is None:
return "(0x%08X, None)" % pciIdPair[0]
return "(0x%08X, 0x%08X)" % pciIdPair
# permission of: other owner group
stat_everyone_read_write = stat.S_IROTH | stat.S_IWOTH | stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP
# Exit if the current (effective) user can't create a file in the base test directory
def verify_file_permissions(user):
# Not a complete check, but enough to verify absolute path permission issues
try:
filename = os.path.join(os.path.realpath(script_dir), "test.txt")
f = open(filename, "w")
f.write("Permission test")
f.close()
os.remove(filename)
except:
print("Please run the test framework under a less restrictive directory, with RW access to the full path.")
print("The user '%s' doesn't have sufficient permissions here." % user)
sys.exit(1)
# Exit if either the current user, or specified non-root user appear to lack sufficient
# file system permissions for the test framework
def verify_user_file_permissions():
# Check current user
verify_file_permissions(getpass.getuser())
# Check non-root user, if specified
user = option_parser.options.non_root_user
if user:
try:
get_user_idinfo(user)
except KeyError:
print("User '%s' doesn't exist" % user)
sys.exit(1)
with test_utils.RunAsNonRoot(reload_driver=False):
verify_file_permissions(user)
# Exit if 'localhost' does not resolve to 127.0.0.1
def verify_localhost_dns():
try:
host_ip = socket.gethostbyname("localhost")
except:
print("Unable to resolve localhost")
sys.exit(1)
if host_ip != "127.0.0.1":
print("localhost does not resolve to 127.0.0.1")
sys.exit(1)
## Util method to check if the mps server is running in the background
def is_mps_server_running():
"""
Returns True if MPS server is running on the system
"""
if is_linux():
## MPS server is only supported on Linux.
cmd = 'ps -aux | grep nvidia-cuda | tr -s " " | cut -d " " -f 11'
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if b"nvidia-cuda-mps-server" in out.rstrip():
return True
elif b"nvidia-cuda-mps-control" in out.rstrip():
return True
else:
return False
else:
## MPS server is not supported on Windows. Return False for Windows
return False
def shorten_path(path, shorten_to_levels=2):
'''
Given a path, return a path of only the last "shorten_to_levels" levels.
For example, shorten_path('a/b/c/d/e', 2) => "d/e"
'''
path = os.path.normpath(path)
shortened_paths = path.split(os.sep)[-shorten_to_levels:]
return os.path.join(*shortened_paths)
def create_dir(path):
'''
Create the full directory structure specified by path. If the directory cannot be created
due to permission issues or because part of the path already exists and is not a directory
then an OSError is raised.
'''
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def wait_for_pid_to_die(pid):
'''This function returns once the pid no longer exists'''
while True:
try:
os.kill(pid, 0)
except OSError:
break
def verify_dcgm_service_not_active():
cmd = 'systemctl is-active --quiet dcgm'
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode == 0:
logException("Tests cannot run because the DCGM service is active")
cmd = 'systemctl is-active --quiet nvidia-dcgm'
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if p.returncode == 0:
logException("Tests cannot run because the Nvidia DCGM service is active")
def verify_nvidia_fabricmanager_service_active_if_needed():
cmd = "find /dev -regextype egrep -regex '/dev/nvidia-nvswitch[0-9]+'"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_buf, _ = p.communicate()
out = out_buf.decode('utf-8')
if not "nvswitch" in out.rstrip():
return
# Fabricmanager must be running if there are nvswitch devices.
cmd = 'systemctl status nvidia-fabricmanager'
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_buf, _ = p.communicate()
out = out_buf.decode('utf-8')
if not "running" in out.rstrip():
logException("Tests cannot run because the Nvidia Fabricmanager service is not active on systems with nvswitches")
def find_process_using_hostengine_port():
cmd = 'lsof -i -P -n | grep 5555'
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out_buf, err_buf = p.communicate()
out = out_buf.decode('utf-8')
err = err_buf.decode('utf-8')
if len(out) == 0:
# Nothing is using that port, we are good
return None
he = "nv-hostengine"
process_name = None
lines = out.split('\n')
for line in lines:
info = line.split()
if len(info) < 2:
continue
if info[-1] != '(LISTEN)':
continue # not listening, ignore
process_name = info[0]
pid = int(info[1])
if he.find(info[0]) == 0:
# an nv-hostengine process is found
return pid
if process_name:
# We have some other process using port 5555
msg = "Process %s with pid %d is listening to port 5555. Cannot run tests." % (process_name, pid)
logException(msg)
'''
Attempt to clean up zombie or accidentally left-open processes using port 5555
'''
def verify_hostengine_port_is_usable():
pid = find_process_using_hostengine_port()
if not pid:
# no hostengine process, move on with life
return
verify_dcgm_service_not_active()
os.kill(pid, signal.SIGKILL)
return
def get_testing_framework_library_path():
# type: () -> string
"""
Returns platform dependent path to the dcgm libraries.
To use in testing framework for :func:`_dcgmInit` calls
:return: String with relative path to the libdcgm*.so libraries
"""
def _get_arch_string():
# type: () -> string
if platform.machine() in ["x86_64", "AMD64"]:
return 'amd64'
elif platform.machine() == 'ppc64le':
return 'ppc64le'
elif platform.machine().lower().startswith('aarch64'):
return 'aarch64'
logException("Unsupported arch: %s" % platform.machine())
return './apps/%s/' % _get_arch_string()
"""
Makes sure we can locate nvvs and dcgmi. If not, exit with an error.
If so, set NVVS_BIN_PATH appropriately and return the path to dcgmi
"""
def verify_binary_locations():
nvvs_location = "%s/apps/nvvs/nvvs" % os.getcwd()
if not os.path.isfile(nvvs_location):
print(("nvvs is NOT installed in: %s\n" % nvvs_location))
sys.exit(1)
# This will instruct the embedded hostengine to find the nvvs binary here
nvvs_location = nvvs_location[:-5]
print(("Setting NVVS_BIN_PATH to %s" % nvvs_location))
os.environ["NVVS_BIN_PATH"] = nvvs_location
dcgmi_location = os.path.join(get_testing_framework_library_path(), "dcgmi")
if not os.path.isfile(dcgmi_location):
print(("dcgmi is NOT installed in: %s\n" % dcgmi_location))
sys.exit(1)
    # str.strip() removes a set of characters, not a suffix; use dirname to get the directory containing dcgmi
    dcgmi_prefix = os.path.dirname(dcgmi_location)
return dcgmi_prefix
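# Illustrative demo of a few of the pure helpers above (runs only when utils.py is executed
# directly, in addition to the cache() demo earlier in this file).
if __name__ == "__main__":
    # chunks() splits a list into fixed-size pieces
    print(chunks([1, 2, 3, 4, 5], 2))        # [[1, 2], [3, 4], [5]]
    # plural_s() helps build grammatically correct messages
    print("Found 3 GPU%s" % plural_s(3))     # Found 3 GPUs
    # gen_diff() yields (marker, left, right) tuples for a side-by-side diff
    for marker, left, right in gen_diff(["a", "b", "c"], ["a", "x", "c"]):
        print(marker, left, right)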
| DCGM-master | testing/python3/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import math
def mean(series):
'''Calculate the mean of an iterable'''
length = 0
total = 0.0
for i in series: # must iterate because generators have no len()
total += i
length += 1
if length == 0:
return 0
return total / length
def correlation_coefficient(x, y):
'''
taken from: https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line
'''
xBar = mean(x)
yBar = mean(y)
xyBar = mean(xi*yi for xi, yi in zip(x, y))
xSquaredBar = mean(xi**2 for xi in x)
ySquaredBar = mean(yi**2 for yi in y)
return (xyBar - xBar*yBar) / (math.sqrt((xSquaredBar-xBar**2) * (ySquaredBar-yBar**2)))
def standard_deviation(x):
'''
taken from: https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation
'''
xBar = mean(x)
N = len(x)
sumTerm = sum((xi - xBar)**2 for xi in x)
return math.sqrt((1./(N-1)) * sumTerm)
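# Quick self-check sketch (runs only when stats.py is executed directly); the expected values in
# the comments are rounded.
if __name__ == "__main__":
    xs = [1.0, 2.0, 3.0, 4.0, 5.0]
    ys = [2.0, 4.0, 6.0, 8.0, 10.0]
    print(mean(xs))                         # 3.0
    print(correlation_coefficient(xs, ys))  # 1.0 (perfectly linear relationship)
    print(standard_deviation(xs))           # ~1.5811 (corrected sample standard deviation)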
| DCGM-master | testing/python3/stats.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import copy
import test_utils
import utils
import pydcgm
import option_parser
import time
import glob
import shutil
import json
import logger
import argparse
import nvidia_smi_utils
import DcgmiDiag
import dcgm_structs
import dcgm_fields
import version
from subprocess import Popen, STDOUT, PIPE, check_output
logFile = "nvvs_diag.log"
debugFile = "nvvs_debug.log"
goldenValuesFile = "/tmp/golden_values.yml"
PASSED_COUNT = 0
FAILED_COUNT = 0
WAIVED_COUNT = 0
################################################################################
def remove_file_yolo(filename):
'''
Try to remove a file, not caring if any error occurs
'''
try:
os.remove(filename)
except:
pass
################################################################################
def setupEnvironment(cmdArgs):
"""
Function to prepare the test environment
"""
message = ''
# Set variable indicating we are running tests
os.environ['__DCGM_TESTING_FRAMEWORK_ACTIVE'] = '1'
# Verify if GPUs are free before running the tests
if not nvidia_smi_utils.are_gpus_free():
print("Some GPUs are in use, please check the workload and try again")
sys.exit(1)
if test_utils.is_framework_compatible() is False:
print("run_dcgm_diagnostic.py found to be a different version than DCGM. Exiting")
sys.exit(1)
else:
print("Running against Git Commit %s" % version.GIT_COMMIT)
# Enable persistence mode or the tests will fail
print("\nEnabling persistence mode")
(message, error) = nvidia_smi_utils.enable_persistence_mode()
if message:
print(message)
if error:
print(error)
print("\nWarning! Please make sure to enable persistence mode")
time.sleep(1)
# Collects the output of "nvidia-smi -q" and prints it out on the screen for debugging
print("\n###################### NVSMI OUTPUT FOR DEBUGGING ONLY ##########################")
(message, error) = nvidia_smi_utils.get_output()
if message:
print(message)
if error:
print(error)
print("\n###################### NVSMI OUTPUT FOR DEBUGGING ONLY ##########################\n\n")
# Tries to remove older log files
remove_file_yolo(logFile)
remove_file_yolo(debugFile)
print("============= TEST CONFIG ==========")
print("TEST CYLES: {}".format(cmdArgs.cycles))
print("DEVICE LIST: {}".format(cmdArgs.device_id))
print("====================================")
def trimJsonText(text):
return text[text.find('{'):text.rfind('}') + 1]
NAME_FIELD = "name"
RESULTS_FIELD = "results"
WARNING_FIELD = "warnings"
STATUS_FIELD = "status"
INFO_FIELD = "info"
GPU_FIELD = "gpu_ids"
RUNTIME_ERROR_FIELD = "runtime_error"
DIAG_THROTTLE_WARNING = "Clocks are being throttled for"
DIAG_DBE_WARNING = "ecc_dbe_volatile_total"
DIAG_ECC_MODE_WARNING = "because ECC is not enabled on GPU"
DIAG_INFOROM_WARNING = "Error calling NVML API nvmlDeviceValidateInforom"
DIAG_THERMAL_WARNING = "Thermal violations totaling "
DIAG_MIG_INCOMPATIBLE_WARNING = "is incompatible with the diagnostic because it prevents access to the entire GPU."
DIAG_MIG_MULTIPLE_GPU_WARNING = "Cannot run diagnostic: CUDA does not support enumerating GPUs with MIG mode enabled"
DIAG_THROTTLE_SUGGEST = "A GPU's clocks are being throttled due to a cooling issue. Please make sure your GPUs are properly cooled."
DIAG_DBE_SUGGEST = "This GPU needs to be drained and reset to clear the non-recoverable double bit errors."
DIAG_ECC_MODE_SUGGEST = "Run 'nvidia-smi -i <gpu id> -e 1' and then reboot to enable ECC memory."
DIAG_INFOROM_SUGGEST = "A GPU's inforom is corrupt. You should re-flash it with iromflsh or replace the GPU. Run nvidia-smi without arguments to see which GPU."
DIAG_THERMAL_SUGGEST = "A GPU has thermal violations happening. Please make sure your GPUs are properly cooled."
DIAG_MIG_INCOMPATIBLE_SUGGEST = "You must disable MIG mode or configure instances that use the entire GPU to run the diagnostic."
DIAG_MIG_MULTIPLE_GPU_SUGGEST = "You must run on only one GPU at a time when MIG is configured."
class TestRunner():
################################################################################
def __init__(self, cycles, dcgmiDiag, verbose):
self.cycles = int(cycles)
self.dcgmiDiag = dcgmiDiag
self.verbose = verbose
self.failed_runs = 0
self.failing_tests = {}
# The exclusion list is a list of [textToSearchFor, whatToPrintIfFound] entries
self.exclusions = [
[DIAG_INFOROM_WARNING, DIAG_INFOROM_SUGGEST],
[DIAG_THROTTLE_WARNING, DIAG_THROTTLE_SUGGEST],
[DIAG_THERMAL_WARNING, DIAG_THERMAL_SUGGEST],
[DIAG_MIG_INCOMPATIBLE_WARNING, DIAG_MIG_INCOMPATIBLE_SUGGEST],
[DIAG_MIG_MULTIPLE_GPU_WARNING, DIAG_MIG_MULTIPLE_GPU_SUGGEST],
]
################################################################################
def matchesExclusion(self, warning):
for exclusion in self.exclusions:
if warning.find(exclusion[0]) != -1:
return exclusion[1]
return None
def getErrorMessage(self, failureInfo, runIndex, recommendation):
msg = ''
if recommendation:
msg = "Iteration %d test '%s' is ignoring error '%s' : %s" % \
(runIndex, failureInfo.GetTestname(), failureInfo.GetFullError(), recommendation)
else:
msg = "Iteration %d test '%s' failed: '%s'" % \
(runIndex, failureInfo.GetTestname(), failureInfo.GetFullError())
return msg
################################################################################
def checkForErrors(self):
'''
Check the NVVS JSON output for errors, filtering out any errors that are environmental rather
than NVVS bugs. Returns a count of the number of errors. Anything > 0 will result in bugs against
NVVS.
Returns a tuple of [numErrors, numExclusions]
'''
numErrors = 0
numExclusions = 0
failureDetails = []
for key in self.failing_tests:
runFailures = 0
for failureInfo in self.failing_tests[key]:
recommendation = self.matchesExclusion(failureInfo.GetWarning())
if recommendation:
print(self.getErrorMessage(failureInfo, key, recommendation))
numExclusions += 1
else:
failureDetails.append(self.getErrorMessage(failureInfo, key, None))
runFailures += 1
if runFailures > 0:
self.failed_runs += 1
numErrors += runFailures
for failure in failureDetails:
print("%s\n" % failure)
return [numErrors, numExclusions]
################################################################################
def run_command(self, cycles):
"""
        Helper method to run a given command
"""
global WAIVED_COUNT
print("Running command: %s " % " ".join(self.dcgmiDiag.BuildDcgmiCommand()))
ret = 0
for runIndex in range(cycles):
self.dcgmiDiag.Run()
self.failing_tests[runIndex] = self.dcgmiDiag.failed_list
if self.dcgmiDiag.diagRet and not ret:
ret = self.dcgmiDiag.diagRet
# Get the number of actual errors in the output
failCount, exclusionCount = self.checkForErrors()
if self.verbose:
print(self.dcgmiDiag.lastStdout)
if self.dcgmiDiag.lastStderr:
print(self.dcgmiDiag.lastStderr)
if (failCount != 0):
if self.failed_runs > 0:
print("%d of %d runs Failed. Please attach %s and %s to your bug report."
% (self.failed_runs, cycles, logFile, debugFile))
print("ExclusionCount: %d" % exclusionCount)
print("FailCount: %d" % failCount)
print("&&&& FAILED")
print("Diagnostic test failed with code %d.\n" % ret)
# Popen returns 0 even if diag test fails, so failing here
return 1
elif exclusionCount > 0:
WAIVED_COUNT += 1
else:
print("Success")
return 0
################################################################################
def run(self):
self.dcgmiDiag.SetRunMode(3)
self.dcgmiDiag.SetConfigFile(None)
ret = self.run_command(self.cycles)
return ret
################################################################################
def checkCmdLine(cmdArgs, settings):
    if cmdArgs.device_id:
        # Verify devices have been specified correctly. Accept a single ID or a comma separated list of IDs.
        for gpuId in cmdArgs.device_id.split(","):
            if not gpuId.isdigit(): # despite being named isdigit(), ensures the string is a valid unsigned integer
                print("\"{}\" is not a valid device ID, please provide a comma separated list of numeric device IDs.".format(gpuId))
                sys.exit(1)
        print("Device list validated successfully")
if cmdArgs.vulcan or cmdArgs.verbose:
settings['verbose'] = True
else:
settings['verbose'] = False
settings['dev_id'] = cmdArgs.device_id
settings['cycles'] = cmdArgs.cycles
################################################################################
def getGoldenValueDebugFiles():
# This method copies debug files for golden values to the current directory.
gpuIdMetricsFileList = glob.glob('/tmp/dcgmgd_withgpuids*')
gpuIdMetricsFile = None
allMetricsFileList = glob.glob('/tmp/dcgmgd[!_]*')
allMetricsFile = None
if gpuIdMetricsFileList:
# Grab latest debug file
gpuIdMetricsFileList.sort()
gpuIdMetricsFile = gpuIdMetricsFileList[-1]
if allMetricsFileList:
# Grab latest debug file
allMetricsFileList.sort()
allMetricsFile = allMetricsFileList[-1]
fileList = []
try:
if gpuIdMetricsFile is not None:
shutil.copyfile(gpuIdMetricsFile, './dcgmgd_withgpuids.txt')
fileList.append('dcgmgd_withgpuids.txt')
if allMetricsFile is not None:
shutil.copyfile(allMetricsFile, './dcgmgd_allmetrics.txt')
fileList.append('dcgmgd_allmetrics.txt')
if os.path.isfile(goldenValuesFile):
shutil.copyfile(goldenValuesFile, './golden_values.yml')
fileList.append('golden_values.yml')
except (IOError, OSError) as e:
print("There was an error copying the debug files to the current directory %s" % e)
if fileList:
print("Please attach %s to the bug report." % fileList)
else:
print("No debug files were copied.")
################################################################################
def parseCommandLine():
parser = argparse.ArgumentParser(description="DCGM DIAGNOSTIC TEST FRAMEWORK")
parser.add_argument("-c", "--cycles", required=True, help="Number of test cycles to run, all tests are one cycle.")
parser.add_argument("-v", "--vulcan", action="store_true", help="Deprecated flag for running in the eris environment")
parser.add_argument("--verbose", action="store_true", help="Sets verbose mode")
parser.add_argument("-d", "--device-id", help="Comma separated list of nvml device ids.")
args = parser.parse_args()
return args
################################################################################
def main(cmdArgs):
settings = {}
checkCmdLine(cmdArgs, settings)
# Prepare the test environment and setup step
option_parser.initialize_as_stub()
setupEnvironment(cmdArgs)
prefix = utils.verify_binary_locations()
# Build a nvvs command list. Each element is an argument
current_location = os.path.realpath(sys.path[0])
# Get a list of gpus to run against
gpuIdStr = ''
if settings['dev_id'] is None:
# None specified on the command line. Build compatible lists of GPUs
dcgmHandle = pydcgm.DcgmHandle(ipAddress=None)
gpuIds = dcgmHandle.GetSystem().discovery.GetAllSupportedGpuIds()
gpuGroups = test_utils.group_gpu_ids_by_sku(dcgmHandle.handle, gpuIds)
if len(gpuGroups) > 1:
print("This system has more than one GPU SKU; DCGM Diagnostics is defaulting to just GPU(s) %s" %
gpuGroups[0])
gpuGroup = gpuGroups[0]
gpuIdStr = ",".join(map(str, gpuGroup))
del(dcgmHandle)
dcgmHandle = None
else:
gpuIdStr = settings['dev_id']
#Need to skip checks for down NvLinks or QA will file bugs
paramsStr = "pcie.test_nvlink_status=false"
paramsStr += ";pcie.h2d_d2h_single_unpinned.min_pci_width=2"
paramsStr += ";pcie.h2d_d2h_single_pinned.min_pci_width=2"
dcgmiDiag = DcgmiDiag.DcgmiDiag(gpuIds=gpuIdStr, paramsStr=paramsStr, dcgmiPrefix=prefix, runMode=3,
debugLevel=5, debugFile=debugFile)
# Start tests
run_test = TestRunner(settings['cycles'], dcgmiDiag, settings['verbose'])
print("\nRunning with the diagnostic... This may take a while, please wait...\n")
ret = run_test.run()
if ret != 0:
print("&&&& FAILED")
return ret
return ret
if __name__ == "__main__":
cmdArgs = parseCommandLine()
ret = main(cmdArgs)
if os.path.isfile(logFile):
with open(logFile, "r") as f:
log_content = f.readlines()
for log in log_content:
if "Pass" in log:
PASSED_COUNT += 1
elif "Fail" in log:
FAILED_COUNT += 1
elif "Skip" in log:
WAIVED_COUNT += 1
# QA uses these to count the tests passed
if FAILED_COUNT:
print('&&&& FAILED')
elif PASSED_COUNT == 0:
print('&&&& SKIPPED')
else:
print('&&&& PASSED')
logger.info("\n========== TEST SUMMARY ==========\n")
logger.info("Passed: {}".format(PASSED_COUNT))
logger.info("Failed: {}".format(FAILED_COUNT))
logger.info("Waived: {}".format(WAIVED_COUNT))
logger.info("Total: {}".format(PASSED_COUNT + FAILED_COUNT + WAIVED_COUNT))
logger.info("Cycles: {}".format(cmdArgs.cycles))
logger.info("==================================\n\n")
else:
print("&&&& SKIPPED")
print("Unable to provide test summary due to missing log file")
sys.exit(ret)
| DCGM-master | testing/python3/run_dcgm_diagnostic.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for "dcgm_structs.h"
##
from ctypes import *
from ctypes.util import find_library
import dcgm_structs
from dcgm_fields import _PrintableStructure
# Max length of the DCGM string field
DCGM_MAX_STR_LENGTH = 256
DCGM_MAX_BLOB_LENGTH = 3200
class c_dcgmGpuInfo(dcgm_structs._PrintableStructure):
_fields_ = [
('gpuId', c_uint),
('uuid', c_char * DCGM_MAX_STR_LENGTH)
]
class value(Union):
_fields_ = [
('i64', c_int64),
('dbl', c_double),
('str', c_char * DCGM_MAX_STR_LENGTH)
]
# Below is a test API simply to make sure versioning is working correctly
class c_dcgmVersionTest_v1(dcgm_structs._PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('a', c_uint)
]
class c_dcgmVersionTest_v2(dcgm_structs._PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('a', c_uint),
('b', c_uint)
]
dcgmVersionTest_version1 = dcgm_structs.make_dcgm_version(c_dcgmVersionTest_v1, 1)
dcgmVersionTest_version2 = dcgm_structs.make_dcgm_version(c_dcgmVersionTest_v2, 2)
dcgmVersionTest_version3 = dcgm_structs.make_dcgm_version(c_dcgmVersionTest_v2, 3)
# Represents a command to save or load a JSON file to/from the DcgmCacheManager
_dcgmStatsFileType_t = c_uint
DCGM_STATS_FILE_TYPE_JSON = 0
class c_dcgmCacheManagerSave_v1(dcgm_structs._PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('fileType', _dcgmStatsFileType_t),
('filename', c_char * 256)
]
class c_dcgmCacheManagerLoad_v1(dcgm_structs._PrintableStructure):
_fields_ = [
('version', c_uint),
]
dcgmCacheManagerSave_version1 = dcgm_structs.make_dcgm_version(c_dcgmCacheManagerSave_v1, 1)
dcgmCacheManagerLoad_version1 = dcgm_structs.make_dcgm_version(c_dcgmCacheManagerLoad_v1, 1)
class c_dcgmInjectFieldValue_v1(dcgm_structs._PrintableStructure):
_fields_ = [
('version', c_uint),
('fieldId', c_short),
('fieldType', c_short),
('status', c_uint),
('ts', c_int64),
('value', dcgm_structs.c_dcgmFieldValue_v1_value)
]
# This structure is used to represent a field value to be injected into the cache manager
dcgmInjectFieldValue_version1 = dcgm_structs.make_dcgm_version(c_dcgmInjectFieldValue_v1, 1)
#Cache Manager Info flags
DCGM_CMI_F_WATCHED = 0x00000001
#Watcher types
DcgmWatcherTypeClient = 0 # Embedded or remote client via external APIs
DcgmWatcherTypeHostEngine = 1 # Watcher is NvcmHostEngineHandler
DcgmWatcherTypeHealthWatch = 2 # Watcher is NvcmHealthWatch
DcgmWatcherTypePolicyManager = 3 # Watcher is NvcmPolicyMgr
DcgmWatcherTypeCacheManager = 4 # Watcher is DcgmCacheManager
DcgmWatcherTypeConfigManager = 5 # Watcher is NvcmConfigMgr
DcgmWatcherTypeNvSwitchManager = 6 # Watcher is NvSwitchManager
# ID of a remote client connection within the host engine
dcgm_connection_id_t = c_uint32
# Special constant for not connected
DCGM_CONNECTION_ID_NONE = 0
DCGM_CM_FIELD_INFO_NUM_WATCHERS = 10
class c_dcgm_cm_field_info_watcher_t(dcgm_structs._PrintableStructure):
_fields_ = [
('watcherType', c_uint),
('connectionId', dcgm_connection_id_t),
('monitorIntervalUsec', c_int64),
('maxAgeUsec', c_int64)
]
class dcgmCacheManagerFieldInfo_v4(dcgm_structs._PrintableStructure):
_fields_ = [
('version', c_uint32),
('flags', c_uint32),
('entityId', c_uint32),
('entityGroupId', c_uint32),
('fieldId', c_uint16),
('lastStatus', c_int16),
('oldestTimestamp', c_int64),
('newestTimestamp', c_int64),
('monitorIntervalUsec', c_int64),
('maxAgeUsec', c_int64),
('execTimeUsec', c_int64),
('fetchCount', c_int64),
('numSamples', c_int32),
('numWatchers', c_int32),
('watchers', c_dcgm_cm_field_info_watcher_t * DCGM_CM_FIELD_INFO_NUM_WATCHERS)
]
dcgmCacheManagerFieldInfo_version4 = dcgm_structs.make_dcgm_version(dcgmCacheManagerFieldInfo_v4, 4)
class c_dcgmCreateFakeEntities_v2(dcgm_structs._PrintableStructure):
_fields_ = [
('version', c_uint32),
('numToCreate', c_uint32),
('entityList', dcgm_structs.DCGM_MAX_HIERARCHY_INFO * dcgm_structs.c_dcgmMigHierarchyInfo_t),
]
dcgmCreateFakeEntities_version2 = dcgm_structs.make_dcgm_version(c_dcgmCreateFakeEntities_v2, 2)
class c_dcgmSetNvLinkLinkState_v1(dcgm_structs._PrintableStructure):
_fields_ = [
('version', c_uint32), # Version. Should be dcgmSetNvLinkLinkState_version1
('entityGroupId', c_uint32), # Entity group of the entity to set the link state of
('entityId', c_uint32), # ID of the entity to set the link state of
('linkId', c_uint32), # Link (or portId) of the link to set the state of
('linkState', c_uint32), # State to set the link to
('unused', c_uint32) # Not used for now. Set to 0
]
dcgmSetNvLinkLinkState_version1 = dcgm_structs.make_dcgm_version(c_dcgmSetNvLinkLinkState_v1, 1)
| DCGM-master | testing/python3/dcgm_structs_internal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from DcgmiDiag import DcgmiDiag
import utils
def main():
dd = DcgmiDiag(dcgmiPrefix=utils.verify_binary_locations())
passedCount = 0
for i in range(0, 160):
print("&&&& RUNNING dcgmi_diag_test")
failed = dd.Run()
if failed:
print("&&&& FAILED dcgmi_diag_test")
dd.PrintLastRunStatus()
else:
print("&&&& PASSED dcgmi_diag_test")
passedCount += 1
if __name__ == '__main__':
main()
| DCGM-master | testing/python3/dcgmi_diag_test.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for the internal API of DCGM library (dcgm_test_apis.h)
##
from ctypes import *
from ctypes.util import find_library
import dcgm_agent
import dcgm_fields
import dcgm_structs
import dcgm_structs_internal
DCGM_EMBEDDED_HANDLE = c_void_p(0x7fffffff)
# Utils
_dcgmIntCheckReturn = dcgm_structs._dcgmCheckReturn
dcgmDeviceConfig_t = dcgm_structs.c_dcgmDeviceConfig_v1
dcgmRecvUpdates_t = dcgm_structs._dcgmRecvUpdates_t
dcgmStatsFileType_t = dcgm_structs_internal._dcgmStatsFileType_t
dcgmInjectFieldValue_t = dcgm_structs_internal.c_dcgmInjectFieldValue_v1
"""
Corresponding Calls
"""
@dcgm_agent.ensure_byte_strings()
def dcgmServerRun(portNumber, socketPath, isConnectionTcp):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmEngineRun")
ret = fn(portNumber, socketPath, isConnectionTcp)
_dcgmIntCheckReturn(ret)
return ret
@dcgm_agent.ensure_byte_strings()
def dcgmGetLatestValuesForFields(dcgmHandle, gpuId, fieldIds):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmGetLatestValuesForFields")
field_values = (dcgm_structs.c_dcgmFieldValue_v1 * len(fieldIds))()
id_values = (c_uint * len(fieldIds))(*fieldIds)
ret = fn(dcgmHandle, c_int(gpuId), id_values, c_uint(len(fieldIds)), field_values)
_dcgmIntCheckReturn(ret)
return field_values
@dcgm_agent.ensure_byte_strings()
def dcgmGetMultipleValuesForField(dcgmHandle, gpuId, fieldId, maxCount, startTs, endTs, order):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmGetMultipleValuesForField")
localMaxCount = c_int(maxCount) #Going to pass by ref
#Make space to return up to maxCount records
max_field_values = (dcgm_structs.c_dcgmFieldValue_v1 * maxCount)()
ret = fn(dcgmHandle, c_int(gpuId), c_uint(fieldId), byref(localMaxCount), c_int64(startTs), c_int64(endTs), c_uint(order), max_field_values)
_dcgmIntCheckReturn(ret)
localMaxCount = localMaxCount.value #Convert to int
#We may have gotten less records back than we requested. If so, truncate our array
return max_field_values[:int(localMaxCount)]
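# Illustrative usage sketch (not part of the original module): fetch up to 100
# historical samples of a field in ascending time order. DCGM_ORDER_ASCENDING
# is assumed to be defined in dcgm_structs, and startTs/endTs of 0 are assumed
# here to mean "no time restriction".
def _exampleFetchFieldHistory(dcgmHandle, gpuId, fieldId):
    return dcgmGetMultipleValuesForField(dcgmHandle, gpuId, fieldId,
                                         maxCount=100, startTs=0, endTs=0,
                                         order=dcgm_structs.DCGM_ORDER_ASCENDING)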
# This method is used to tell the cache manager to watch a field value
@dcgm_agent.ensure_byte_strings()
def dcgmWatchFieldValue(dcgmHandle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmWatchFieldValue")
ret = fn(dcgmHandle, c_int(gpuId), c_uint(fieldId), c_longlong(updateFreq), c_double(maxKeepAge), c_int(maxKeepEntries))
_dcgmIntCheckReturn(ret)
return ret
# This method is used to tell the cache manager to unwatch a field value
def dcgmUnwatchFieldValue(dcgmHandle, gpuId, fieldId, clearCache):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmUnwatchFieldValue")
ret = fn(dcgmHandle, c_int(gpuId), c_uint(fieldId), c_int(clearCache))
_dcgmIntCheckReturn(ret)
return ret
def dcgmInjectFieldValue(dcgmHandle, gpuId, value):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmInjectFieldValue")
ret = fn(dcgmHandle, c_uint(gpuId), byref(value))
_dcgmIntCheckReturn(ret)
return ret
@dcgm_agent.ensure_byte_strings()
def dcgmInjectEntityFieldValue(dcgmHandle, entityGroupId, entityId, value):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmInjectEntityFieldValue")
ret = fn(dcgmHandle, c_uint(entityGroupId), c_uint(entityId), byref(value))
_dcgmIntCheckReturn(ret)
return ret
@dcgm_agent.ensure_byte_strings()
def dcgmInjectEntityFieldValueToNvml(dcgmHandle, entityGroupId, entityId, value):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmInjectEntityFieldValueToNvml")
ret = fn(dcgmHandle, c_uint(entityGroupId), c_uint(entityId), byref(value))
_dcgmIntCheckReturn(ret)
return ret
@dcgm_agent.ensure_byte_strings()
def dcgmCreateNvmlInjectionGpu(dcgmHandle, index):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmCreateNvmlInjectionGpu")
ret = fn(dcgmHandle, c_uint(index))
_dcgmIntCheckReturn(ret)
return ret
@dcgm_agent.ensure_byte_strings()
def dcgmInjectNvmlDevice(dcgmHandle, gpuId, key, extraKeys, extraKeyCount, value):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmInjectNvmlDevice")
ret = fn(dcgmHandle, c_uint(gpuId), key, byref(extraKeys), c_uint(extraKeyCount), byref(value))
_dcgmIntCheckReturn(ret)
return ret
@dcgm_agent.ensure_byte_strings()
def dcgmSetEntityNvLinkLinkState(dcgmHandle, entityGroupId, entityId, linkId, linkState):
linkStateStruct = dcgm_structs_internal.c_dcgmSetNvLinkLinkState_v1()
linkStateStruct.version = dcgm_structs_internal.dcgmSetNvLinkLinkState_version1
linkStateStruct.entityGroupId = entityGroupId
linkStateStruct.entityId = entityId
linkStateStruct.linkId = linkId
linkStateStruct.linkState = linkState
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmSetEntityNvLinkLinkState")
ret = fn(dcgmHandle, byref(linkStateStruct))
_dcgmIntCheckReturn(ret)
return ret
@dcgm_agent.ensure_byte_strings()
def dcgmGetCacheManagerFieldInfo(dcgmHandle, entityId, entityGroupId, fieldId):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmGetCacheManagerFieldInfo")
cmfi = dcgm_structs_internal.dcgmCacheManagerFieldInfo_v4()
cmfi.entityId = entityId
cmfi.entityGroupId = entityGroupId
cmfi.fieldId = fieldId
ret = fn(dcgmHandle, byref(cmfi))
_dcgmIntCheckReturn(ret)
return cmfi
@dcgm_agent.ensure_byte_strings()
def dcgmCreateFakeEntities(dcgmHandle, cfe):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmCreateFakeEntities")
cfe.version = dcgm_structs_internal.dcgmCreateFakeEntities_version2
ret = fn(dcgmHandle, byref(cfe))
_dcgmIntCheckReturn(ret)
return cfe
#First parameter below is the return type
dcgmFieldValueEnumeration_f = CFUNCTYPE(c_int32, c_uint32, POINTER(dcgm_structs.c_dcgmFieldValue_v1), c_int32, c_void_p)
@dcgm_agent.ensure_byte_strings()
def dcgmGetFieldValuesSince(dcgmHandle, groupId, sinceTimestamp, fieldIds, enumCB, userData):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmGetFieldValuesSince")
c_fieldIds = (c_uint32 * len(fieldIds))(*fieldIds)
c_nextSinceTimestamp = c_int64()
ret = fn(dcgmHandle, groupId, c_int64(sinceTimestamp), c_fieldIds, c_int32(len(fieldIds)), byref(c_nextSinceTimestamp), enumCB, py_object(userData))
dcgm_structs._dcgmCheckReturn(ret)
return c_nextSinceTimestamp.value
@dcgm_agent.ensure_byte_strings()
def dcgmVgpuConfigSet(dcgm_handle, group_id, configToSet, status_handle):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmVgpuConfigSet")
configToSet.version = dcgm_structs.dcgmDeviceVgpuConfig_version1
ret = fn(dcgm_handle, group_id, byref(configToSet), status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
def dcgmVgpuConfigGet(dcgm_handle, group_id, reqCfgType, count, status_handle):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmVgpuConfigGet")
vgpu_config_values_array = count * dcgm_structs.c_dcgmDeviceVgpuConfig_v1
c_config_values = vgpu_config_values_array()
for index in range(0, count):
c_config_values[index].version = dcgm_structs.dcgmDeviceVgpuConfig_version1
ret = fn(dcgm_handle, group_id, reqCfgType, count, c_config_values, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return list(c_config_values[0:count])
@dcgm_agent.ensure_byte_strings()
def dcgmVgpuConfigEnforce(dcgm_handle, group_id, status_handle):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmVgpuConfigEnforce")
ret = fn(dcgm_handle, group_id, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@dcgm_agent.ensure_byte_strings()
def dcgmGetVgpuDeviceAttributes(dcgm_handle, gpuId):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmGetVgpuDeviceAttributes")
device_values = dcgm_structs.c_dcgmVgpuDeviceAttributes_v7()
device_values.version = dcgm_structs.dcgmVgpuDeviceAttributes_version7
ret = fn(dcgm_handle, c_int(gpuId), byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
@dcgm_agent.ensure_byte_strings()
def dcgmGetVgpuInstanceAttributes(dcgm_handle, vgpuId):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmGetVgpuInstanceAttributes")
device_values = dcgm_structs.c_dcgmVgpuInstanceAttributes_v1()
device_values.version = dcgm_structs.dcgmVgpuInstanceAttributes_version1
ret = fn(dcgm_handle, c_int(vgpuId), byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
def dcgmStopDiagnostic(dcgm_handle):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmStopDiagnostic")
ret = fn(dcgm_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
def dcgmPauseTelemetryForDiag(dcgmHandle):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmPauseTelemetryForDiag")
ret = fn(dcgmHandle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
def dcgmResumeTelemetryForDiag(dcgmHandle):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmResumeTelemetryForDiag")
ret = fn(dcgmHandle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
| DCGM-master | testing/python3/dcgm_agent_internal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import dcgm_structs
DCGM_FR_OK = 0 # No error
DCGM_FR_UNKNOWN = 1 # Unknown error code
DCGM_FR_UNRECOGNIZED = 2 # Unrecognized error code
DCGM_FR_PCI_REPLAY_RATE = 3 # Unacceptable rate of PCI errors
DCGM_FR_VOLATILE_DBE_DETECTED = 4 # Uncorrectable volatile double bit error
DCGM_FR_VOLATILE_SBE_DETECTED = 5 # Unacceptable rate of volatile single bit errors
DCGM_FR_PENDING_PAGE_RETIREMENTS = 6 # Pending page retirements detected
DCGM_FR_RETIRED_PAGES_LIMIT = 7 # Unacceptable total page retirements detected
DCGM_FR_RETIRED_PAGES_DBE_LIMIT = 8 # Unacceptable total page retirements due to uncorrectable errors
DCGM_FR_CORRUPT_INFOROM = 9 # Corrupt inforom found
DCGM_FR_CLOCK_THROTTLE_THERMAL = 10 # Clocks being throttled due to overheating
DCGM_FR_POWER_UNREADABLE = 11 # Cannot get a reading for power from NVML
DCGM_FR_CLOCK_THROTTLE_POWER = 12 # Clock being throttled due to power restrictions
DCGM_FR_NVLINK_ERROR_THRESHOLD = 13 # Unacceptable rate of NVLink errors
DCGM_FR_NVLINK_DOWN = 14 # NVLink is down
DCGM_FR_NVSWITCH_FATAL_ERROR = 15 # Fatal errors on the NVSwitch
DCGM_FR_NVSWITCH_NON_FATAL_ERROR = 16 # Non-fatal errors on the NVSwitch
DCGM_FR_NVSWITCH_DOWN = 17 # NVSwitch is down
DCGM_FR_NO_ACCESS_TO_FILE = 18 # Cannot access a file
DCGM_FR_NVML_API = 19 # Error occurred on an NVML API
DCGM_FR_DEVICE_COUNT_MISMATCH = 20 # Disagreement in GPU count between /dev and NVML
DCGM_FR_BAD_PARAMETER = 21 # Bad parameter passed to API
DCGM_FR_CANNOT_OPEN_LIB = 22 # Cannot open a library that must be accessed
DCGM_FR_DENYLISTED_DRIVER = 23 # A driver on the denylist (nouveau) is active
DCGM_FR_NVML_LIB_BAD = 24 # The NVML library is missing expected functions
DCGM_FR_GRAPHICS_PROCESSES = 25 # Graphics processes are active on this GPU
DCGM_FR_HOSTENGINE_CONN = 26 # Unstable connection to nv-hostengine (daemonized DCGM)
DCGM_FR_FIELD_QUERY = 27 # Error querying a field from DCGM
DCGM_FR_BAD_CUDA_ENV = 28 # The environment has variables that hurt CUDA
DCGM_FR_PERSISTENCE_MODE = 29 # Persistence mode is disabled
DCGM_FR_LOW_BANDWIDTH = 30 # The bandwidth is unacceptably low
DCGM_FR_HIGH_LATENCY = 31 # Latency is too high
DCGM_FR_CANNOT_GET_FIELD_TAG = 32 # Cannot find a tag for a field
DCGM_FR_FIELD_VIOLATION = 33 # The value for the specified error field is above 0
DCGM_FR_FIELD_THRESHOLD = 34 # The value for the specified field is above the threshold
DCGM_FR_FIELD_VIOLATION_DBL = 35 # The value for the specified error field is above 0
DCGM_FR_FIELD_THRESHOLD_DBL = 36 # The value for the specified field is above the threshold
DCGM_FR_UNSUPPORTED_FIELD_TYPE = 37 # Field type cannot be supported
DCGM_FR_FIELD_THRESHOLD_TS = 38 # The value for the specified field is above the threshold
DCGM_FR_FIELD_THRESHOLD_TS_DBL = 39 # The value for the specified field is above the threshold
DCGM_FR_THERMAL_VIOLATIONS = 40 # Thermal violations detected
DCGM_FR_THERMAL_VIOLATIONS_TS = 41 # Thermal violations detected with a timestamp
DCGM_FR_TEMP_VIOLATION = 42 # Temperature is too high
DCGM_FR_THROTTLING_VIOLATION = 43 # Non-benign clock throttling is occurring
DCGM_FR_INTERNAL = 44 # An internal error was detected
DCGM_FR_PCIE_GENERATION = 45 # PCIe generation is too low
DCGM_FR_PCIE_WIDTH = 46 # PCIe width is too low
DCGM_FR_ABORTED = 47 # Test was aborted by a user signal
DCGM_FR_TEST_DISABLED = 48 # This test is disabled for this GPU
DCGM_FR_CANNOT_GET_STAT = 49 # Cannot get telemetry for a needed value
DCGM_FR_STRESS_LEVEL = 50 # Stress level is too low (bad performance)
DCGM_FR_CUDA_API = 51 # Error calling the specified CUDA API
DCGM_FR_FAULTY_MEMORY = 52 # Faulty memory detected on this GPU
DCGM_FR_CANNOT_SET_WATCHES = 53 # Unable to set field watches in DCGM
DCGM_FR_CUDA_UNBOUND = 54 # CUDA context is no longer bound
DCGM_FR_ECC_DISABLED = 55 # ECC memory is disabled right now
DCGM_FR_MEMORY_ALLOC = 56 # Cannot allocate memory
DCGM_FR_CUDA_DBE = 57 # CUDA detected unrecoverable double-bit error
DCGM_FR_MEMORY_MISMATCH = 58 # Memory error detected
DCGM_FR_CUDA_DEVICE = 59 # No CUDA device discoverable for existing GPU
DCGM_FR_ECC_UNSUPPORTED = 60 # ECC memory is unsupported by this SKU
DCGM_FR_ECC_PENDING = 61 # ECC memory is in a pending state
DCGM_FR_MEMORY_BANDWIDTH = 62 # Memory bandwidth is too low
DCGM_FR_TARGET_POWER = 63 # Cannot hit the target power draw
DCGM_FR_API_FAIL = 64 # The specified API call failed
DCGM_FR_API_FAIL_GPU = 65 # The specified API call failed for the specified GPU
DCGM_FR_CUDA_CONTEXT = 66 # Cannot create a CUDA context on this GPU
DCGM_FR_DCGM_API = 67 # DCGM API failure
DCGM_FR_CONCURRENT_GPUS = 68 # Need multiple GPUs to run this test
DCGM_FR_TOO_MANY_ERRORS = 69 # More errors than fit in the return struct
DCGM_FR_NVLINK_CRC_ERROR_THRESHOLD = 70 # More than 100 CRC errors are happening per second
DCGM_FR_NVLINK_ERROR_CRITICAL = 71 # NVLink error for a field that should always be 0
DCGM_FR_ENFORCED_POWER_LIMIT = 72 # The enforced power limit is too low to hit the target
DCGM_FR_MEMORY_ALLOC_HOST = 73 # Cannot allocate memory on the host
DCGM_FR_GPU_OP_MODE = 74 # Bad GPU operating mode for running plugin
DCGM_FR_NO_MEMORY_CLOCKS = 75 # No memory clocks with the needed MHz were found
DCGM_FR_NO_GRAPHICS_CLOCKS = 76 # No graphics clocks with the needed MHz were found
DCGM_FR_HAD_TO_RESTORE_STATE = 77 # Note that we had to restore a GPU's state
DCGM_FR_L1TAG_UNSUPPORTED = 78 # L1TAG test is unsupported by this SKU
DCGM_FR_L1TAG_MISCOMPARE = 79 # L1TAG test failed on a miscompare
DCGM_FR_ROW_REMAP_FAILURE = 80 # Row remapping failed (Ampere or newer GPUs)
DCGM_FR_UNCONTAINED_ERROR = 81 # Uncontained error - XID 95
DCGM_FR_EMPTY_GPU_LIST = 82 # No GPU information given to plugin
DCGM_FR_DBE_PENDING_PAGE_RETIREMENTS = 83 # Pending page retirements due to a DBE
DCGM_FR_UNCORRECTABLE_ROW_REMAP = 84 # Uncorrectable row remapping
DCGM_FR_PENDING_ROW_REMAP = 85 # Row remapping is pending
DCGM_FR_BROKEN_P2P_MEMORY_DEVICE = 86 # P2P copy test detected an error writing to this GPU
DCGM_FR_BROKEN_P2P_WRITER_DEVICE = 87 # P2P copy test detected an error writing from this GPU
DCGM_FR_NVSWITCH_NVLINK_DOWN = 88 # An NVLink is down
DCGM_FR_EUD_BINARY_PERMISSIONS = 89 # EUD binary permissions are incorrect
DCGM_FR_EUD_NON_ROOT_USER = 90 # EUD plugin is not running as root
DCGM_FR_EUD_SPAWN_FAILURE = 91 # EUD plugin failed to spawn the EUD binary
DCGM_FR_EUD_TIMEOUT = 92 # EUD plugin timed out
DCGM_FR_EUD_ZOMBIE = 93 # EUD process remains running after the plugin considers it finished
DCGM_FR_EUD_NON_ZERO_EXIT_CODE = 94 # EUD process exited with a non-zero exit code
DCGM_FR_EUD_TEST_FAILED = 95 # EUD test failed
DCGM_FR_FILE_CREATE_PERMISSIONS = 96 # We cannot write a file in this directory.
DCGM_FR_PAUSE_RESUME_FAILED = 97 # Pause/Resume failed
DCGM_FR_PCIE_REPLAYS = 98 # Correctable PCIe errors (replays) detected
DCGM_FR_GPU_EXPECTED_NVLINKS_UP = 99 # Expected NvLinks up per GPU
DCGM_FR_NVSWITCH_EXPECTED_NVLINKS_UP = 100 # Expected NvLinks up per NvSwitch
DCGM_FR_XID_ERROR = 101 # XID error detected
DCGM_FR_ERROR_SENTINEL = 102 # MUST BE THE LAST ERROR CODE
# Standard message for running a field diagnostic
TRIAGE_RUN_FIELD_DIAG_MSG = "Run a field diagnostic on the GPU."
DEBUG_COOLING_MSG = "Verify that the cooling on this machine is functional, including external, thermal "\
"material interface, fans, and any other components."
BUG_REPORT_MSG = "Please capture an nvidia-bug-report and send it to NVIDIA."
# Define DCGM error priorities
DCGM_ERROR_MONITOR = 0 # Can perform workload, but needs to be monitored.
DCGM_ERROR_ISOLATE = 1 # Cannot perform workload. GPU should be isolated.
DCGM_ERROR_UNKNOWN = 2 # This error code is not recognized
# Messages for the error codes. All messages must be defined in the ERROR_CODE_MSG <msg> format
# where <msg> is the actual message.
DCGM_FR_OK_MSG = "The operation completed successfully."
DCGM_FR_UNKNOWN_MSG = "Unknown error."
DCGM_FR_UNRECOGNIZED_MSG = "Unrecognized error code."
# replay limit, gpu id, replay errors detected
DCGM_FR_PCI_REPLAY_RATE_MSG = "Detected more than %u PCIe replays per minute for GPU %u : %d"
# dbes detected, gpu id
DCGM_FR_VOLATILE_DBE_DETECTED_MSG = "Detected %d volatile double-bit ECC error(s) in GPU %u."
# sbe limit, gpu id, sbes detected
DCGM_FR_VOLATILE_SBE_DETECTED_MSG = "More than %u single-bit ECC error(s) detected in GPU %u Volatile SBEs: %lld"
# gpu id
DCGM_FR_PENDING_PAGE_RETIREMENTS_MSG = "A pending retired page has been detected in GPU %u."
# retired pages detected, gpu id
DCGM_FR_RETIRED_PAGES_LIMIT_MSG = "%u or more retired pages have been detected in GPU %u. "
# retired pages due to dbes detected, gpu id
DCGM_FR_RETIRED_PAGES_DBE_LIMIT_MSG = "An excess of %u retired pages due to DBEs has been detected and" \
" more than one page has been retired due to DBEs in the past" \
" week in GPU %u."
# gpu id
DCGM_FR_CORRUPT_INFOROM_MSG = "A corrupt InfoROM has been detected in GPU %u."
# gpu id
DCGM_FR_CLOCK_THROTTLE_THERMAL_MSG = "Detected clock throttling due to thermal violation in GPU %u."
# gpu id
DCGM_FR_POWER_UNREADABLE_MSG = "Cannot reliably read the power usage for GPU %u."
# gpu id
DCGM_FR_CLOCK_THROTTLE_POWER_MSG = "Detected clock throttling due to power violation in GPU %u."
# nvlink errors detected, nvlink id, error threshold
DCGM_FR_NVLINK_ERROR_THRESHOLD_MSG = "Detected %ld NvLink errors on NvLink %u which exceeds threshold of %u"
# gpu id, nvlink id
DCGM_FR_NVLINK_DOWN_MSG = "GPU %u's NvLink link %d is currently down"
# nvswitch id, nvlink id
DCGM_FR_NVSWITCH_FATAL_ERROR_MSG = "Detected fatal errors on NvSwitch %u link %u"
# nvswitch id, nvlink id
DCGM_FR_NVSWITCH_NON_FATAL_ERROR_MSG = "Detected nonfatal errors on NvSwitch %u link %u"
# nvswitch id, nvlink port
DCGM_FR_NVSWITCH_DOWN_MSG = "NvSwitch physical ID %u's NvLink port %d is currently down."
# file path, error detail
DCGM_FR_NO_ACCESS_TO_FILE_MSG = "File %s could not be accessed directly: %s"
# purpose for communicating with NVML, NVML error as string, NVML error
DCGM_FR_NVML_API_MSG = "Error calling NVML API %s: %s"
DCGM_FR_DEVICE_COUNT_MISMATCH_MSG = "The number of devices NVML returns is different than the number "\
"of devices in /dev."
# function name
DCGM_FR_BAD_PARAMETER_MSG = "Bad parameter to function %s cannot be processed"
# library name, error returned from dlopen
DCGM_FR_CANNOT_OPEN_LIB_MSG = "Cannot open library %s: '%s'"
# the name of the driver on the denylist
DCGM_FR_DENYLISTED_DRIVER_MSG = "Found driver on the denylist: %s"
# the name of the function that wasn't found
DCGM_FR_NVML_LIB_BAD_MSG = "Cannot get pointer to %s from libnvidia-ml.so"
DCGM_FR_GRAPHICS_PROCESSES_MSG = "NVVS has detected graphics processes running on at least one "\
"GPU. This may cause some tests to fail."
# error message from the API call
DCGM_FR_HOSTENGINE_CONN_MSG = "Could not connect to the host engine: '%s'"
# field name, gpu id
DCGM_FR_FIELD_QUERY_MSG = "Could not query field %s for GPU %u"
# environment variable name
DCGM_FR_BAD_CUDA_ENV_MSG = "Found CUDA performance-limiting environment variable '%s'."
# gpu id
DCGM_FR_PERSISTENCE_MODE_MSG = "Persistence mode for GPU %u is currently disabled. The DCGM "\
"diagnostic requires peristence mode to be enabled."
DCGM_FR_LOW_BANDWIDTH_MSG = "Bandwidth of GPU %u in direction %s of %.2f did not exceed "\
"minimum required bandwidth of %.2f."
DCGM_FR_HIGH_LATENCY_MSG = "Latency type %s of GPU %u value %.2f exceeded maximum allowed "\
"latency of %.2f."
DCGM_FR_CANNOT_GET_FIELD_TAG_MSG = "Unable to get field information for field id %hu"
DCGM_FR_FIELD_VIOLATION_MSG = "Detected %ld %s for GPU %u"
DCGM_FR_FIELD_THRESHOLD_MSG = "Detected %ld %s for GPU %u which is above the threshold %ld"
DCGM_FR_FIELD_VIOLATION_DBL_MSG = "Detected %.1f %s for GPU %u"
DCGM_FR_FIELD_THRESHOLD_DBL_MSG = "Detected %.1f %s for GPU %u which is above the threshold %.1f"
DCGM_FR_UNSUPPORTED_FIELD_TYPE_MSG = "Field %s is not supported by this API because it is neither an "\
"int64 nor a double type."
DCGM_FR_FIELD_THRESHOLD_TS_MSG = "%s met or exceeded the threshold of %lu per second: %lu at "\
"%.1f seconds into the test."
DCGM_FR_FIELD_THRESHOLD_TS_DBL_MSG = "%s met or exceeded the threshold of %.1f per second: %.1f at "\
"%.1f seconds into the test."
DCGM_FR_THERMAL_VIOLATIONS_MSG = "There were thermal violations totaling %lu seconds for GPU %u"
DCGM_FR_THERMAL_VIOLATIONS_TS_MSG = "Thermal violations totaling %lu samples started at %.1f seconds "\
"into the test for GPU %u"
DCGM_FR_TEMP_VIOLATION_MSG = "Temperature %lld of GPU %u exceeded user-specified maximum "\
"allowed temperature %lld"
DCGM_FR_THROTTLING_VIOLATION_MSG = "Clocks are being throttled for GPU %u because of clock "\
"throttling starting %.1f seconds into the test. %s"
DCGM_FR_INTERNAL_MSG = "There was an internal error during the test: '%s'"
DCGM_FR_PCIE_GENERATION_MSG = "GPU %u is running at PCI link generation %d, which is below "\
"the minimum allowed link generation of %d (parameter '%s')"
DCGM_FR_PCIE_WIDTH_MSG = "GPU %u is running at PCI link width %dX, which is below the "\
"minimum allowed link generation of %d (parameter '%s')"
DCGM_FR_ABORTED_MSG = "Test was aborted early due to user signal"
DCGM_FR_TEST_DISABLED_MSG = "The %s test is skipped for this GPU."
DCGM_FR_CANNOT_GET_STAT_MSG = "Unable to generate / collect stat %s for GPU %u"
DCGM_FR_STRESS_LEVEL_MSG = "Max stress level of %.1f did not reach desired stress level of "\
"%.1f for GPU %u"
DCGM_FR_CUDA_API_MSG = "Error using CUDA API %s"
DCGM_FR_FAULTY_MEMORY_MSG = "Found %d faulty memory elements on GPU %u"
DCGM_FR_CANNOT_SET_WATCHES_MSG = "Unable to add field watches to DCGM: %s"
DCGM_FR_CUDA_UNBOUND_MSG = "Cuda GPU %d is no longer bound to a CUDA context...Aborting"
DCGM_FR_ECC_DISABLED_MSG = "Skipping test %s because ECC is not enabled on GPU %u"
DCGM_FR_MEMORY_ALLOC_MSG = "Couldn't allocate at least %.1f%% of GPU memory on GPU %u"
DCGM_FR_CUDA_DBE_MSG = "CUDA APIs have indicated that a double-bit ECC error has "\
"occured on GPU %u."
DCGM_FR_MEMORY_MISMATCH_MSG = "A memory mismatch was detected on GPU %u, but no error was "\
"reported by CUDA or NVML."
DCGM_FR_CUDA_DEVICE_MSG = "Unable to find a corresponding CUDA device for GPU %u: '%s'"
DCGM_FR_ECC_UNSUPPORTED_MSG = "This card does not support ECC Memory. Skipping test."
DCGM_FR_ECC_PENDING_MSG = "ECC memory for GPU %u is in a pending state."
DCGM_FR_MEMORY_BANDWIDTH_MSG = "GPU %u only achieved a memory bandwidth of %.2f GB/s, failing "\
"to meet %.2f GB/s for test %d"
DCGM_FR_TARGET_POWER_MSG = "Max power of %.1f did not reach desired power minimum %s of "\
"%.1f for GPU %u"
DCGM_FR_API_FAIL_MSG = "API call %s failed: '%s'"
DCGM_FR_API_FAIL_GPU_MSG = "API call %s failed for GPU %u: '%s'"
DCGM_FR_CUDA_CONTEXT_MSG = "GPU %u failed to create a CUDA context: %s"
DCGM_FR_DCGM_API_MSG = "Error using DCGM API %s"
DCGM_FR_CONCURRENT_GPUS_MSG = "Unable to run concurrent pair bandwidth test without 2 or more "\
"gpus. Skipping"
DCGM_FR_TOO_MANY_ERRORS_MSG = "This API can only return up to four errors per system. "\
"Additional errors were found for this system that couldn't be "\
"communicated."
DCGM_FR_NVLINK_CRC_ERROR_THRESHOLD_MSG = "%.1f %s NvLink errors found occurring per second on GPU %u, "\
"exceeding the limit of 100 per second."
DCGM_FR_NVLINK_ERROR_CRITICAL_MSG = "Detected %ld %s NvLink errors on GPU %u's NVLink (should be 0)"
DCGM_FR_ENFORCED_POWER_LIMIT_MSG = "Enforced power limit on GPU %u set to %.1f, which is too low to "\
"attempt to achieve target power %.1f"
DCGM_FR_MEMORY_ALLOC_HOST_MSG = "Cannot allocate %zu bytes on the host"
DCGM_FR_GPU_OP_MODE_MSG = "Skipping plugin due to a GPU being in GPU Operating Mode: LOW_DP."
DCGM_FR_NO_MEMORY_CLOCKS_MSG = "No memory clocks <= %u MHZ were found in %u supported memory clocks."
DCGM_FR_NO_GRAPHICS_CLOCKS_MSG = "No graphics clocks <= %u MHZ were found in %u supported graphics clocks for memory clock %u MHZ."
DCGM_FR_HAD_TO_RESTORE_STATE_MSG = "Had to restore GPU state on NVML GPU(s): %s"
DCGM_FR_L1TAG_UNSUPPORTED_MSG = "This card does not support the L1 cache test. Skipping test."
DCGM_FR_L1TAG_MISCOMPARE_MSG = "The L1 cache test failed with a miscompare."
DCGM_FR_ROW_REMAP_FAILURE_MSG = "Row remapping failed."
DCGM_FR_UNCONTAINED_ERROR_MSG = "GPU had an uncontained error (XID 95)"
DCGM_FR_EMPTY_GPU_LIST_MSG = "No valid GPUs passed to plugin"
DCGM_FR_DBE_PENDING_PAGE_RETIREMENTS_MSG = "Pending page retirements together with a DBE were detected on GPU %u."
DCGM_FR_UNCORRECTABLE_ROW_REMAP_MSG = "GPU %u has uncorrectable row remappings"
DCGM_FR_PENDING_ROW_REMAP_MSG = "GPU %u has pending row remappings"
DCGM_FR_BROKEN_P2P_MEMORY_DEVICE_MSG = "GPU %u was unsuccessfully written to in a peer-to-peer test: %s"
DCGM_FR_BROKEN_P2P_WRITER_DEVICE_MSG = "GPU %u unsuccessfully wrote data in a peer-to-peer test: %s"
DCGM_FR_NVSWITCH_NVLINK_DOWN_MSG = "NVSwitch %u's NvLink %u is down."
DCGM_FR_PCIE_REPLAYS_MSG = "GPU %u had correctable PCIe errors, see dmesg for more information."
DCGM_FR_FILE_CREATE_PERMISSIONS_MSG = "The DCGM Diagnostic does not have permissions to create a file in directory '%s'"
DCGM_FR_XID_ERROR_MSG = "Detected XID %u for GPU %u"
# Suggestions for next steps for the corresponding error message
DCGM_FR_OK_NEXT = "N/A"
DCGM_FR_UNKNOWN_NEXT = ""
DCGM_FR_UNRECOGNIZED_NEXT = ""
DCGM_FR_PCI_REPLAY_RATE_NEXT = "Reconnect PCIe card. Run system side PCIE diagnostic utilities "\
"to verify hops off the GPU board. If issue is on the board, run "\
"the field diagnostic."
DCGM_FR_VOLATILE_DBE_DETECTED_NEXT = "Drain the GPU and reset it or reboot the node."
DCGM_FR_VOLATILE_SBE_DETECTED_NEXT = "Monitor - this GPU can still perform workload."
DCGM_FR_PENDING_PAGE_RETIREMENTS_NEXT = "If volatile double bit errors exist, drain the GPU and reset it "\
"or reboot the node. Otherwise, monitor - GPU can still perform "\
"workload."
DCGM_FR_RETIRED_PAGES_LIMIT_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_RETIRED_PAGES_DBE_LIMIT_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_CORRUPT_INFOROM_NEXT = "Flash the InfoROM to clear this corruption."
DCGM_FR_CLOCK_THROTTLE_THERMAL_NEXT = DEBUG_COOLING_MSG
DCGM_FR_POWER_UNREADABLE_NEXT = ""
DCGM_FR_CLOCK_THROTTLE_POWER_NEXT = "Monitor the power conditions. This GPU can still perform workload."
DCGM_FR_NVLINK_ERROR_THRESHOLD_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_NVLINK_DOWN_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_NVSWITCH_FATAL_ERROR_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_NVSWITCH_NON_FATAL_ERROR_NEXT = "Monitor the NVSwitch. It can still perform workload."
DCGM_FR_NVSWITCH_DOWN_NEXT = ""
DCGM_FR_NO_ACCESS_TO_FILE_NEXT = "Check relevant permissions, access, and existence of the file."
DCGM_FR_NVML_API_NEXT = "Check the error condition and ensure that appropriate libraries "\
"are present and accessible."
DCGM_FR_DEVICE_COUNT_MISMATCH_NEXT = "Check for the presence of cgroups, operating system blocks, "\
                                          "and/or unsupported / older cards"
DCGM_FR_BAD_PARAMETER_NEXT = ""
DCGM_FR_CANNOT_OPEN_LIB_NEXT = "Check for the existence of the library and set LD_LIBRARY_PATH "\
"if needed."
DCGM_FR_DENYLISTED_DRIVER_NEXT = "Please load the appropriate driver."
DCGM_FR_NVML_LIB_BAD_NEXT = "Make sure that the required version of libnvidia-ml.so "\
"is present and accessible on the system."
DCGM_FR_GRAPHICS_PROCESSES_NEXT = "Stop the graphics processes or run this diagnostic on a server "\
"that is not being used for display purposes."
DCGM_FR_HOSTENGINE_CONN_NEXT = "If hostengine is run separately, please ensure that it is up "\
"and responsive."
DCGM_FR_FIELD_QUERY_NEXT = ""
DCGM_FR_BAD_CUDA_ENV_NEXT = "Please unset this environment variable to address test failures."
DCGM_FR_PERSISTENCE_MODE_NEXT = "Enable persistence mode by running \"nvidia-smi -i <gpuId> -pm "\
"1 \" as root."
DCGM_FR_LOW_BANDWIDTH_NEXT = "Verify that your minimum bandwidth setting is appropriate for "\
"all topological consequences."
DCGM_FR_HIGH_LATENCY_NEXT = ""
DCGM_FR_CANNOT_GET_FIELD_TAG_NEXT = ""
DCGM_FR_FIELD_VIOLATION_NEXT = ""
DCGM_FR_FIELD_THRESHOLD_NEXT = ""
DCGM_FR_FIELD_VIOLATION_DBL_NEXT = ""
DCGM_FR_FIELD_THRESHOLD_DBL_NEXT = ""
DCGM_FR_UNSUPPORTED_FIELD_TYPE_NEXT = ""
DCGM_FR_FIELD_THRESHOLD_TS_NEXT = ""
DCGM_FR_FIELD_THRESHOLD_TS_DBL_NEXT = ""
DCGM_FR_THERMAL_VIOLATIONS_NEXT = DEBUG_COOLING_MSG
DCGM_FR_THERMAL_VIOLATIONS_TS_NEXT = DEBUG_COOLING_MSG
DCGM_FR_TEMP_VIOLATION_NEXT = "Verify that the user-specified temperature maximum is set "\
"correctly. If it is, %s" % DEBUG_COOLING_MSG
DCGM_FR_THROTTLING_VIOLATION_NEXT = ""
DCGM_FR_INTERNAL_NEXT = ""
DCGM_FR_PCIE_GENERATION_NEXT = ""
DCGM_FR_PCIE_WIDTH_NEXT = ""
DCGM_FR_ABORTED_NEXT = ""
DCGM_FR_TEST_DISABLED_NEXT = ""
DCGM_FR_CANNOT_GET_STAT_NEXT = "If running a standalone nv-hostengine, verify that it is up "\
"and responsive."
DCGM_FR_STRESS_LEVEL_NEXT = ""
DCGM_FR_CUDA_API_NEXT = ""
DCGM_FR_FAULTY_MEMORY_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_CANNOT_SET_WATCHES_NEXT = ""
DCGM_FR_CUDA_UNBOUND_NEXT = ""
DCGM_FR_ECC_DISABLED_NEXT = "Enable ECC memory by running \"nvidia-smi -i <gpuId> -e 1\" "\
"to enable. This may require a GPU reset or reboot to take effect."
DCGM_FR_MEMORY_ALLOC_NEXT = ""
DCGM_FR_CUDA_DBE_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_MEMORY_MISMATCH_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_CUDA_DEVICE_NEXT = ""
DCGM_FR_ECC_UNSUPPORTED_NEXT = ""
DCGM_FR_ECC_PENDING_NEXT = "Please reboot to activate it."
DCGM_FR_MEMORY_BANDWIDTH_NEXT = ""
DCGM_FR_TARGET_POWER_NEXT = ""
DCGM_FR_API_FAIL_NEXT = ""
DCGM_FR_API_FAIL_GPU_NEXT = ""
DCGM_FR_CUDA_CONTEXT_NEXT = "Please make sure the correct driver version is installed and "\
"verify that no conflicting libraries are present."
DCGM_FR_DCGM_API_NEXT = ""
DCGM_FR_CONCURRENT_GPUS_NEXT = ""
DCGM_FR_TOO_MANY_ERRORS_NEXT = ""
DCGM_FR_NVLINK_CRC_ERROR_THRESHOLD_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_NVLINK_ERROR_CRITICAL_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_ENFORCED_POWER_LIMIT_NEXT = "If this enforced power limit is necessary, then this test "\
"cannot be run. If it is unnecessary, then raise the enforced "\
"power limit setting to be able to run this test."
DCGM_FR_MEMORY_ALLOC_HOST_NEXT = "Manually kill processes or restart your machine."
DCGM_FR_GPU_OP_MODE_NEXT = "Fix by running nvidia-smi as root with: nvidia-smi --gom=0 -i "\
"<gpu index>"
DCGM_FR_NO_MEMORY_CLOCKS_NEXT = ""
DCGM_FR_NO_GRAPHICS_CLOCKS_NEXT = ""
DCGM_FR_HAD_TO_RESTORE_STATE_NEXT = ""
DCGM_FR_L1TAG_UNSUPPORTED_NEXT = ""
DCGM_FR_L1TAG_MISCOMPARE_NEXT = TRIAGE_RUN_FIELD_DIAG_MSG
DCGM_FR_ROW_REMAP_FAILURE_NEXT = DCGM_FR_VOLATILE_DBE_DETECTED_NEXT
DCGM_FR_UNCONTAINED_ERROR_NEXT = DCGM_FR_VOLATILE_DBE_DETECTED_NEXT
DCGM_FR_EMPTY_GPU_LIST_NEXT = ""
DCGM_FR_DBE_PENDING_PAGE_RETIREMENTS_NEXT = "Drain the GPU and reset it or reboot the node to resolve this issue."
DCGM_FR_UNCORRECTABLE_ROW_REMAP_NEXT = ""
DCGM_FR_PENDING_ROW_REMAP_NEXT = ""
DCGM_FR_BROKEN_P2P_MEMORY_DEVICE_NEXT = BUG_REPORT_MSG
DCGM_FR_BROKEN_P2P_WRITER_DEVICE_NEXT = BUG_REPORT_MSG
DCGM_FR_NVSWITCH_NVLINK_DOWN_NEXT = "Please check fabric manager and initialization logs to figure out why the link is down. You may also need to run a field diagnostic."
DCGM_FR_FILE_CREATE_PERMISSIONS_NEXT = "Please restart the hostengine with parameter --home-dir to specify a different home directory for the " \
"diagnostic or change permissions in the current directory to allow the user to write files there."
DCGM_FR_PCIE_REPLAYS_NEXT = ""
DCGM_FR_XID_ERROR_NEXT = "Please consult the documentation for details of this XID."
def dcgmErrorGetPriorityByCode(code):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmErrorGetPriorityByCode")
ret = fn(code)
return ret
def dcgmErrorGetFormatMsgByCode(code):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmErrorGetFormatMsgByCode")
fn.restype = ctypes.c_char_p
ret = fn(code)
return ret.decode('utf-8') if isinstance(ret, bytes) else ret
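# Illustrative sketch (not part of the original file): combine the two lookups
# above to build a human-readable triage line for a given error code. The label
# mapping only covers the priorities defined in this module.
def _exampleDescribeError(code):
    priority = dcgmErrorGetPriorityByCode(code)
    fmt = dcgmErrorGetFormatMsgByCode(code)
    label = {DCGM_ERROR_MONITOR: "MONITOR",
             DCGM_ERROR_ISOLATE: "ISOLATE"}.get(priority, "UNKNOWN")
    return "%s: %s" % (label, fmt)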
| DCGM-master | testing/python3/dcgm_errors.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for the internal API of DCGM library (dcgm_agent.h)
##
import dcgm_structs
import dcgm_fields
from ctypes import *
import functools
def ensure_byte_strings():
"""
    Ensures that we don't call C APIs with unicode strings in the arguments;
    every unicode arg gets converted to UTF-8 before the function is called.
"""
def convert_result_from_bytes(result):
if isinstance(result, bytes):
return result.decode('utf-8')
if isinstance(result, list):
return list(map(convert_result_from_bytes, result))
if isinstance(result, tuple):
return tuple(map(convert_result_from_bytes, result))
return result
def decorator(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
newargs = []
newkwargs = {}
for arg in args:
if isinstance(arg, str):
newargs.append(bytes(arg, 'utf-8'))
else:
newargs.append(arg)
for k, v in kwargs.items():
if isinstance(v, str):
newkwargs[k] = bytes(v, 'utf-8')
else:
newkwargs[k] = v
newargs = tuple(newargs)
return fn(*newargs, **newkwargs)
return wrapper
return decorator
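# Illustrative sketch (not part of the original module): the decorator above is
# meant to wrap ctypes-facing helpers so that any str argument arrives as UTF-8
# bytes. This hypothetical helper simply returns its (already converted) inputs
# so the effect is visible to a caller.
@ensure_byte_strings()
def _exampleEchoArgs(*args, **kwargs):
    return args, kwargs
# e.g. _exampleEchoArgs("localhost") would receive (b'localhost',) as args.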
# Provides access to functions from dcgm_agent_internal
dcgmFP = dcgm_structs._dcgmGetFunctionPointer
# This method is used to initialize DCGM
@ensure_byte_strings()
def dcgmInit():
dcgm_handle = c_void_p()
fn = dcgmFP("dcgmInit")
ret = fn(byref(dcgm_handle))
dcgm_structs._dcgmCheckReturn(ret)
return ret
# This method is used to shutdown DCGM Engine
@ensure_byte_strings()
def dcgmShutdown():
fn = dcgmFP("dcgmShutdown")
ret = fn()
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmStartEmbedded(opMode):
dcgm_handle = c_void_p()
fn = dcgmFP("dcgmStartEmbedded")
ret = fn(opMode, byref(dcgm_handle))
dcgm_structs._dcgmCheckReturn(ret)
return dcgm_handle
@ensure_byte_strings()
def dcgmStopEmbedded(dcgm_handle):
fn = dcgmFP("dcgmStopEmbedded")
ret = fn(dcgm_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmConnect(ip_address):
dcgm_handle = c_void_p()
fn = dcgmFP("dcgmConnect")
ret = fn(ip_address, byref(dcgm_handle))
dcgm_structs._dcgmCheckReturn(ret)
return dcgm_handle
@ensure_byte_strings()
def dcgmConnect_v2(ip_address, connectParams, version=dcgm_structs.c_dcgmConnectV2Params_version):
connectParams.version = version
dcgm_handle = c_void_p()
fn = dcgmFP("dcgmConnect_v2")
ret = fn(ip_address, byref(connectParams), byref(dcgm_handle))
dcgm_structs._dcgmCheckReturn(ret)
return dcgm_handle
@ensure_byte_strings()
def dcgmDisconnect(dcgm_handle):
fn = dcgmFP("dcgmDisconnect")
ret = fn(dcgm_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGetAllSupportedDevices(dcgm_handle):
c_count = c_uint()
gpuid_list = c_uint * dcgm_structs.DCGM_MAX_NUM_DEVICES
c_gpuid_list = gpuid_list()
fn = dcgmFP("dcgmGetAllSupportedDevices")
ret = fn(dcgm_handle, c_gpuid_list, byref(c_count))
dcgm_structs._dcgmCheckReturn(ret)
return list(c_gpuid_list[0:int(c_count.value)])
@ensure_byte_strings()
def dcgmGetAllDevices(dcgm_handle):
c_count = c_uint()
gpuid_list = c_uint * dcgm_structs.DCGM_MAX_NUM_DEVICES
c_gpuid_list = gpuid_list()
fn = dcgmFP("dcgmGetAllDevices")
ret = fn(dcgm_handle, c_gpuid_list, byref(c_count))
dcgm_structs._dcgmCheckReturn(ret)
return list(c_gpuid_list[0:int(c_count.value)])
@ensure_byte_strings()
def dcgmGetDeviceAttributes(dcgm_handle, gpuId, version=dcgm_structs.dcgmDeviceAttributes_version3):
fn = dcgmFP("dcgmGetDeviceAttributes")
if version == dcgm_structs.dcgmDeviceAttributes_version3:
device_values = dcgm_structs.c_dcgmDeviceAttributes_v3()
device_values.version = dcgm_structs.dcgmDeviceAttributes_version3
else:
dcgm_structs._dcgmCheckReturn(dcgm_structs.DCGM_ST_VER_MISMATCH)
ret = fn(dcgm_handle, c_int(gpuId), byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
@ensure_byte_strings()
def dcgmGetEntityGroupEntities(dcgm_handle, entityGroup, flags):
capacity = dcgm_structs.DCGM_GROUP_MAX_ENTITIES
c_count = c_int32(capacity)
entityIds = c_uint32 * capacity
c_entityIds = entityIds()
fn = dcgmFP("dcgmGetEntityGroupEntities")
ret = fn(dcgm_handle, entityGroup, c_entityIds, byref(c_count), flags)
dcgm_structs._dcgmCheckReturn(ret)
return c_entityIds[0:int(c_count.value)]
@ensure_byte_strings()
def dcgmGetNvLinkLinkStatus(dcgm_handle):
linkStatus = dcgm_structs.c_dcgmNvLinkStatus_v3()
linkStatus.version = dcgm_structs.dcgmNvLinkStatus_version3
fn = dcgmFP("dcgmGetNvLinkLinkStatus")
ret = fn(dcgm_handle, byref(linkStatus))
dcgm_structs._dcgmCheckReturn(ret)
return linkStatus
@ensure_byte_strings()
def dcgmGetGpuInstanceHierarchy(dcgm_handle):
hierarchy = dcgm_structs.c_dcgmMigHierarchy_v2()
hierarchy.version = dcgm_structs.c_dcgmMigHierarchy_version2
fn = dcgmFP("dcgmGetGpuInstanceHierarchy")
ret = fn(dcgm_handle, byref(hierarchy))
dcgm_structs._dcgmCheckReturn(ret)
return hierarchy
@ensure_byte_strings()
def dcgmCreateMigEntity(dcgm_handle, parentId, profile, createOption, flags):
fn = dcgmFP("dcgmCreateMigEntity")
cme = dcgm_structs.c_dcgmCreateMigEntity_v1()
cme.version = dcgm_structs.c_dcgmCreateMigEntity_version1
cme.parentId = parentId
cme.createOption = createOption
cme.profile = profile
cme.flags = flags
ret = fn(dcgm_handle, byref(cme))
dcgm_structs._dcgmCheckReturn(ret)
@ensure_byte_strings()
def dcgmDeleteMigEntity(dcgm_handle, entityGroupId, entityId, flags):
fn = dcgmFP("dcgmDeleteMigEntity")
dme = dcgm_structs.c_dcgmDeleteMigEntity_v1()
dme.version = dcgm_structs.c_dcgmDeleteMigEntity_version1
dme.entityGroupId = entityGroupId
dme.entityId = entityId
dme.flags = flags
ret = fn(dcgm_handle, byref(dme))
dcgm_structs._dcgmCheckReturn(ret)
@ensure_byte_strings()
def dcgmGroupCreate(dcgm_handle, type, groupName):
c_group_id = c_void_p()
fn = dcgmFP("dcgmGroupCreate")
ret = fn(dcgm_handle, type, groupName, byref(c_group_id))
dcgm_structs._dcgmCheckReturn(ret)
return c_group_id
@ensure_byte_strings()
def dcgmGroupDestroy(dcgm_handle, group_id):
fn = dcgmFP("dcgmGroupDestroy")
ret = fn(dcgm_handle, group_id)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupAddDevice(dcgm_handle, group_id, gpu_id):
fn = dcgmFP("dcgmGroupAddDevice")
ret = fn(dcgm_handle, group_id, gpu_id)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupAddEntity(dcgm_handle, group_id, entityGroupId, entityId):
fn = dcgmFP("dcgmGroupAddEntity")
ret = fn(dcgm_handle, group_id, entityGroupId, entityId)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupRemoveDevice(dcgm_handle, group_id, gpu_id):
fn = dcgmFP("dcgmGroupRemoveDevice")
ret = fn(dcgm_handle, group_id, gpu_id)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupRemoveEntity(dcgm_handle, group_id, entityGroupId, entityId):
fn = dcgmFP("dcgmGroupRemoveEntity")
ret = fn(dcgm_handle, group_id, entityGroupId, entityId)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGroupGetInfo(dcgm_handle, group_id, version=dcgm_structs.c_dcgmGroupInfo_version2):
fn = dcgmFP("dcgmGroupGetInfo")
    # Support the old version of the request since the host engine still does
if version == dcgm_structs.c_dcgmGroupInfo_version2:
device_values = dcgm_structs.c_dcgmGroupInfo_v2()
device_values.version = dcgm_structs.c_dcgmGroupInfo_version2
else:
dcgm_structs._dcgmCheckReturn(dcgm_structs.DCGM_ST_VER_MISMATCH)
ret = fn(dcgm_handle, group_id, byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
@ensure_byte_strings()
def dcgmGroupGetAllIds(dcgmHandle):
fn = dcgmFP("dcgmGroupGetAllIds")
c_count = c_uint()
groupIdList = c_void_p * dcgm_structs.DCGM_MAX_NUM_GROUPS
c_groupIdList = groupIdList()
ret = fn(dcgmHandle, c_groupIdList, byref(c_count))
dcgm_structs._dcgmCheckReturn(ret)
return list(c_groupIdList[0:int(c_count.value)])
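# Illustrative usage sketch (not part of the original module): create an empty
# group, add one GPU, and read back its info. DCGM_GROUP_EMPTY is assumed to be
# the "start empty" group type defined in dcgm_structs.
def _exampleMakeSingleGpuGroup(dcgm_handle, gpuId, groupName="example_group"):
    groupId = dcgmGroupCreate(dcgm_handle, dcgm_structs.DCGM_GROUP_EMPTY, groupName)
    dcgmGroupAddDevice(dcgm_handle, groupId, gpuId)
    return groupId, dcgmGroupGetInfo(dcgm_handle, groupId)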
@ensure_byte_strings()
def dcgmFieldGroupCreate(dcgm_handle, fieldIds, fieldGroupName):
c_field_group_id = c_void_p()
c_num_field_ids = c_int32(len(fieldIds))
c_field_ids = (c_uint16 * len(fieldIds))(*fieldIds)
fn = dcgmFP("dcgmFieldGroupCreate")
ret = fn(dcgm_handle, c_num_field_ids, byref(c_field_ids), fieldGroupName, byref(c_field_group_id))
dcgm_structs._dcgmCheckReturn(ret)
return c_field_group_id
@ensure_byte_strings()
def dcgmFieldGroupDestroy(dcgm_handle, fieldGroupId):
fn = dcgmFP("dcgmFieldGroupDestroy")
ret = fn(dcgm_handle, fieldGroupId)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmFieldGroupGetInfo(dcgm_handle, fieldGroupId):
c_fieldGroupInfo = dcgm_structs.c_dcgmFieldGroupInfo_v1()
c_fieldGroupInfo.version = dcgm_structs.dcgmFieldGroupInfo_version1
c_fieldGroupInfo.fieldGroupId = fieldGroupId
fn = dcgmFP("dcgmFieldGroupGetInfo")
ret = fn(dcgm_handle, byref(c_fieldGroupInfo))
dcgm_structs._dcgmCheckReturn(ret)
return c_fieldGroupInfo
@ensure_byte_strings()
def dcgmFieldGroupGetAll(dcgm_handle):
c_allGroupInfo = dcgm_structs.c_dcgmAllFieldGroup_v1()
c_allGroupInfo.version = dcgm_structs.dcgmAllFieldGroup_version1
fn = dcgmFP("dcgmFieldGroupGetAll")
ret = fn(dcgm_handle, byref(c_allGroupInfo))
dcgm_structs._dcgmCheckReturn(ret)
return c_allGroupInfo
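# Illustrative usage sketch (not part of the original module): build a small
# field group for temperature and power. The field id constants are assumptions
# taken from dcgm_fields (DCGM_FI_DEV_GPU_TEMP, DCGM_FI_DEV_POWER_USAGE).
def _exampleMakeTempPowerFieldGroup(dcgm_handle, name="example_fields"):
    fieldIds = [dcgm_fields.DCGM_FI_DEV_GPU_TEMP, dcgm_fields.DCGM_FI_DEV_POWER_USAGE]
    return dcgmFieldGroupCreate(dcgm_handle, fieldIds, name)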
@ensure_byte_strings()
def dcgmStatusCreate():
c_status_handle = c_void_p()
fn = dcgmFP("dcgmStatusCreate")
ret = fn(byref(c_status_handle))
dcgm_structs._dcgmCheckReturn(ret)
return c_status_handle
@ensure_byte_strings()
def dcgmStatusDestroy(status_handle):
fn = dcgmFP("dcgmStatusDestroy")
ret = fn(status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmStatusGetCount(status_handle):
c_count = c_uint()
fn = dcgmFP("dcgmStatusGetCount")
ret = fn(status_handle, byref(c_count))
dcgm_structs._dcgmCheckReturn(ret)
return c_count.value
@ensure_byte_strings()
def dcgmStatusPopError(status_handle):
c_errorInfo = dcgm_structs.c_dcgmErrorInfo_v1()
fn = dcgmFP("dcgmStatusPopError")
ret = fn(status_handle, byref(c_errorInfo))
if ret == dcgm_structs.DCGM_ST_OK:
return c_errorInfo
else:
return None
@ensure_byte_strings()
def dcgmStatusClear(status_handle):
fn = dcgmFP("dcgmStatusClear")
ret = fn(status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmConfigSet(dcgm_handle, group_id, configToSet, status_handle):
fn = dcgmFP("dcgmConfigSet")
configToSet.version = dcgm_structs.dcgmDeviceConfig_version1
ret = fn(dcgm_handle, group_id, byref(configToSet), status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmConfigGet(dcgm_handle, group_id, reqCfgType, count, status_handle):
fn = dcgmFP("dcgmConfigGet")
config_values_array = count * dcgm_structs.c_dcgmDeviceConfig_v1
c_config_values = config_values_array()
for index in range(0, count):
c_config_values[index].version = dcgm_structs.dcgmDeviceConfig_version1
ret = fn(dcgm_handle, group_id, reqCfgType, count, c_config_values, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return list(c_config_values[0:count])
@ensure_byte_strings()
def dcgmConfigEnforce(dcgm_handle, group_id, status_handle):
fn = dcgmFP("dcgmConfigEnforce")
ret = fn(dcgm_handle, group_id, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
# This method is used to tell the cache manager to update all fields
@ensure_byte_strings()
def dcgmUpdateAllFields(dcgm_handle, waitForUpdate):
fn = dcgmFP("dcgmUpdateAllFields")
ret = fn(dcgm_handle, c_int(waitForUpdate))
dcgm_structs._dcgmCheckReturn(ret)
return ret
# This method is used to get the policy information
@ensure_byte_strings()
def dcgmPolicyGet(dcgm_handle, group_id, count, status_handle):
fn = dcgmFP("dcgmPolicyGet")
policy_array = count * dcgm_structs.c_dcgmPolicy_v1
c_policy_values = policy_array()
for index in range(0, count):
c_policy_values[index].version = dcgm_structs.dcgmPolicy_version1
ret = fn(dcgm_handle, group_id, count, c_policy_values, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return c_policy_values[0:count]
# This method is used to set the policy information
@ensure_byte_strings()
def dcgmPolicySet(dcgm_handle, group_id, policy, status_handle):
fn = dcgmFP("dcgmPolicySet")
ret = fn(dcgm_handle, group_id, byref(policy), status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
#First parameter below is the return type
dcgmFieldValueEnumeration_f = CFUNCTYPE(c_int32, c_uint32, POINTER(dcgm_structs.c_dcgmFieldValue_v1), c_int32, c_void_p)
dcgmFieldValueEntityEnumeration_f = CFUNCTYPE(c_int32, c_uint32, c_uint32, POINTER(dcgm_structs.c_dcgmFieldValue_v1), c_int32, c_void_p)
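# Illustrative sketch (not part of the original module): a Python callback
# matching dcgmFieldValueEnumeration_f. The last argument arrives as a raw
# void pointer, so it is cast back to the Python object the caller passed via
# py_object(userData). Returning 0 tells DCGM to keep enumerating.
def _exampleValueCallback(gpuId, values, numValues, userData):
    collected = cast(userData, py_object).value
    for i in range(numValues):
        # Assumes int64-typed fields; real code should check fieldType first.
        collected.append((gpuId, values[i].fieldId, values[i].value.i64))
    return 0
# A caller would wrap it first, e.g.:
#   cb = dcgmFieldValueEnumeration_f(_exampleValueCallback)
#   dcgmGetLatestValues(handle, groupId, fieldGroupId, cb, mySampleList)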
@ensure_byte_strings()
def dcgmGetValuesSince(dcgm_handle, groupId, fieldGroupId, sinceTimestamp, enumCB, userData):
fn = dcgmFP("dcgmGetValuesSince")
c_nextSinceTimestamp = c_int64()
ret = fn(dcgm_handle, groupId, fieldGroupId, c_int64(sinceTimestamp), byref(c_nextSinceTimestamp), enumCB, py_object(userData))
dcgm_structs._dcgmCheckReturn(ret)
return c_nextSinceTimestamp.value
@ensure_byte_strings()
def dcgmGetValuesSince_v2(dcgm_handle, groupId, fieldGroupId, sinceTimestamp, enumCB, userData):
fn = dcgmFP("dcgmGetValuesSince_v2")
c_nextSinceTimestamp = c_int64()
ret = fn(dcgm_handle, groupId, fieldGroupId, c_int64(sinceTimestamp), byref(c_nextSinceTimestamp), enumCB, py_object(userData))
dcgm_structs._dcgmCheckReturn(ret)
return c_nextSinceTimestamp.value
@ensure_byte_strings()
def dcgmGetLatestValues(dcgm_handle, groupId, fieldGroupId, enumCB, userData):
fn = dcgmFP("dcgmGetLatestValues")
ret = fn(dcgm_handle, groupId, fieldGroupId, enumCB, py_object(userData))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGetLatestValues_v2(dcgm_handle, groupId, fieldGroupId, enumCB, userData):
fn = dcgmFP("dcgmGetLatestValues_v2")
ret = fn(dcgm_handle, groupId, fieldGroupId, enumCB, py_object(userData))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmWatchFields(dcgm_handle, groupId, fieldGroupId, updateFreq, maxKeepAge, maxKeepSamples):
fn = dcgmFP("dcgmWatchFields")
ret = fn(dcgm_handle, groupId, fieldGroupId, c_int64(updateFreq), c_double(maxKeepAge), c_int32(maxKeepSamples))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmUnwatchFields(dcgm_handle, groupId, fieldGroupId):
fn = dcgmFP("dcgmUnwatchFields")
ret = fn(dcgm_handle, groupId, fieldGroupId)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmHealthSet(dcgm_handle, groupId, systems):
fn = dcgmFP("dcgmHealthSet")
ret = fn(dcgm_handle, groupId, systems)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmHealthSet_v2(dcgm_handle, groupId, systems, updateInterval, maxKeepAge):
params = dcgm_structs.c_dcgmHealthSetParams_v2()
params.version = dcgm_structs.dcgmHealthSetParams_version2
params.groupId = groupId
params.systems = systems
params.updateInterval = updateInterval
params.maxKeepAge = maxKeepAge
fn = dcgmFP("dcgmHealthSet_v2")
ret = fn(dcgm_handle, byref(params))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmHealthGet(dcgm_handle, groupId):
c_systems = c_int32()
fn = dcgmFP("dcgmHealthGet")
ret = fn(dcgm_handle, groupId, byref(c_systems))
dcgm_structs._dcgmCheckReturn(ret)
return c_systems.value
@ensure_byte_strings()
def dcgmHealthCheck(dcgm_handle, groupId, version=dcgm_structs.dcgmHealthResponse_version4):
if version != dcgm_structs.dcgmHealthResponse_version4:
dcgm_structs._dcgmCheckReturn(dcgm_structs.DCGM_ST_VER_MISMATCH)
c_results = dcgm_structs.c_dcgmHealthResponse_v4()
c_results.version = dcgm_structs.dcgmHealthResponse_version4
fn = dcgmFP("dcgmHealthCheck")
ret = fn(dcgm_handle, groupId, byref(c_results))
dcgm_structs._dcgmCheckReturn(ret)
return c_results
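# Illustrative usage sketch (not part of the original module): enable all health
# watch systems on a group and poll once. DCGM_HEALTH_WATCH_ALL is assumed to be
# the "all systems" bitmask defined in dcgm_structs.
def _exampleHealthRoundTrip(dcgm_handle, groupId):
    dcgmHealthSet(dcgm_handle, groupId, dcgm_structs.DCGM_HEALTH_WATCH_ALL)
    return dcgmHealthCheck(dcgm_handle, groupId)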
@ensure_byte_strings()
def dcgmPolicyRegister(dcgm_handle, groupId, condition, beginCallback, finishCallback):
fn = dcgmFP("dcgmPolicyRegister")
ret = fn(dcgm_handle, groupId, condition, beginCallback, finishCallback)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmPolicyUnregister(dcgm_handle, groupId, condition):
fn = dcgmFP("dcgmPolicyUnregister")
ret = fn(dcgm_handle, groupId, condition)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmPolicyTrigger(dcgm_handle):
fn = dcgmFP("dcgmPolicyTrigger")
ret = fn(dcgm_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
def helperDiagCheckReturn(ret, response):
try:
dcgm_structs._dcgmCheckReturn(ret)
except dcgm_structs.DCGMError as e:
if response.systemError.msg != "":
# Add systemError information to the raised exception.
import sys
info = "%s" % response.systemError.msg
e.SetAdditionalInfo(info)
raise e
else:
raise
return response
@ensure_byte_strings()
def dcgmActionValidate_v2(dcgm_handle, runDiagInfo, runDiagVersion=dcgm_structs.dcgmRunDiag_version7):
response = dcgm_structs.c_dcgmDiagResponse_v8()
runDiagInfo.version = runDiagVersion
response.version = dcgm_structs.dcgmDiagResponse_version8
fn = dcgmFP("dcgmActionValidate_v2")
ret = fn(dcgm_handle, byref(runDiagInfo), byref(response))
return helperDiagCheckReturn(ret, response)
@ensure_byte_strings()
def dcgmActionValidate(dcgm_handle, group_id, validate):
response = dcgm_structs.c_dcgmDiagResponse_v8()
response.version = dcgm_structs.dcgmDiagResponse_version8
# Put the group_id and validate into a dcgmRunDiag struct
runDiagInfo = dcgm_structs.c_dcgmRunDiag_v7()
runDiagInfo.version = dcgm_structs.dcgmRunDiag_version7
runDiagInfo.validate = validate
runDiagInfo.groupId = group_id
fn = dcgmFP("dcgmActionValidate_v2")
ret = fn(dcgm_handle, byref(runDiagInfo), byref(response))
return helperDiagCheckReturn(ret, response)
@ensure_byte_strings()
def dcgmRunDiagnostic(dcgm_handle, group_id, diagLevel):
response = dcgm_structs.c_dcgmDiagResponse_v8()
response.version = dcgm_structs.dcgmDiagResponse_version8
fn = dcgmFP("dcgmRunDiagnostic")
ret = fn(dcgm_handle, group_id, diagLevel, byref(response))
return helperDiagCheckReturn(ret, response)
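# Illustrative usage sketch (not part of the original module): run the short
# diagnostic level on a group. DCGM_DIAG_LVL_SHORT is assumed to be defined in
# dcgm_structs; longer levels exercise more plugins and take more time.
def _exampleRunShortDiag(dcgm_handle, groupId):
    return dcgmRunDiagnostic(dcgm_handle, groupId, dcgm_structs.DCGM_DIAG_LVL_SHORT)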
@ensure_byte_strings()
def dcgmWatchPidFields(dcgm_handle, groupId, updateFreq, maxKeepAge, maxKeepSamples):
fn = dcgmFP("dcgmWatchPidFields")
ret = fn(dcgm_handle, groupId, c_int64(updateFreq), c_double(maxKeepAge), c_int32(maxKeepSamples))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmGetPidInfo(dcgm_handle, groupId, pid):
fn = dcgmFP("dcgmGetPidInfo")
pidInfo = dcgm_structs.c_dcgmPidInfo_v2()
pidInfo.version = dcgm_structs.dcgmPidInfo_version2
pidInfo.pid = pid
ret = fn(dcgm_handle, groupId, byref(pidInfo))
dcgm_structs._dcgmCheckReturn(ret)
return pidInfo
@ensure_byte_strings()
def dcgmGetDeviceTopology(dcgm_handle, gpuId):
devtopo = dcgm_structs.c_dcgmDeviceTopology_v1()
fn = dcgmFP("dcgmGetDeviceTopology")
ret = fn(dcgm_handle, gpuId, byref(devtopo))
dcgm_structs._dcgmCheckReturn(ret)
return devtopo
@ensure_byte_strings()
def dcgmGetGroupTopology(dcgm_handle, groupId):
grouptopo = dcgm_structs.c_dcgmGroupTopology_v1()
fn = dcgmFP("dcgmGetGroupTopology")
ret = fn(dcgm_handle, groupId, byref(grouptopo))
dcgm_structs._dcgmCheckReturn(ret)
return grouptopo
@ensure_byte_strings()
def dcgmWatchJobFields(dcgm_handle, groupId, updateFreq, maxKeepAge, maxKeepSamples):
fn = dcgmFP("dcgmWatchJobFields")
ret = fn(dcgm_handle, groupId, c_int64(updateFreq), c_double(maxKeepAge), c_int32(maxKeepSamples))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmJobStartStats(dcgm_handle, groupId, jobid):
fn = dcgmFP("dcgmJobStartStats")
ret = fn(dcgm_handle, groupId, jobid)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmJobStopStats(dcgm_handle, jobid):
fn = dcgmFP("dcgmJobStopStats")
ret = fn(dcgm_handle, jobid)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmJobGetStats(dcgm_handle, jobid):
fn = dcgmFP("dcgmJobGetStats")
jobInfo = dcgm_structs.c_dcgmJobInfo_v3()
jobInfo.version = dcgm_structs.dcgmJobInfo_version3
ret = fn(dcgm_handle, jobid, byref(jobInfo))
dcgm_structs._dcgmCheckReturn(ret)
return jobInfo
@ensure_byte_strings()
def dcgmJobRemove(dcgm_handle, jobid):
fn = dcgmFP("dcgmJobRemove")
ret = fn(dcgm_handle, jobid)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmJobRemoveAll(dcgm_handle):
fn = dcgmFP("dcgmJobRemoveAll")
ret = fn(dcgm_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmIntrospectGetHostengineMemoryUsage(dcgm_handle, waitIfNoData=True):
fn = dcgmFP("dcgmIntrospectGetHostengineMemoryUsage")
memInfo = dcgm_structs.c_dcgmIntrospectMemory_v1()
memInfo.version = dcgm_structs.dcgmIntrospectMemory_version1
ret = fn(dcgm_handle, byref(memInfo), waitIfNoData)
dcgm_structs._dcgmCheckReturn(ret)
return memInfo
@ensure_byte_strings()
def dcgmIntrospectGetHostengineCpuUtilization(dcgm_handle, waitIfNoData=True):
fn = dcgmFP("dcgmIntrospectGetHostengineCpuUtilization")
cpuUtil = dcgm_structs.c_dcgmIntrospectCpuUtil_v1()
cpuUtil.version = dcgm_structs.dcgmIntrospectCpuUtil_version1
ret = fn(dcgm_handle, byref(cpuUtil), waitIfNoData)
dcgm_structs._dcgmCheckReturn(ret)
return cpuUtil
@ensure_byte_strings()
def dcgmEntityGetLatestValues(dcgmHandle, entityGroup, entityId, fieldIds):
fn = dcgmFP("dcgmEntityGetLatestValues")
field_values = (dcgm_structs.c_dcgmFieldValue_v1 * len(fieldIds))()
id_values = (c_uint16 * len(fieldIds))(*fieldIds)
ret = fn(dcgmHandle, c_uint(entityGroup), dcgm_fields.c_dcgm_field_eid_t(entityId), id_values, c_uint(len(fieldIds)), field_values)
dcgm_structs._dcgmCheckReturn(ret)
return field_values
@ensure_byte_strings()
def dcgmEntitiesGetLatestValues(dcgmHandle, entities, fieldIds, flags):
fn = dcgmFP("dcgmEntitiesGetLatestValues")
numFvs = len(fieldIds) * len(entities)
field_values = (dcgm_structs.c_dcgmFieldValue_v2 * numFvs)()
entities_values = (dcgm_structs.c_dcgmGroupEntityPair_t * len(entities))(*entities)
field_id_values = (c_uint16 * len(fieldIds))(*fieldIds)
ret = fn(dcgmHandle, entities_values, c_uint(len(entities)), field_id_values, c_uint(len(fieldIds)), flags, field_values)
dcgm_structs._dcgmCheckReturn(ret)
return field_values
@ensure_byte_strings()
def dcgmSelectGpusByTopology(dcgmHandle, inputGpuIds, numGpus, hintFlags):
fn = dcgmFP("dcgmSelectGpusByTopology")
outputGpuIds = c_int64()
ret = fn(dcgmHandle, c_uint64(inputGpuIds), c_uint32(numGpus), byref(outputGpuIds), c_uint64(hintFlags))
dcgm_structs._dcgmCheckReturn(ret)
return outputGpuIds
@ensure_byte_strings()
def dcgmGetFieldSummary(dcgmHandle, fieldId, entityGroupType, entityId, summaryMask, startTime, endTime):
fn = dcgmFP("dcgmGetFieldSummary")
request = dcgm_structs.c_dcgmFieldSummaryRequest_v1()
request.version = dcgm_structs.dcgmFieldSummaryRequest_version1
request.fieldId = fieldId
request.entityGroupType = entityGroupType
request.entityId = entityId
request.summaryTypeMask = summaryMask
request.startTime = startTime
request.endTime = endTime
ret = fn(dcgmHandle, byref(request))
dcgm_structs._dcgmCheckReturn(ret)
return request
@ensure_byte_strings()
def dcgmModuleDenylist(dcgmHandle, moduleId):
fn = dcgmFP("dcgmModuleDenylist")
ret = fn(dcgmHandle, c_uint32(moduleId))
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmModuleGetStatuses(dcgmHandle):
moduleStatuses = dcgm_structs.c_dcgmModuleGetStatuses_v1()
moduleStatuses.version = dcgm_structs.dcgmModuleGetStatuses_version1
fn = dcgmFP("dcgmModuleGetStatuses")
ret = fn(dcgmHandle, byref(moduleStatuses))
dcgm_structs._dcgmCheckReturn(ret)
return moduleStatuses
@ensure_byte_strings()
def dcgmProfGetSupportedMetricGroups(dcgmHandle, gpuId):
msg = dcgm_structs.c_dcgmProfGetMetricGroups_v3()
msg.version = dcgm_structs.dcgmProfGetMetricGroups_version3
msg.gpuId = gpuId
fn = dcgmFP("dcgmProfGetSupportedMetricGroups")
ret = fn(dcgmHandle, byref(msg))
dcgm_structs._dcgmCheckReturn(ret)
return msg
@ensure_byte_strings()
def dcgmProfPause(dcgmHandle):
fn = dcgmFP("dcgmProfPause")
ret = fn(dcgmHandle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmProfResume(dcgmHandle):
fn = dcgmFP("dcgmProfResume")
ret = fn(dcgmHandle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@ensure_byte_strings()
def dcgmVersionInfo():
msg = dcgm_structs.c_dcgmVersionInfo_v2()
msg.version = dcgm_structs.dcgmVersionInfo_version2
fn = dcgmFP("dcgmVersionInfo")
ret = fn(byref(msg))
dcgm_structs._dcgmCheckReturn(ret)
return msg
@ensure_byte_strings()
def dcgmHostengineIsHealthy(dcgmHandle):
heHealth = dcgm_structs.c_dcgmHostengineHealth_v1()
heHealth.version = dcgm_structs.dcgmHostengineHealth_version1
fn = dcgmFP("dcgmHostengineIsHealthy")
ret = fn(dcgmHandle, byref(heHealth))
dcgm_structs._dcgmCheckReturn(ret)
return heHealth
| DCGM-master | testing/python3/dcgm_agent.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import OptionParser, OptionGroup
import apps
import utils
import sys
import test_utils
import logger
import re
def parse_options():
"""
Parses command line options but doesn't perform error checking on them.
To complete the process, run validate() next (after logging is initialized); see the usage sketch at the end of this module.
"""
global options
global args
parser = OptionParser()
parser.add_option(
"--test-info",
dest="test_info",
action="store_true",
help="Prints to list of all tests available in the testing framework (can be combined with --verbose)"
)
parser.add_option(
"--no-process-check",
dest="no_process_check",
action="store_true",
default=False,
help="Does not check if GPUs are being used by other processes"
)
parser.add_option(
"--no-env-check",
dest="no_env_check",
action="store_true",
default=False,
help="Skips test environment checks for debugging"
)
parser.add_option(
"-v", "--verbose",
dest="verbose",
action="store_true",
help="Prints additional information to stdout"
)
parser.add_option(
"--no-dcgm-trace-patching",
dest="no_dcgm_trace_patching",
action="store_true",
help="Disables trace log patching with information from data/dcgm_decode_db.txt. Use when target ncm version doesn't match exactly data/version.txt CL"
)
parser.add_option(
"--burn",
dest="burn",
action="store_true",
help="Runs a single iteration of the burn_in_stress.py test for sanity check"
)
parser.add_option(
"--eris",
dest="eris",
action="store_true",
help="Prints additional Eris-formatted summary of results"
)
parser.add_option(
"--log-dir",
dest="log_dir",
help="Creates all logging files in the specified directory"
)
parser.add_option(
"--dev-mode",
dest="developer_mode",
action="store_true",
help="Run the test framework in developer mode. This mode runs additional tests "+
"that should only be run by DCGM developers, either due to intermittency "+
"or heavy reliance on environmental conditions."
)
parser.add_option(
"--no-lint",
dest="lint",
action="store_false",
default=True,
help="[deprecated] noop preserved to avoid breaking script invocations"
)
parser.add_option(
"-c", "--clear-lint-artifacts",
dest="clear_lint_artifacts",
action="store_true",
default=False,
help="Delete any files in your development environment that are a product of " +
"running the test linter. This will cause test linting to re-lint ALL python " +
"files instead of just those that have changed since their last successful lint.")
parser.add_option(
"--profile",
dest="profile",
choices=apps.nv_hostengine_app.NvHostEngineApp.supported_profile_tools,
help="Can be one of: %s " % apps.nv_hostengine_app.NvHostEngineApp.supported_profile_tools +
"Turns on profiling of nv-hostengine while tests are running. " +
"This only works for tests that run a standalone hostengine (not embedded). " +
"Valgrind must also be installed. The selected tool will profile nv-hostengine and " +
"generate files for each test that runs. These files can then be examined using " +
"other tools (like KCachegrind for callgrind files). " +
"The tests will output the directory where these files can be found."
)
parser.add_option(
"--use-running-hostengine",
dest="use_running_hostengine",
action="store_true",
default=False,
help="Can be used to run the test framework against a remote host engine that is already running on the system." +
"This option is useful for debugging the stand-alone host engine, which can be started separately inside of" +
"valgrind or gdb. This will skip embedded-only tests due to the host engine already being running."
)
parser.add_option(
"--coverage",
dest="coverage",
action="store_true",
default=False,
help="Informs the framework that this is a coverage build of DCGM and we want to aggregate coverage numbers for the files"
)
parser.add_option(
"--dvssc-testing",
dest="dvssc_testing",
action="store_true",
help="Tests run in DVS-SC"
)
test_group = OptionGroup(parser, "Testing modifiers")
test_group.add_option(
"-d", "--device",
dest="device",
help="Run only on target device (DEVICE_NVML_ID) + global tests"
)
test_group.add_option(
"-f", "--filter-tests",
dest="filter_tests",
help="Runs module.test_fn tests that match provided regular expression"
)
test_group.add_option(
"-u", "--non-root-user",
dest="non_root_user",
help="User name that can be used to run tests that should be run as non-root." +
" Without this option some test will not be run. Can be used ONLY when current user is root." +
" (Linux only)")
parser.add_option_group(test_group)
debug_group = OptionGroup(parser, "Debugging options")
debug_group.add_option(
"--debug",
action="store_true",
help="Disables some of the features as to not interfere with debugging session"
)
debug_group.add_option(
"-b", "--break-at-failure",
dest="break_at_failure",
action="store_true",
help="Start debugger if test fails"
)
debug_group.add_option(
"--force-logging",
dest="force_logging",
action="store_true",
help="Force logging even for actions that have logging disabled by default"
)
parser.add_option_group(debug_group)
parser.add_option(
"--no-library-check",
dest="no_library_check",
action="store_true",
default=False,
help="Skips the test which verifies that all modules are present."
)
parser.add_option(
"--no-root-check",
dest="no_root_check",
action="store_true",
default=False,
help="Skips the check that the test framework is being run as root."
)
(options, args) = parser.parse_args()
if options.debug:
logger.stdout_loglevel = logger.DEBUG
# by default some actions shouldn't generate any log
if options.test_info:
test_utils.noLogging = True
# unless force log is specified
if options.force_logging:
test_utils.noLogging = False
#Change the backup value as well
test_utils.noLoggingBackup = test_utils.noLogging
#Use a different logging level for ERIS as we log to the console
if options.eris:
test_utils.loggingLevel = "WARNING"
else:
test_utils.loggingLevel = "DEBUG"
class OptionParserStub():
def __init__(self):
self.profile = False
self.eris = False
self.break_at_failure = False
self.force_logging = False
self.test_info = False
self.non_root_user = None
self.lint = False
self.clear_lint_artifacts = False
self.burn = False
self.log_dir = None
self.verbose = False
self.no_dcgm_trace_patching = True
self.use_running_hostengine = False
self.no_process_check = False
self.developer_mode = False
self.no_env_check = False
self.coverage = ''
self.dvssc_testing = False
self.no_root_check = False
def initialize_as_stub():
"""
Initialize the values of this library as a stub module. This is so we can call DCGM framework classes
from outside the DCGM framework
"""
global options
options = OptionParserStub()
def validate():
"""
Should be run after logging is enabled.
"""
logger.debug("Running script with argv: " + str(sys.argv))
logger.debug("Parsed options: " + str(options))
logger.debug("Unparsed args: " + str(args))
if args:
logger.fatal("Unrecognized command line arguments: " + " ".join(args))
if options.non_root_user:
try:
utils.get_user_idinfo(options.non_root_user)
except KeyError:
logger.fatal("User '%s' doesn't exist" % options.non_root_user)
if options.non_root_user and not utils.is_root():
logger.fatal("[-u | --non-root-user] flags are invalid when current user is not root")
if options.break_at_failure:
options.debug = True
if options.filter_tests:
options.filter_tests = re.compile(options.filter_tests)
logger.debug("Preprocessed options: " + str(options))
| DCGM-master | testing/python3/option_parser.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dcgm_field_helpers import FieldValueEncoder
from DcgmReader import DcgmReader
import abc
import dcgm_fields
import json
import dcgm_structs
import time
import logger
try:
from prometheus_client import start_http_server, Gauge
except ImportError:
pass
logger.warning("prometheus_client not installed, please run: \"pip install prometheus_client\"")
ignore_List = [dcgm_fields.DCGM_FI_DEV_PCI_BUSID, dcgm_fields.DCGM_FI_DEV_UUID]
publishFieldIds = [
dcgm_fields.DCGM_FI_DEV_PCI_BUSID, #Needed for plugin_instance
dcgm_fields.DCGM_FI_DEV_POWER_USAGE,
dcgm_fields.DCGM_FI_DEV_GPU_TEMP,
dcgm_fields.DCGM_FI_DEV_SM_CLOCK,
dcgm_fields.DCGM_FI_DEV_GPU_UTIL,
dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING,
dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_FREE,
dcgm_fields.DCGM_FI_DEV_FB_USED,
dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
dcgm_fields.DCGM_FI_DEV_UUID
]
class DcgmJson(DcgmReader):
def __init__(self):
DcgmReader.__init__(self, fieldIds=publishFieldIds, ignoreList=ignore_List)
self.m_jsonData = {} #Json data for each field.
self.m_list=[] # list of jsons of all the fields.
###########################################################################
'''
The CustomDataHandler callback creates a JSON entry from the fvs dictionary for each watched field.
Every entry is appended to self.m_list, which CreateJson() returns.
@params:
fvs : The field value dictionary that contains the values of each field ID for each gpuId.
@return :
None; the accumulated list of JSON strings is returned by CreateJson().
'''
def CustomDataHandler(self,fvs):
for gpuId in list(fvs.keys()):
gpuFv = fvs[gpuId]
typeInstance = str(gpuId)
for fieldId in list(gpuFv.keys()):
if fieldId in self.m_dcgmIgnoreFields:
continue
self.m_jsonData = {
"GpuId": typeInstance,
"UUID": (gpuFv[dcgm_fields.DCGM_FI_DEV_UUID][-1]).value,
"FieldTag": self.m_fieldIdToInfo[fieldId].tag,
"FieldValues": json.dumps(gpuFv[fieldId], cls=FieldValueEncoder),
}
self.m_list.append(json.dumps(self.m_jsonData))
###########################################################################
'''
function to create json from the field value dictionary.
'''
def CreateJson(self,data=None):
self.Process()
return self.m_list
###########################################################################
###############################################################################
# Usage:
#
# obj = DcgmJson()
#
# obj.CreateJson()
#
# obj.Shutdown()
###############################################################################
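# Minimal runnable sketch (not part of the original module). Assumptions: a host engine is
# reachable with DcgmReader's default connection settings, and Shutdown() is inherited from
# DcgmReader.
if __name__ == '__main__':
    obj = DcgmJson()
    # CreateJson() calls Process(), which fills m_list with one JSON string per GPU/field.
    for entry in obj.CreateJson():
        print(entry)
    obj.Shutdown()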
| DCGM-master | testing/python3/dcgm_json.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import json
import dcgm_structs
import dcgm_agent
import dcgm_fields
import nvidia_smi_utils
def trimJsonText(text):
return text[text.find('{'):text.rfind('}') + 1]
logFile = "nvvs_diag.log"
NAME_FIELD = "name"
RESULTS_FIELD = "results"
WARNING_FIELD = "warnings"
STATUS_FIELD = "status"
INFO_FIELD = "info"
GPU_FIELD = "gpu_ids"
RUNTIME_ERROR_FIELD = "runtime_error"
DIAG_THROTTLE_WARNING = "Clocks are being throttled for"
DIAG_DBE_WARNING = "ecc_dbe_volatile_total"
DIAG_ECC_MODE_WARNING = "Skipping test because ECC is not enabled on this GPU"
DIAG_INFOROM_WARNING = "nvmlDeviceValidateInforom for nvml device"
DIAG_THERMAL_WARNING = "Thermal violations totaling "
DIAG_THROTTLE_SUGGEST = "A GPU's clocks are being throttled due to a cooling issue. Please make sure your GPUs are properly cooled."
DIAG_DBE_SUGGEST = "This GPU needs to be drained and reset to clear the non-recoverable double bit errors."
DIAG_ECC_MODE_SUGGEST = "Run nvidia-smi -i <gpu id> -e 1 and then reboot to enable."
DIAG_INFOROM_SUGGEST = "A GPU's inforom is corrupt. You should re-flash it with iromflash or replace the GPU. run nvidia-smi without arguments to see which GPU."
DIAG_THERMAL_SUGGEST = "A GPU has thermal violations happening. Please make sure your GPUs are properly cooled."
DIAG_VARY_SUGGEST = "Please check for transient conditions on this machine that can disrupt consistency from run to run"
errorTuples = [(dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS, DIAG_THROTTLE_WARNING, DIAG_THROTTLE_SUGGEST),
(dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, DIAG_DBE_WARNING, DIAG_DBE_SUGGEST),
(dcgm_fields.DCGM_FI_DEV_ECC_CURRENT, DIAG_ECC_MODE_WARNING, DIAG_ECC_MODE_SUGGEST),
(dcgm_fields.DCGM_FI_DEV_INFOROM_CONFIG_VALID, DIAG_INFOROM_WARNING, DIAG_INFOROM_SUGGEST),
(dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION, DIAG_THERMAL_WARNING, DIAG_THERMAL_SUGGEST)
]
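# Each tuple above is (DCGM field id, warning substring to match, suggested remediation);
# FailedTestInfo.__init__ scans this list to attach a field id and suggestion to a failure.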
################################################################################
class FailedTestInfo():
################################################################################
def __init__(self, testname, warning, gpuInfo=None):
self.m_warning = warning
self.m_testname = testname
self.m_info = ''
self.m_gpuField = gpuInfo
self.m_gpuId = None
self.m_isAnError = True
if gpuInfo:
self.m_gpuId = int(gpuInfo)
self.m_fieldId = None
self.m_suggestion = ''
self.m_evaluatedMsg = ''
for errorTuple in errorTuples:
if self.m_warning.find(errorTuple[1]) != -1:
# Matched, record field ID and suggestion
self.m_fieldId = errorTuple[0]
self.m_suggestion = errorTuple[2]
################################################################################
def SetInfo(self, info):
self.m_info = info
################################################################################
def GetFullError(self):
if self.m_evaluatedMsg:
return self.m_evaluatedMsg
if not self.m_warning:
full = "%s is reported as failed but has no warning message" % self.m_testname
else:
full = "%s failed: '%s'" % (self.m_testname, self.m_warning)
if self.m_info:
full += "\n%s" % self.m_info
if self.m_gpuField:
full += "\n for GPU(s) %s" % self.m_gpuField
return full
################################################################################
def GetFieldId(self):
return self.m_fieldId
################################################################################
def GetGpuId(self):
return self.m_gpuId
################################################################################
def GetWarning(self):
return self.m_warning
################################################################################
def GetTestname(self):
return self.m_testname
################################################################################
def SetFailureMessage(self, val, correct_val):
fieldName = dcgm_fields.DcgmFieldGetTagById(self.m_fieldId)
if fieldName is None:
fieldName = "Cannot find field id %d" % self.m_fieldId
if val is None:
# Our Nvidia-smi checker doesn't support this value yet
self.m_evaluatedMsg = "%s\nOur nvidia-smi checker doesn't support evaluating field %s yet." % \
(self.GetFullError(), fieldName)
elif val != correct_val:
self.m_evaluatedMsg = None
self.m_isAnError = False # nvidia-smi reports an error in this field, so this is not a DCGM mistake
if (self.m_fieldId):
self.m_evaluatedMsg = "%s\nnvidia-smi found a value of %s for field %s instead of %s" % \
(self.GetFullError(), str(val), fieldName, str(correct_val))
else:
self.m_evaluatedMsg = self.GetFullError()
else:
self.m_evaluatedMsg = "%s\nnvidia-smi found the correct value %s for field %s" %\
(self.GetFullError(), str(val), fieldName)
################################################################################
def IsAnError(self):
"""
Simply return the error status in m_isAnError.
"""
return self.m_isAnError
################################################################################
class DcgmiDiag:
################################################################################
def __init__(self, gpuIds=None, testNamesStr='', paramsStr='', verbose=True,
dcgmiPrefix='', runMode=0, configFile='', debugLevel=0, debugFile=''):
#gpuList is expected to be a string. Convert it if it was provided
self.gpuList = None
if gpuIds is not None:
if isinstance(gpuIds, str):
self.gpuList = gpuIds
else:
self.gpuList = ",".join(map(str,gpuIds))
self.testNamesStr = testNamesStr
self.paramsStr = paramsStr
self.verbose = verbose
self.dcgmiPrefix = dcgmiPrefix
self.runMode = runMode
self.configFile = configFile
self.debugLevel = debugLevel
self.debugFile = debugFile
################################################################################
def DebugLevelToString(self):
if self.debugLevel == 0:
return 'NONE'
elif self.debugLevel == 1:
return 'FATAL'
elif self.debugLevel == 2:
return 'ERROR'
elif self.debugLevel == 3:
return 'WARN'
elif self.debugLevel == 4:
return 'INFO'
elif self.debugLevel == 5:
return 'DEBUG'
else:
return 'VERB'
################################################################################
def BuildDcgmiCommand(self):
cmd = []
if self.dcgmiPrefix:
cmd.append("%s/dcgmi" % self.dcgmiPrefix)
else:
cmd.append("dcgmi")
cmd.append("diag")
if self.runMode == 0:
# Use the test names string if a run mode was not specified
cmd.append('-r')
if self.testNamesStr:
cmd.append(self.testNamesStr)
else:
# default to running level 3 tests
cmd.append('3')
else:
# If the runMode has been specified, then use that over the test names string
cmd.append('-r')
cmd.append(str(self.runMode))
if self.paramsStr:
cmd.append('-p')
cmd.append(self.paramsStr)
if self.debugFile:
cmd.append('--debugLogFile')
cmd.append(self.debugFile)
if self.debugLevel:
cmd.append('-d')
cmd.append(self.DebugLevelToString())
cmd.append('-j')
if self.verbose:
cmd.append('-v')
if self.configFile:
cmd.append('-c')
cmd.append(self.configFile)
if self.gpuList is not None:
cmd.append('-i')
cmd.append(self.gpuList)
return cmd
################################################################################
def AddGpuList(self, gpu_list):
self.gpuList = gpu_list
################################################################################
def FindFailedTests(self, jsondict, failed_list):
if not isinstance(jsondict, dict):
# Only inspect dictionaries
return
if RESULTS_FIELD in jsondict:
# We've found the test dictionary
testname = jsondict[NAME_FIELD]
for item in jsondict[RESULTS_FIELD]:
if item[STATUS_FIELD] == "Fail":
warn = ''
gpuInfo = ''
if WARNING_FIELD in item:
warn = item[WARNING_FIELD]
if GPU_FIELD in item:
gpuInfo = item[GPU_FIELD]
failed_test = FailedTestInfo(testname, warn, gpuInfo)
if INFO_FIELD in item:
failed_test.SetInfo(item[INFO_FIELD])
failed_list.append(failed_test)
elif RUNTIME_ERROR_FIELD in jsondict:
# Experienced a complete failure while trying to run the diagnostic. No need
# to parse for further errors because there will be no other json entries.
failInfo = FailedTestInfo('System_Failure', jsondict[RUNTIME_ERROR_FIELD])
failed_list.append(failInfo)
else:
for key in jsondict:
if isinstance(jsondict[key], list):
for item in jsondict[key]:
self.FindFailedTests(item, failed_list)
else:
self.FindFailedTests(jsondict[key], failed_list)
################################################################################
def IdentifyFailingTests(self, jsondict, nsc):
failed_list = []
self.FindFailedTests(jsondict, failed_list)
for failInfo in failed_list:
fieldId = failInfo.GetFieldId()
if fieldId:
val, correct_val = nsc.GetErrorValue(failInfo.GetGpuId(), fieldId)
failInfo.SetFailureMessage(val, correct_val)
return failed_list
################################################################################
def SetAndCheckOutput(self, stdout, stderr, ret=0, nsc=None):
self.lastStdout = stdout
self.lastStderr = stderr
self.diagRet = ret
if not nsc:
nsc = nvidia_smi_utils.NvidiaSmiJob()
return self.CheckOutput(nsc)
################################################################################
def CheckOutput(self, nsc):
failed_list = []
if self.lastStdout:
try:
jsondict = json.loads(trimJsonText(self.lastStdout))
except ValueError as e:
print(("Couldn't parse json from '%s'" % self.lastStdout))
return None, 1
failed_list = self.IdentifyFailingTests(jsondict, nsc)
# Saves diag stdout into a log file - use append to get multiple runs in
# the same file if we're called repeatedly.
with open(logFile, "a") as f:
f.seek(0)
f.write(str(self.lastStdout))
if self.lastStderr:
# Saves diag stderr into a log file - use append to get multiple runs in
# the same file if we're called repeatedly.
with open(logFile, "a") as f:
f.seek(0)
f.write(str(self.lastStderr))
return failed_list, self.diagRet
################################################################################
def __RunDcgmiDiag__(self, cmd):
self.lastCmd = cmd
self.lastStdout = ''
self.lastStderr = ''
nsc = nvidia_smi_utils.NvidiaSmiJob()
nsc.start()
runner = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(stdout_buf, stderr_buf) = runner.communicate()
self.lastStdout = stdout_buf and stdout_buf.decode('utf-8')
self.lastStderr = stderr_buf and stderr_buf.decode('utf-8')
self.diagRet = runner.returncode
nsc.m_shutdownFlag.set()
nsc.join()
return self.CheckOutput(nsc)
################################################################################
def DidIFail(self):
if self.failed_list:
for failure in self.failed_list:
if failure.IsAnError():
return True
if self.diagRet != 0:
return True
return False
################################################################################
def RunDcgmiDiag(self, config_file, runMode=0):
oldConfig = self.configFile
oldRunMode = self.runMode
if config_file:
self.configFile = config_file
else:
self.configFile = ''
if runMode:
self.runMode = runMode
cmd = self.BuildDcgmiCommand()
self.failed_list, self.diagRet = self.__RunDcgmiDiag__(cmd)
self.configFile = oldConfig
self.runMode = oldRunMode
return self.DidIFail()
################################################################################
def RunAtLevel(self, runMode, configFile=None):
if runMode < 1 or runMode > 3:
return dcgm_structs.DCGM_ST_BADPARAM
return self.RunDcgmiDiag(configFile, runMode)
################################################################################
def Run(self):
cmd = self.BuildDcgmiCommand()
self.failed_list, self.diagRet = self.__RunDcgmiDiag__(cmd)
return self.DidIFail()
################################################################################
def SetConfigFile(self, config_file):
self.configFile = config_file
################################################################################
def SetRunMode(self, run_mode):
self.runMode = run_mode
################################################################################
def PrintFailures(self):
for failure in self.failed_list:
print(failure.GetFullError())
################################################################################
def PrintLastRunStatus(self):
print("Ran '%s' and got return code %d" % (self.lastCmd, self.diagRet))
print("stdout: \n\n%s" % self.lastStdout)
if self.lastStderr:
print("\nstderr: \n\n%s" % self.lastStderr)
else:
print("\nNo stderr output")
self.PrintFailures()
def main():
dd = DcgmiDiag()
failed = dd.Run()
dd.PrintLastRunStatus()
if __name__ == '__main__':
main()
| DCGM-master | testing/python3/DcgmiDiag.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# Python bindings for "dcgm_structs.h"
##
from ctypes import *
from ctypes.util import find_library
import sys
import os
import threading
import string
import json
import dcgmvalue
import platform
from inspect import isclass
DCGM_MAX_STR_LENGTH = 256
DCGM_MAX_NUM_DEVICES = 32 # DCGM 2.0 and newer = 32. DCGM 1.8 and older = 16
DCGM_MAX_NUM_SWITCHES = 12
DCGM_NVLINK_MAX_LINKS_PER_GPU = 18
DCGM_NVLINK_MAX_LINKS_PER_GPU_LEGACY1 = 6
DCGM_NVLINK_MAX_LINKS_PER_GPU_LEGACY2 = 12
DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH_V1 = 36 # Max NvLinks per NvSwitch pre-Hopper
DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH = 64
DCGM_LANE_MAX_LANES_PER_NVSWICH_LINK = 4
DCGM_MAX_CLOCKS = 256
DCGM_MAX_NUM_GROUPS = 64
DCGM_MAX_BLOB_LENGTH = 4096
DCGM_MAX_VGPU_INSTANCES_PER_PGPU = 32
DCGM_VGPU_NAME_BUFFER_SIZE = 64
DCGM_GRID_LICENSE_BUFFER_SIZE = 128
DCGM_MAX_VGPU_TYPES_PER_PGPU = 32
DCGM_DEVICE_UUID_BUFFER_SIZE = 80
DCGM_MAX_FBC_SESSIONS = 256
#When more than one value is returned from a query, which order should it be returned in?
DCGM_ORDER_ASCENDING = 1
DCGM_ORDER_DESCENDING = 2
DCGM_OPERATION_MODE_AUTO = 1
DCGM_OPERATION_MODE_MANUAL = 2
DCGM_ENCODER_QUERY_H264 = 0
DCGM_ENCODER_QUERY_HEVC = 1
DCGM_FBC_SESSION_TYPE_UNKNOWN = 0 # Unknown
DCGM_FBC_SESSION_TYPE_TOSYS = 1 # FB capture for a system buffer
DCGM_FBC_SESSION_TYPE_CUDA = 2 # FB capture for a cuda buffer
DCGM_FBC_SESSION_TYPE_VID = 3 # FB capture for a Vid buffer
DCGM_FBC_SESSION_TYPE_HWENC = 4 # FB capture for a NVENC HW buffer
## C Type mappings ##
## Enums
# Return types
_dcgmReturn_t = c_uint
DCGM_ST_OK = 0 # Success
DCGM_ST_BADPARAM = -1 # A bad parameter was passed to a function
DCGM_ST_GENERIC_ERROR = -3 # A generic, unspecified error
DCGM_ST_MEMORY = -4 # An out of memory error occured
DCGM_ST_NOT_CONFIGURED = -5 # Setting not configured
DCGM_ST_NOT_SUPPORTED = -6 # Feature not supported
DCGM_ST_INIT_ERROR = -7 # DCGM Init error
DCGM_ST_NVML_ERROR = -8 # When NVML returns error.
DCGM_ST_PENDING = -9 # Object is in pending state of something else
DCGM_ST_UNINITIALIZED = -10 # Object is in undefined state
DCGM_ST_TIMEOUT = -11 # Requested operation timed out
DCGM_ST_VER_MISMATCH = -12 # Version mismatch between received and understood API
DCGM_ST_UNKNOWN_FIELD = -13 # Unknown field id
DCGM_ST_NO_DATA = -14 # No data is available
DCGM_ST_STALE_DATA = -15 # Data is considered stale
DCGM_ST_NOT_WATCHED = -16 # The given field is not being updated by the cache manager
DCGM_ST_NO_PERMISSION = -17 # We are not permissioned to perform the desired action
DCGM_ST_GPU_IS_LOST = -18 # GPU is no longer reachable
DCGM_ST_RESET_REQUIRED = -19 # GPU requires a reset
DCGM_ST_FUNCTION_NOT_FOUND = -20 # Unable to find function
DCGM_ST_CONNECTION_NOT_VALID = -21 # Connection to the host engine is not valid any longer
DCGM_ST_GPU_NOT_SUPPORTED = -22 # This GPU is not supported by DCGM
DCGM_ST_GROUP_INCOMPATIBLE = -23 # The GPUs of the provided group are not compatible with each other for the requested operation
DCGM_ST_MAX_LIMIT = -24 # Max limit reached for the object
DCGM_ST_LIBRARY_NOT_FOUND = -25 # DCGM library could not be found
DCGM_ST_DUPLICATE_KEY = -26 #Duplicate key passed to the function
DCGM_ST_GPU_IN_SYNC_BOOST_GROUP = -27 #GPU is already a part of a sync boost group
DCGM_ST_GPU_NOT_IN_SYNC_BOOST_GROUP = -28 #GPU is a not a part of sync boost group
DCGM_ST_REQUIRES_ROOT = -29 #This operation cannot be performed when the host engine is running as non-root
DCGM_ST_NVVS_ERROR = -30 #DCGM GPU Diagnostic was successfully executed, but reported an error.
DCGM_ST_INSUFFICIENT_SIZE = -31 #An input argument is not large enough
DCGM_ST_FIELD_UNSUPPORTED_BY_API = -32 #The given field ID is not supported by the API being called
DCGM_ST_MODULE_NOT_LOADED = -33 #This request is serviced by a module of DCGM that is not currently loaded
DCGM_ST_IN_USE = -34 #The requested operation could not be completed because the affected resource is in use
DCGM_ST_GROUP_IS_EMPTY = -35 # The specified group is empty and this operation is not valid with an empty group
DCGM_ST_PROFILING_NOT_SUPPORTED = -36 # Profiling is not supported for this group of GPUs or GPU
DCGM_ST_PROFILING_LIBRARY_ERROR = -37 # The third-party Profiling module returned an unrecoverable error
DCGM_ST_PROFILING_MULTI_PASS = -38 # The requested profiling metrics cannot be collected in a single pass
DCGM_ST_DIAG_ALREADY_RUNNING = -39 # A diag instance is already running, cannot run a new diag until the current one finishes.
DCGM_ST_DIAG_BAD_JSON = -40 # The DCGM GPU Diagnostic returned JSON that cannot be parsed
DCGM_ST_DIAG_BAD_LAUNCH = -41 # Error while launching the DCGM GPU Diagnostic
DCGM_ST_DIAG_UNUSED = -42 # Unused
DCGM_ST_DIAG_THRESHOLD_EXCEEDED = -43 # A field value met or exceeded the error threshold.
DCGM_ST_INSUFFICIENT_DRIVER_VERSION = -44 # The installed driver version is insufficient for this API
DCGM_ST_INSTANCE_NOT_FOUND = -45 # The specified GPU instance does not exist
DCGM_ST_COMPUTE_INSTANCE_NOT_FOUND = -46 # The specified GPU compute instance does not exist
DCGM_ST_CHILD_NOT_KILLED = -47 # Couldn't kill a child process within the retries
DCGM_ST_3RD_PARTY_LIBRARY_ERROR = -48 # Detected an error in a 3rd-party library
DCGM_ST_INSUFFICIENT_RESOURCES = -49 # Not enough resources available
DCGM_ST_PLUGIN_EXCEPTION = -50 # Exception thrown from a diagnostic plugin
DCGM_ST_NVVS_ISOLATE_ERROR = -51 # The diagnostic returned an error that indicates the need for isolation
DCGM_ST_NVVS_BINARY_NOT_FOUND = -52 # The NVVS binary was not found in the specified location
DCGM_ST_NVVS_KILLED = -53 # The NVVS process was killed by a signal
DCGM_ST_PAUSED = -54 # The hostengine and all modules are paused
DCGM_GROUP_DEFAULT = 0 # All the GPUs on the node are added to the group
DCGM_GROUP_EMPTY = 1 # Creates an empty group
DCGM_GROUP_DEFAULT_NVSWITCHES = 2 # All NvSwitches of the node are added to the group
DCGM_GROUP_DEFAULT_INSTANCES = 3 # All GPU instances of the node are added to the group
DCGM_GROUP_DEFAULT_COMPUTE_INSTANCES = 4 # All compute instances of the node are added to the group
DCGM_GROUP_DEFAULT_ENTITIES = 5 # All entities are added to this default group
DCGM_GROUP_ALL_GPUS = 0x7fffffff
DCGM_GROUP_ALL_NVSWITCHES = 0x7ffffffe
DCGM_GROUP_ALL_INSTANCES = 0x7ffffffd
DCGM_GROUP_ALL_COMPUTE_INSTANCES = 0x7ffffffc
DCGM_GROUP_ALL_ENTITIES = 0x7ffffffb
DCGM_GROUP_MAX_ENTITIES = 64 #Maximum number of entities per entity group
DCGM_CONFIG_TARGET_STATE = 0 # The target configuration values to be applied
DCGM_CONFIG_CURRENT_STATE = 1 # The current configuration state
DCGM_CONFIG_POWER_CAP_INDIVIDUAL = 0 # Represents the power cap to be applied for each member of the group
DCGM_CONFIG_POWER_BUDGET_GROUP = 1 # Represents the power budget for the entire group
DCGM_CONFIG_COMPUTEMODE_DEFAULT = 0 # Default compute mode -- multiple contexts per device
DCGM_CONFIG_COMPUTEMODE_PROHIBITED = 1 # Compute-prohibited mode -- no contexts per device
DCGM_CONFIG_COMPUTEMODE_EXCLUSIVE_PROCESS = 2 #* Compute-exclusive-process mode -- only one context per device, usable from multiple threads at a time
DCGM_TOPOLOGY_BOARD = 0x1
DCGM_TOPOLOGY_SINGLE = 0x2
DCGM_TOPOLOGY_MULTIPLE = 0x4
DCGM_TOPOLOGY_HOSTBRIDGE = 0x8
DCGM_TOPOLOGY_CPU = 0x10
DCGM_TOPOLOGY_SYSTEM = 0x20
DCGM_TOPOLOGY_NVLINK1 = 0x0100
DCGM_TOPOLOGY_NVLINK2 = 0x0200
DCGM_TOPOLOGY_NVLINK3 = 0x0400
DCGM_TOPOLOGY_NVLINK4 = 0x0800
DCGM_TOPOLOGY_NVLINK5 = 0x1000
DCGM_TOPOLOGY_NVLINK6 = 0x2000
DCGM_TOPOLOGY_NVLINK7 = 0x4000
DCGM_TOPOLOGY_NVLINK8 = 0x8000
DCGM_TOPOLOGY_NVLINK9 = 0x10000
DCGM_TOPOLOGY_NVLINK10 = 0x20000
DCGM_TOPOLOGY_NVLINK11 = 0x40000
DCGM_TOPOLOGY_NVLINK12 = 0x80000
# Diagnostic per gpu tests - fixed indices for dcgmDiagResponsePerGpu_t.results[]
DCGM_MEMORY_INDEX = 0
DCGM_DIAGNOSTIC_INDEX = 1
DCGM_PCI_INDEX = 2
DCGM_SM_STRESS_INDEX = 3
DCGM_TARGETED_STRESS_INDEX = 4
DCGM_TARGETED_POWER_INDEX = 5
DCGM_MEMORY_BANDWIDTH_INDEX = 6
DCGM_MEMTEST_INDEX = 7
DCGM_PULSE_TEST_INDEX = 8
DCGM_EUD_TEST_INDEX = 9
DCGM_UNUSED2_TEST_INDEX = 10
DCGM_UNUSED3_TEST_INDEX = 11
DCGM_UNUSED4_TEST_INDEX = 12
DCGM_UNUSED5_TEST_INDEX = 13
DCGM_PER_GPU_TEST_COUNT_V7 = 9
DCGM_PER_GPU_TEST_COUNT_V8 = 13
# DCGM Diag Level One test indices
DCGM_SWTEST_DENYLIST = 0
DCGM_SWTEST_NVML_LIBRARY = 1
DCGM_SWTEST_CUDA_MAIN_LIBRARY = 2
DCGM_SWTEST_CUDA_RUNTIME_LIBRARY = 3
DCGM_SWTEST_PERMISSIONS = 4
DCGM_SWTEST_PERSISTENCE_MODE = 5
DCGM_SWTEST_ENVIRONMENT = 6
DCGM_SWTEST_PAGE_RETIREMENT = 7
DCGM_SWTEST_GRAPHICS_PROCESSES = 8
DCGM_SWTEST_INFOROM = 9
# This test is only run by itself, so it can use the 0 slot
DCGM_CONTEXT_CREATE_INDEX = 0
class DCGM_INTROSPECT_STATE(object):
DISABLED = 0
ENABLED = 1
# Lib loading
dcgmLib = None
libLoadLock = threading.Lock()
_dcgmLib_refcount = 0 # Incremented on each dcgmInit and decremented on dcgmShutdown
class DCGMError(Exception):
""" Class to return error values for DCGM """
_valClassMapping = dict()
# List of currently known error codes
_error_code_to_string = {
DCGM_ST_OK: "Success",
DCGM_ST_BADPARAM: "Bad parameter passed to function",
DCGM_ST_GENERIC_ERROR: "Generic unspecified error",
DCGM_ST_MEMORY: "Out of memory error",
DCGM_ST_NOT_CONFIGURED: "Setting not configured",
DCGM_ST_NOT_SUPPORTED: "Feature not supported",
DCGM_ST_INIT_ERROR: "DCGM initialization error",
DCGM_ST_NVML_ERROR: "NVML error",
DCGM_ST_PENDING: "Object is in a pending state",
DCGM_ST_UNINITIALIZED: "Object is in an undefined state",
DCGM_ST_TIMEOUT: "Timeout",
DCGM_ST_VER_MISMATCH: "API version mismatch",
DCGM_ST_UNKNOWN_FIELD: "Unknown field",
DCGM_ST_NO_DATA: "No data is available",
DCGM_ST_STALE_DATA: "Data is considered stale",
DCGM_ST_NOT_WATCHED: "Field is not being updated",
DCGM_ST_NO_PERMISSION: "Not permissioned",
DCGM_ST_GPU_IS_LOST: "GPU is unreachable",
DCGM_ST_RESET_REQUIRED: "GPU requires a reset",
DCGM_ST_FUNCTION_NOT_FOUND: "Unable to find function",
DCGM_ST_CONNECTION_NOT_VALID: "The connection to the host engine is not valid any longer",
DCGM_ST_GPU_NOT_SUPPORTED: "This GPU is not supported by DCGM",
DCGM_ST_GROUP_INCOMPATIBLE: "GPUs are incompatible with each other for the requested operation",
DCGM_ST_MAX_LIMIT: "Max limit reached for the object",
DCGM_ST_LIBRARY_NOT_FOUND: "DCGM library could not be found",
DCGM_ST_DUPLICATE_KEY: "Duplicate key passed to function",
DCGM_ST_GPU_IN_SYNC_BOOST_GROUP: "GPU is already a part of a sync boost group",
DCGM_ST_GPU_NOT_IN_SYNC_BOOST_GROUP: "GPU is not a part of the sync boost group",
DCGM_ST_REQUIRES_ROOT: "This operation is not supported when the host engine is running as non root",
DCGM_ST_NVVS_ERROR: "DCGM GPU Diagnostic returned an error.",
DCGM_ST_INSUFFICIENT_SIZE: "An input argument is not large enough",
DCGM_ST_FIELD_UNSUPPORTED_BY_API: "The given field ID is not supported by the API being called",
DCGM_ST_MODULE_NOT_LOADED: "This request is serviced by a module of DCGM that is not currently loaded",
DCGM_ST_IN_USE: "The requested operation could not be completed because the affected resource is in use",
DCGM_ST_GROUP_IS_EMPTY: "The specified group is empty, and this operation is incompatible with an empty group",
DCGM_ST_PROFILING_NOT_SUPPORTED: "Profiling is not supported for this group of GPUs or GPU",
DCGM_ST_PROFILING_LIBRARY_ERROR: "The third-party Profiling module returned an unrecoverable error",
DCGM_ST_PROFILING_MULTI_PASS: "The requested profiling metrics cannot be collected in a single pass",
DCGM_ST_DIAG_ALREADY_RUNNING: "A diag instance is already running, cannot run a new diag until the current one finishes",
DCGM_ST_DIAG_BAD_JSON: "The GPU Diagnostic returned Json that cannot be parsed.",
DCGM_ST_DIAG_BAD_LAUNCH: "Error while launching the GPU Diagnostic.",
DCGM_ST_DIAG_UNUSED: "Unused error code",
DCGM_ST_DIAG_THRESHOLD_EXCEEDED: "A field value met or exceeded the error threshold.",
DCGM_ST_INSUFFICIENT_DRIVER_VERSION: "The installed driver version is insufficient for this API",
DCGM_ST_INSTANCE_NOT_FOUND: "The specified GPU instance does not exist",
DCGM_ST_COMPUTE_INSTANCE_NOT_FOUND: "The specified GPU compute instance does not exist",
DCGM_ST_CHILD_NOT_KILLED: "Couldn't kill a child process within the retries",
DCGM_ST_3RD_PARTY_LIBRARY_ERROR: "Detected an error in a 3rd-party library",
DCGM_ST_INSUFFICIENT_RESOURCES: "Not enough resources available",
DCGM_ST_PLUGIN_EXCEPTION: "Exception thrown from a diagnostic plugin",
DCGM_ST_NVVS_ISOLATE_ERROR: "The diagnostic returned an error that indicates the need for isolation",
}
def __new__(typ, value):
"""
Maps value to a proper subclass of DCGMError.
"""
if typ == DCGMError:
typ = DCGMError._valClassMapping.get(value, typ)
obj = Exception.__new__(typ)
obj.info = None
obj.value = value
return obj
def __str__(self):
msg = None
try:
if self.value not in DCGMError._error_code_to_string:
DCGMError._error_code_to_string[self.value] = str(_dcgmErrorString(self.value))
msg = DCGMError._error_code_to_string[self.value]
# Ensure we catch all exceptions, otherwise the error code will be hidden in a traceback
except BaseException:
msg = "DCGM Error with code %d" % self.value
if self.info is not None:
if msg[-1] == ".":
msg = msg[:-1]
msg += ": '%s'" % self.info
return msg
def __eq__(self, other):
return self.value == other.value
def __hash__(self):
return hash(self.value)
def SetAdditionalInfo(self, msg):
"""
Sets msg as additional information returned by the string representation of DCGMError and subclasses.
Example output for DCGMError_Uninitialized subclass, with msg set to 'more info msg here' is
"DCGMError_Uninitialized: Object is in an undefined state: 'more info msg here'".
Ensure that msg is a string or an object for which the __str__() method does not throw an error
"""
self.info = msg
def dcgmExceptionClass(error_code):
return DCGMError._valClassMapping.get(error_code)
def _extractDCGMErrorsAsClasses():
'''
Generates a hierarchy of classes on top of the DCGMError class.
Each DCGM error code gets a new DCGMError subclass. This way try/except blocks can filter
appropriate exceptions more easily.
DCGMError is the parent class. Each DCGM_ST_* gets its own subclass.
e.g. DCGM_ST_UNINITIALIZED will be turned into DCGMError_Uninitialized
'''
this_module = sys.modules[__name__]
dcgmErrorsNames = [x for x in dir(this_module) if x.startswith("DCGM_ST_")]
for err_name in dcgmErrorsNames:
# e.g. Turn DCGM_ST_UNINITIALIZED into DCGMError_Uninitialized
class_name = "DCGMError_" + string.capwords(err_name.replace("DCGM_ST_", ""), "_").replace("_", "")
err_val = getattr(this_module, err_name)
def gen_new(val):
def new(typ):
# pylint: disable=E1121
obj = DCGMError.__new__(typ, val)
return obj
return new
new_error_class = type(class_name, (DCGMError,), {'__new__': gen_new(err_val)})
new_error_class.__module__ = __name__
setattr(this_module, class_name, new_error_class)
DCGMError._valClassMapping[err_val] = new_error_class
_extractDCGMErrorsAsClasses()
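# Illustrative helper (added for documentation purposes only; not part of the original
# bindings): the generated DCGMError_* subclasses let callers catch specific error codes
# without comparing numeric values by hand.
def _exampleHandleReturnCode(returnCode):
    try:
        _dcgmCheckReturn(returnCode)
    except dcgmExceptionClass(DCGM_ST_NOT_SUPPORTED):
        return "feature not supported on this configuration"
    except DCGMError as err:
        return "DCGM error: %s" % str(err)
    return "ok"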
class struct_c_dcgmUnit_t(Structure):
# Unit structures
pass # opaque handle
_dcgmUnit_t = POINTER(struct_c_dcgmUnit_t)
class _WrappedStructure():
def __init__(self, obj):
self.__dict__["_obj"] = obj
def __getattr__(self, key):
value = getattr(self._obj, key)
if isinstance(value, bytes):
return value.decode('utf-8')
if isclass(value):
return _WrappedStructure(value)
return value
def __getitem__(self, key):
value = self._obj[key]
if isinstance(value, bytes):
return value.decode('utf-8')
if isclass(value):
return _WrappedStructure(value)
return value
def __setattr__(self, key, raw_value):
def find_field_type(fields, key):
field = (f[1] for f in fields if f[0] == key)
try:
return next(field)
except StopIteration:
return None
if (key == '_obj'):
raise RuntimeError("Cannot set _obj")
value = raw_value
fieldtype = find_field_type(self._obj._fields_, key)
if fieldtype == c_uint and not isinstance(value, c_uint32):
value = int(value)
elif fieldtype == c_int and not isinstance(value, c_int32):
value = int(value)
elif isinstance(raw_value, str):
value = raw_value.encode('utf-8')
self._obj[key] = value
return value
class _DcgmStructure(Structure):
def __getattribute__(self, key):
value = super().__getattribute__(key)
if isinstance(value, bytes):
return value.decode('utf-8')
if isclass(value):
return _WrappedStructure(value)
return value
def __setattr__(self, key, raw_value):
def find_field_type(fields, key):
field = (f[1] for f in fields if f[0] == key)
try:
return next(field)
except StopIteration:
return None
value = raw_value
fieldtype = find_field_type(self._fields_, key)
if fieldtype == c_uint and not isinstance(value, c_uint32):
value = int(value)
elif fieldtype == c_int and not isinstance(value, c_int32):
value = int(value)
elif isinstance(raw_value, str):
value = raw_value.encode('utf-8')
return super().__setattr__(key, value)
class DcgmUnion(Union):
def __getattribute__(self, key):
value = super().__getattribute__(key)
if isinstance(value, bytes):
return value.decode('utf-8')
if isclass(value):
return _WrappedStructure(value)
return value
def __setattr__(self, key, raw_value):
def find_field_type(fields, key):
field = (f[1] for f in fields if f[0] == key)
try:
return next(field)
except StopIteration:
return None
value = raw_value
fieldtype = find_field_type(self._fields_, key)
if fieldtype == c_uint and not isinstance(value, c_uint32):
value = int(value)
elif fieldtype == c_int and not isinstance(value, c_int32):
value = int(value)
elif isinstance(raw_value, str):
value = raw_value.encode('utf-8')
return super().__setattr__(key, value)
class _PrintableStructure(_DcgmStructure):
"""
Abstract class that produces nicer __str__ output than ctypes.Structure.
e.g. instead of:
>>> print str(obj)
<class_name object at 0x7fdf82fef9e0>
this class will print
class_name(field_name: formatted_value, field_name: formatted_value)
_fmt_ dictionary of <str _field_ name> -> <str format>
e.g. class that has _field_ 'hex_value', c_uint could be formatted with
_fmt_ = {"hex_value" : "%08X"}
to produce nicer output.
Default formatting string for all fields can be set with key "<default>" like:
_fmt_ = {"<default>" : "%d MHz"} # e.g all values are numbers in MHz.
If not set it's assumed to be just "%s"
Exact format of returned str from this class is subject to change in the future.
"""
_fmt_ = {}
def __str__(self):
result = []
for x in self._fields_:
key = x[0]
value = getattr(self, key)
fmt = "%s"
if key in self._fmt_:
fmt = self._fmt_[key]
elif "<default>" in self._fmt_:
fmt = self._fmt_["<default>"]
result.append(("%s: " + fmt) % (key, value))
return self.__class__.__name__ + "(" + ", ".join(result) + ")"
def FieldsSizeof(self):
size = 0
for s,t in self._fields_:
size = size + sizeof(t)
return size
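# Illustrative subclass (added for documentation purposes only; not part of the original
# bindings): demonstrates the _fmt_ hook described in the docstring above.
class _ExampleClockInfo(_PrintableStructure):
    _fields_ = [('smClock', c_uint), ('memClock', c_uint)]
    _fmt_ = {'<default>': '%d MHz'}
# str(_ExampleClockInfo(1410, 877)) renders roughly as
# "_ExampleClockInfo(smClock: 1410 MHz, memClock: 877 MHz)"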
#JSON serializer for DCGM structures
class DcgmJSONEncoder(json.JSONEncoder):
def default(self, o): # pylint: disable=method-hidden
if isinstance(o, _PrintableStructure):
retVal = {}
for fieldName, fieldType in o._fields_:
subObj = getattr(o, fieldName)
if isinstance(subObj, _PrintableStructure):
subObj = self.default(subObj)
retVal[fieldName] = subObj
return retVal
elif isinstance(o, Array):
retVal = []
for i in range(len(o)):
subVal = {}
for fieldName, fieldType in o[i]._fields_:
subObj = getattr(o[i], fieldName)
if isinstance(subObj, _PrintableStructure):
subObj = self.default(subObj)
subVal[fieldName] = subObj
retVal.append(subVal)
return retVal
#Let the parent class handle this/fail
return json.JSONEncoder.default(self, o)
# Creates a unique version number for each struct
def make_dcgm_version(struct, ver):
return sizeof(struct) | (ver << 24)
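# Illustrative note (not from the original source): the encoding packs sizeof(struct) into
# the low bits and the version number into the top byte, so a hypothetical 16-byte struct
# at version 2 would encode as 16 | (2 << 24) == 0x02000010.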
# Function access ##
_dcgmGetFunctionPointer_cache = dict() # function pointers are cached to prevent unnecessary libLoadLock locking
def _dcgmGetFunctionPointer(name):
global dcgmLib
if name in _dcgmGetFunctionPointer_cache:
return _dcgmGetFunctionPointer_cache[name]
libLoadLock.acquire()
try:
# ensure library was loaded
if dcgmLib is None:
raise DCGMError(DCGM_ST_UNINITIALIZED)
try:
_dcgmGetFunctionPointer_cache[name] = getattr(dcgmLib, name)
return _dcgmGetFunctionPointer_cache[name]
except AttributeError:
raise DCGMError(DCGM_ST_FUNCTION_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
# C function wrappers ##
def _LoadDcgmLibrary(libDcgmPath=None):
"""
Load the library if it isn't loaded already
:param libDcgmPath: Optional path to the libdcgm*.so libraries. Will use system defaults if not specified.
:type libDcgmPath: str
:return: None
"""
global dcgmLib
if dcgmLib is None:
# lock to ensure only one caller loads the library
libLoadLock.acquire()
try:
# ensure the library still isn't loaded
if dcgmLib is None:
if sys.platform[:3] == "win":
# cdecl calling convention
# load nvml.dll from %ProgramFiles%/NVIDIA Corporation/NVSMI/nvml.dll
dcgmLib = CDLL(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"), "NVIDIA Corporation/NVSMI/dcgm.dll"))
else:
libPaths = []
if libDcgmPath:
libPaths.append(os.path.join(libDcgmPath, "libdcgm.so.3"))
if 'LD_LIBRARY_PATH' in os.environ:
envLibPaths = os.environ['LD_LIBRARY_PATH'].split(':')
for envLibPath in envLibPaths:
libPaths.append('{}/libdcgm.so.3'.format(envLibPath))
libPaths.append('/usr/lib/{}-linux-gnu/libdcgm.so.3'.format(platform.machine()))
libPaths.append('/usr/lib64/libdcgm.so.3')
for lib_file in libPaths:
if os.path.isfile(lib_file):
try:
dcgmLib = CDLL(lib_file)
if dcgmLib:
break
except OSError as ose:
continue
if dcgmLib is None:
_dcgmCheckReturn(DCGM_ST_LIBRARY_NOT_FOUND)
except OSError as ose:
_dcgmCheckReturn(DCGM_ST_LIBRARY_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
def _dcgmInit(libDcgmPath=None):
_LoadDcgmLibrary(libDcgmPath)
# Atomically update refcount
global _dcgmLib_refcount
libLoadLock.acquire()
_dcgmLib_refcount += 1
libLoadLock.release()
return None
def _dcgmCheckReturn(ret):
if ret != DCGM_ST_OK:
raise DCGMError(ret)
return ret
def _dcgmShutdown():
# Leave the library loaded, but shutdown the interface
fn = _dcgmGetFunctionPointer("dcgmShutdown")
ret = fn()
_dcgmCheckReturn(ret)
# Atomically update refcount
global _dcgmLib_refcount
libLoadLock.acquire()
if 0 < _dcgmLib_refcount:
_dcgmLib_refcount -= 1
libLoadLock.release()
return None
def _dcgmErrorString(result):
fn = _dcgmGetFunctionPointer("dcgmErrorString")
fn.restype = c_char_p # otherwise return is an int
str = fn(result)
return str
# Represents a link object. 'type' should be one of DCGM_FE_GPU or
# DCGM_FE_SWITCH; 'id' is the associated GPU or switch ID.
#
class c_dcgm_link_t(_PrintableStructure):
_fields_ = [
('type', c_uint8),
('index', c_uint8),
('id', c_uint16)
]
class c_dcgmConnectV2Params_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('persistAfterDisconnect', c_uint)
]
c_dcgmConnectV2Params_version1 = make_dcgm_version(c_dcgmConnectV2Params_v1, 1)
class c_dcgmConnectV2Params_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('persistAfterDisconnect', c_uint),
('timeoutMs', c_uint),
('addressIsUnixSocket', c_uint)
]
c_dcgmConnectV2Params_version2 = make_dcgm_version(c_dcgmConnectV2Params_v2, 2)
c_dcgmConnectV2Params_version = c_dcgmConnectV2Params_version2
class c_dcgmHostengineHealth_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('overallHealth', c_uint),
]
dcgmHostengineHealth_version1 = make_dcgm_version(c_dcgmHostengineHealth_v1, 1)
dcgmHostengineHealth_version = dcgmHostengineHealth_version1
#Represents memory and proc clocks for a device
class c_dcgmClockSet_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('memClock', c_uint), #/* Memory Clock */
('smClock',c_uint) #/* SM Clock */
]
# Represents a entityGroupId + entityId pair to uniquely identify a given entityId inside
# a group of entities
# Added in DCGM 1.5.0
class c_dcgmGroupEntityPair_t(_PrintableStructure):
_fields_ = [
('entityGroupId', c_uint32), #Entity Group ID entity belongs to
('entityId', c_uint32) #Entity ID of the entity
]
def __eq__(self, other):
return (self.entityGroupId == other.entityGroupId) and (self.entityId == other.entityId)
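# Illustrative note (not from the original source): a pair identifying GPU 0 would be built
# as c_dcgmGroupEntityPair_t(entityGroupId, 0), where entityGroupId is one of the DCGM_FE_*
# entity group constants defined in dcgm_fields.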
# /**
# * Structure to store information for DCGM group (v2)
# * Added in DCGM 1.5.0
# */
class c_dcgmGroupInfo_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('count', c_uint),
('groupName', c_char * DCGM_MAX_STR_LENGTH),
('entityList', c_dcgmGroupEntityPair_t * DCGM_GROUP_MAX_ENTITIES)
]
c_dcgmGroupInfo_version2 = make_dcgm_version(c_dcgmGroupInfo_v2, 2)
DcgmiMigProfileNone = 0 # No profile (for GPUs)
DcgmMigProfileGpuInstanceSlice1 = 1 # GPU instance slice 1
DcgmMigProfileGpuInstanceSlice2 = 2 # GPU instance slice 2
DcgmMigProfileGpuInstanceSlice3 = 3 # GPU instance slice 3
DcgmMigProfileGpuInstanceSlice4 = 4 # GPU instance slice 4
DcgmMigProfileGpuInstanceSlice7 = 5 # GPU instance slice 7
DcgmMigProfileGpuInstanceSlice8 = 6 # GPU instance slice 8
DcgmMigProfileGpuInstanceSlice6 = 7 # GPU instance slice 6
DcgmMigProfileGpuInstanceSlice1Rev1 = 8 # GPU instance slice 1 revision 1
DcgmMigProfileGpuInstanceSlice2Rev1 = 9 # GPU instance slice 2 revision 1
DcgmMigProfileGpuInstanceSlice1Rev2 = 10 # GPU instance slice 1 revision 2
DcgmMigProfileComputeInstanceSlice1 = 30 # compute instance slice 1
DcgmMigProfileComputeInstanceSlice2 = 31 # compute instance slice 2
DcgmMigProfileComputeInstanceSlice3 = 32 # compute instance slice 3
DcgmMigProfileComputeInstanceSlice4 = 33 # compute instance slice 4
DcgmMigProfileComputeInstanceSlice7 = 34 # compute instance slice 7
DcgmMigProfileComputeInstanceSlice8 = 35 # compute instance slice 8
DcgmMigProfileComputeInstanceSlice6 = 36 # compute instance slice 6
DcgmMigProfileComputeInstanceSlice1Rev1 = 37 # compute instance slice 1 revision 1
# /**
# * Represents a pair of entity pairings to uniquely identify an entity and its place in the hierarchy.
# */
class c_dcgmMigHierarchyInfo_t(_PrintableStructure):
_fields_ = [
('entity', c_dcgmGroupEntityPair_t),
('parent', c_dcgmGroupEntityPair_t),
('sliceProfile', c_uint),
]
class c_dcgmMigEntityInfo_t(_PrintableStructure):
_fields_ = [
('gpuUuid', c_char * 128), # GPU UUID
('nvmlGpuIndex', c_uint), # GPU index from NVML
('nvmlInstanceId', c_uint), # GPU instance index within GPU
('nvmlComputeInstanceId', c_uint), # GPU Compute instance index within GPU instance
('nvmlMigProfileId', c_uint), # Unique profile ID for GPU or Compute instances
('nvmlProfileSlices', c_uint), # Number of slices in the MIG profile
]
class c_dcgmMigHierarchyInfo_v2(_PrintableStructure):
_fields_ = [
('entity', c_dcgmGroupEntityPair_t),
('parent', c_dcgmGroupEntityPair_t),
('info', c_dcgmMigEntityInfo_t),
]
DCGM_MAX_INSTANCES_PER_GPU = 8
# There can never be more compute instances per GPU than instances per GPU because a compute instance
# is part of an instance
DCGM_MAX_COMPUTE_INSTANCES_PER_GPU = DCGM_MAX_INSTANCES_PER_GPU
# Currently, there cannot be more than 14 instances + compute instances. There are always 7 compute instances
# and never more than 7 instances
DCGM_MAX_TOTAL_INSTANCES = 14
DCGM_MAX_HIERARCHY_INFO = DCGM_MAX_NUM_DEVICES * DCGM_MAX_TOTAL_INSTANCES
DCGM_MAX_INSTANCES = DCGM_MAX_NUM_DEVICES * DCGM_MAX_INSTANCES_PER_GPU
# The maximum compute instances are always the same as the maximum instances because each compute instances
# is part of an instance
DCGM_MAX_COMPUTE_INSTANCES = DCGM_MAX_INSTANCES
DCGM_MIG_RECONFIG_DELAY_PROCESSING = 0x1 # Ask the hostengine to wait to process reconfiguring the GPUs
class c_dcgmMigHierarchy_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('count', c_uint),
('entityList', c_dcgmMigHierarchyInfo_v2 * DCGM_MAX_HIERARCHY_INFO)
]
c_dcgmMigHierarchy_version2 = make_dcgm_version(c_dcgmMigHierarchy_v2, 2)
class c_dcgmDeleteMigEntity_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('entityGroupId', c_uint32),
('entityId', c_uint32),
('flags', c_uint),
]
c_dcgmDeleteMigEntity_version1 = make_dcgm_version(c_dcgmDeleteMigEntity_v1, 1)
# /**
# * Enum values for the kinds of MIG creations
# */
DcgmMigCreateGpuInstance = 0 # Create a GPU instance
DcgmMigCreateComputeInstance = 1 # Create a compute instance
class c_dcgmCreateMigEntity_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('parentId', c_uint32),
('profile', c_uint32),
('createOption', c_uint32),
('flags', c_uint),
]
c_dcgmCreateMigEntity_version1 = make_dcgm_version(c_dcgmCreateMigEntity_v1, 1)
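# Illustrative sketch (not part of the DCGM bindings): requesting creation of a
# 1-slice GPU instance. Interpreting parentId as the parent GPU's entity ID for
# GPU-instance creation is an assumption based on the field names above.
def _example_build_create_mig_request(parentGpuId):
    req = c_dcgmCreateMigEntity_v1()
    req.version = c_dcgmCreateMigEntity_version1
    req.parentId = parentGpuId
    req.profile = DcgmMigProfileGpuInstanceSlice1
    req.createOption = DcgmMigCreateGpuInstance
    req.flags = 0
    return req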
# /**
# * Structure to represent error attributes
# */
class c_dcgmErrorInfo_v1(_PrintableStructure):
_fields_ = [
('gpuId', c_uint),
('fieldId', c_ushort),
('status', c_int)
]
# /**
# * Represents list of supported clocks for a device
# */
class c_dcgmDeviceSupportedClockSets_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('count', c_uint),
('clockSet', c_dcgmClockSet_v1 * DCGM_MAX_CLOCKS)
]
# /**
# * Represents accounting information for a device and pid
# */
class c_dcgmDevicePidAccountingStats_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('pid', c_uint32),
('gpuUtilization', c_uint32),
('memoryUtilization', c_uint32),
('maxMemoryUsage', c_uint64),
('startTimestamp', c_uint64),
('activeTimeUsec', c_uint64)
]
# /**
# * Represents thermal information
# */
class c_dcgmDeviceThermals_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('slowdownTemp', c_uint),
('shutdownTemp', c_uint)
]
# /**
# * Represents various power limits
# */
class c_dcgmDevicePowerLimits_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('curPowerLimit', c_uint),
('defaultPowerLimit', c_uint),
('enforcedPowerLimit', c_uint),
('minPowerLimit', c_uint),
('maxPowerLimit', c_uint)
]
# /**
# * Represents device identifiers
# */
class c_dcgmDeviceIdentifiers_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('brandName', c_char * DCGM_MAX_STR_LENGTH),
('deviceName', c_char * DCGM_MAX_STR_LENGTH),
('pciBusId', c_char * DCGM_MAX_STR_LENGTH),
('serial', c_char * DCGM_MAX_STR_LENGTH),
('uuid', c_char * DCGM_MAX_STR_LENGTH),
('vbios', c_char * DCGM_MAX_STR_LENGTH),
('inforomImageVersion', c_char * DCGM_MAX_STR_LENGTH),
('pciDeviceId', c_uint32),
('pciSubSystemId', c_uint32),
('driverVersion', c_char * DCGM_MAX_STR_LENGTH),
('virtualizationMode', c_uint32)
]
# /**
# * Represents memory utilization
# */
class c_dcgmDeviceMemoryUsage_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('bar1Total', c_uint),
('fbTotal', c_uint),
('fbUsed', c_uint),
('fbFree', c_uint)
]
# /**
# * Represents utilization values of vGPUs running on the device
# */
class c_dcgmDeviceVgpuUtilInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuId', c_uint),
('smUtil', c_uint),
('memUtil', c_uint),
('encUtil', c_uint),
('decUtil', c_uint)
]
# /**
# * Utilization values for processes running within vGPU VMs using the device
# */
class c_dcgmDeviceVgpuProcessUtilInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuId', c_uint),
('pid', c_uint),
('processName', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('smUtil', c_uint),
('memUtil', c_uint),
('encUtil', c_uint),
('decUtil', c_uint)
]
# /**
# * Represents current encoder statistics for the given device/vGPU instance
# */
class c_dcgmDeviceEncStats_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('sessionCount', c_uint),
('averageFps', c_uint),
('averageLatency', c_uint)
]
# /**
# * Represents information about active encoder sessions on the given vGPU instance
# */
class c_dcgmDeviceVgpuEncSessions_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuId', c_uint),
('sessionId', c_uint),
('pid', c_uint),
('codecType', c_uint),
('hResolution', c_uint),
('vResolution', c_uint),
('averageFps', c_uint),
('averageLatency', c_uint)
]
# /**
# * Represents current frame buffer capture sessions statistics for the given device/vGPU instance
# */
class c_dcgmDeviceFbcStats_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('sessionCount', c_uint),
('averageFps', c_uint),
('averageLatency', c_uint)
]
# /**
# * Represents information about active FBC session on the given device/vGPU instance
# */
class c_dcgmDeviceFbcSessionInfo_t(_PrintableStructure):
_fields_ = [
('version', c_uint),
('sessionId', c_uint),
('pid', c_uint),
('vgpuId', c_uint),
('displayOrdinal', c_uint),
('sessionType', c_uint),
('sessionFlags', c_uint),
('hMaxResolution', c_uint),
('vMaxResolution', c_uint),
('hResolution', c_uint),
('vResolution', c_uint),
('averageFps', c_uint),
('averageLatency', c_uint)
]
# /**
# * Represents all the active FBC sessions on the given device/vGPU instance
# */
class c_dcgmDeviceFbcSessions_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('sessionCount', c_uint),
('sessionInfo', c_dcgmDeviceFbcSessionInfo_t * DCGM_MAX_FBC_SESSIONS)
]
# /**
# * Represents static info related to vGPU types supported on the device
# */
class c_dcgmDeviceVgpuTypeInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuTypeId', c_uint),
('vgpuTypeName', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('vgpuTypeClass', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('vgpuTypeLicense', c_char * DCGM_GRID_LICENSE_BUFFER_SIZE),
('deviceId', c_uint),
('subsystemId', c_uint),
('numDisplayHeads', c_uint),
('maxInstances', c_uint),
('frameRateLimit', c_uint),
('maxResolutionX', c_uint),
('maxResolutionY', c_uint),
('fbTotal', c_uint)
]
class c_dcgmDeviceVgpuTypeInfo_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vgpuTypeId', c_uint),
('vgpuTypeName', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('vgpuTypeClass', c_char * DCGM_VGPU_NAME_BUFFER_SIZE),
('vgpuTypeLicense', c_char * DCGM_GRID_LICENSE_BUFFER_SIZE),
('deviceId', c_uint),
('subsystemId', c_uint),
('numDisplayHeads', c_uint),
('maxInstances', c_uint),
('frameRateLimit', c_uint),
('maxResolutionX', c_uint),
('maxResolutionY', c_uint),
('fbTotal', c_uint),
('gpuInstanceProfileId', c_uint)
]
dcgmDeviceVgpuTypeInfo_version2 = make_dcgm_version(c_dcgmDeviceVgpuTypeInfo_v2, 2)
class c_dcgmDeviceSettings_v2(_PrintableStructure):
_fields_ = [
('version', c_uint),
('persistenceModeEnabled', c_uint),
('migModeEnabled', c_uint),
('confidentialComputeMode', c_uint),
]
# /**
# * Represents attributes corresponding to a device
# */
class c_dcgmDeviceAttributes_deprecated_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('clockSets', c_dcgmDeviceSupportedClockSets_v1),
('thermalSettings', c_dcgmDeviceThermals_v1),
('powerLimits', c_dcgmDevicePowerLimits_v1),
('identifiers', c_dcgmDeviceIdentifiers_v1),
('memoryUsage', c_dcgmDeviceMemoryUsage_v1),
('unused', c_char * 208)
]
dcgmDeviceAttributes_deprecated_version1 = make_dcgm_version(c_dcgmDeviceAttributes_deprecated_v1, 1)
# /**
# * Represents attributes corresponding to a device
# */
class c_dcgmDeviceAttributes_v3(_PrintableStructure):
_fields_ = [
('version', c_uint),
('clockSets', c_dcgmDeviceSupportedClockSets_v1),
('thermalSettings', c_dcgmDeviceThermals_v1),
('powerLimits', c_dcgmDevicePowerLimits_v1),
('identifiers', c_dcgmDeviceIdentifiers_v1),
('memoryUsage', c_dcgmDeviceMemoryUsage_v1),
('settings', c_dcgmDeviceSettings_v2),
]
dcgmDeviceAttributes_version3 = make_dcgm_version(c_dcgmDeviceAttributes_v3, 3)
# /**
# * Represents attributes info for a MIG device
# */
class c_dcgmDeviceMigAttributesInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('gpuInstanceId', c_uint),
('computeInstanceId', c_uint),
('multiprocessorCount', c_uint),
('sharedCopyEngineCount', c_uint),
('sharedDecoderCount', c_uint),
('sharedEncoderCount', c_uint),
('sharedJpegCount', c_uint),
('sharedOfaCount', c_uint),
('gpuInstanceSliceCount', c_uint),
('computeInstanceSliceCount', c_uint),
('memorySizeMB', c_uint64),
]
dcgmDeviceMigAttributesInfo_version1 = make_dcgm_version(c_dcgmDeviceMigAttributesInfo_v1, 1)
# /**
# * Represents attributes for a MIG device
# */
class c_dcgmDeviceMigAttributes_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('migDevicesCount', c_uint),
('migAttributesInfo', c_dcgmDeviceMigAttributesInfo_v1),
]
dcgmDeviceMigAttributes_version1 = make_dcgm_version(c_dcgmDeviceMigAttributes_v1, 1)
# /**
# * Represents GPU instance profile information
# */
class c_dcgmGpuInstanceProfileInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('id', c_uint),
('isP2pSupported', c_uint),
('sliceCount', c_uint),
('instanceCount', c_uint),
('multiprocessorCount', c_uint),
('copyEngineCount', c_uint),
('decoderCount', c_uint),
('encoderCount', c_uint),
('jpegCount', c_uint),
('ofaCount', c_uint),
('memorySizeMB', c_uint64),
]
dcgmGpuInstanceProfileInfo_version1 = make_dcgm_version(c_dcgmGpuInstanceProfileInfo_v1, 1)
# /**
# * Represents GPU instance profiles
# */
class c_dcgmGpuInstanceProfiles_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('profileCount', c_uint),
('profileInfo', c_dcgmGpuInstanceProfileInfo_v1),
]
dcgmGpuInstanceProfiles_version1 = make_dcgm_version(c_dcgmGpuInstanceProfiles_v1, 1)
# /**
# * Represents Compute instance profile information
# */
class c_dcgmComputeInstanceProfileInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('gpuInstanceId', c_uint),
('id', c_uint),
('sliceCount', c_uint),
('instanceCount', c_uint),
('multiprocessorCount', c_uint),
('sharedCopyEngineCount', c_uint),
('sharedDecoderCount', c_uint),
('sharedEncoderCount', c_uint),
('sharedJpegCount', c_uint),
('sharedOfaCount', c_uint),
]
dcgmComputeInstanceProfileInfo_version1 = make_dcgm_version(c_dcgmComputeInstanceProfileInfo_v1, 1)
# /**
# * Represents Compute instance profiles
# */
class c_dcgmComputeInstanceProfiles_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('profileCount', c_uint),
('profileInfo', c_dcgmComputeInstanceProfileInfo_v1),
]
dcgmComputeInstanceProfiles_version1 = make_dcgm_version(c_dcgmComputeInstanceProfiles_v1, 1)
# /**
# * Represents vGPU attributes corresponding to a device
# */
class c_dcgmVgpuDeviceAttributes_v6(_PrintableStructure):
_fields_ = [
('version', c_uint),
('activeVgpuInstanceCount', c_uint),
('activeVgpuInstanceIds', c_uint * DCGM_MAX_VGPU_INSTANCES_PER_PGPU),
('creatableVgpuTypeCount', c_uint),
('creatableVgpuTypeIds', c_uint * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('supportedVgpuTypeCount', c_uint),
('supportedVgpuTypeInfo', c_dcgmDeviceVgpuTypeInfo_v1 * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('vgpuUtilInfo', c_dcgmDeviceVgpuUtilInfo_v1 * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('gpuUtil', c_uint),
('memCopyUtil', c_uint),
('encUtil', c_uint),
('decUtil', c_uint)
]
dcgmVgpuDeviceAttributes_version6 = make_dcgm_version(c_dcgmVgpuDeviceAttributes_v6, 1)
class c_dcgmVgpuDeviceAttributes_v7(_PrintableStructure):
_fields_ = [
('version', c_uint),
('activeVgpuInstanceCount', c_uint),
('activeVgpuInstanceIds', c_uint * DCGM_MAX_VGPU_INSTANCES_PER_PGPU),
('creatableVgpuTypeCount', c_uint),
('creatableVgpuTypeIds', c_uint * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('supportedVgpuTypeCount', c_uint),
('supportedVgpuTypeInfo', c_dcgmDeviceVgpuTypeInfo_v2 * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('vgpuUtilInfo', c_dcgmDeviceVgpuUtilInfo_v1 * DCGM_MAX_VGPU_TYPES_PER_PGPU),
('gpuUtil', c_uint),
('memCopyUtil', c_uint),
('encUtil', c_uint),
('decUtil', c_uint)
]
dcgmVgpuDeviceAttributes_version7 = make_dcgm_version(c_dcgmVgpuDeviceAttributes_v7, 7)
# /**
# * Represents attributes specific to vGPU instance
# */
class c_dcgmVgpuInstanceAttributes_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('vmId', c_char * DCGM_DEVICE_UUID_BUFFER_SIZE),
('vmName', c_char * DCGM_DEVICE_UUID_BUFFER_SIZE),
('vgpuTypeId', c_uint),
('vgpuUuid', c_char * DCGM_DEVICE_UUID_BUFFER_SIZE),
('vgpuDriverVersion', c_char * DCGM_DEVICE_UUID_BUFFER_SIZE),
('fbUsage', c_uint),
('licenseStatus', c_uint),
('frameRateLimit', c_uint)
]
dcgmVgpuInstanceAttributes_version1 = make_dcgm_version(c_dcgmVgpuInstanceAttributes_v1, 1)
class c_dcgmConfigPowerLimit(_PrintableStructure):
_fields_ = [
('type', c_uint),
('val', c_uint)
]
class c_dcgmConfigPerfStateSettings_t(_PrintableStructure):
_fields_ = [
('syncBoost', c_uint),
('targetClocks', c_dcgmClockSet_v1),
]
# Structure to represent default configuration for a device
class c_dcgmDeviceConfig_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('gpuId', c_uint),
('mEccMode', c_uint),
('mComputeMode', c_uint),
('mPerfState', c_dcgmConfigPerfStateSettings_t),
('mPowerLimit', c_dcgmConfigPowerLimit)
]
dcgmDeviceConfig_version1 = make_dcgm_version(c_dcgmDeviceConfig_v1, 1)
# Structure to represent default vGPU configuration for a device
class c_dcgmDeviceVgpuConfig_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('gpuId', c_uint),
('mEccMode', c_uint),
('mComputeMode', c_uint),
('mPerfState', c_dcgmConfigPerfStateSettings_t),
('mPowerLimit', c_dcgmConfigPowerLimit)
]
def SetBlank(self):
#Does not set version or gpuId
self.mEccMode = dcgmvalue.DCGM_INT32_BLANK
self.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
self.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
self.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
self.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
self.mPowerLimit.type = DCGM_CONFIG_POWER_CAP_INDIVIDUAL
self.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
dcgmDeviceVgpuConfig_version1 = make_dcgm_version(c_dcgmDeviceVgpuConfig_v1, 1)
# Structure to receive update on the list of metrics.
class c_dcgmPolicyUpdate_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('power', c_uint)
]
dcgmPolicyUpdate_version1 = make_dcgm_version(c_dcgmPolicyUpdate_v1, 1)
# Represents a Callback to receive power updates from the host engine
_dcgmRecvUpdates_t = c_void_p
# Define the structure that contains specific policy information
class c_dcgmPolicyViolation_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('notifyOnEccDbe', c_uint),
('notifyOnPciEvent', c_uint),
('notifyOnMaxRetiredPages', c_uint)
]
dcgmPolicyViolation_version1 = make_dcgm_version(c_dcgmPolicyViolation_v1, 1)
class c_dcgmWatchFieldValue_v1(_PrintableStructure):
_fields_ = []
dcgmWatchFieldValue_version1 = make_dcgm_version(c_dcgmWatchFieldValue_v1, 1)
class c_dcgmUnwatchFieldValue_v1(_PrintableStructure):
_fields_ = []
dcgmUnwatchFieldValue_version1 = make_dcgm_version(c_dcgmUnwatchFieldValue_v1, 1)
class c_dcgmUpdateAllFields_v1(_PrintableStructure):
_fields_ = []
dcgmUpdateAllFields_version1 = make_dcgm_version(c_dcgmUpdateAllFields_v1, 1)
dcgmGetMultipleValuesForFieldResponse_version1 = 1
# policy enums (and table indices)
DCGM_POLICY_COND_IDX_DBE = 0
DCGM_POLICY_COND_IDX_PCI = 1
DCGM_POLICY_COND_IDX_MAX_PAGES_RETIRED = 2
DCGM_POLICY_COND_IDX_THERMAL = 3
DCGM_POLICY_COND_IDX_POWER = 4
DCGM_POLICY_COND_IDX_NVLINK = 5
DCGM_POLICY_COND_IDX_XID = 6
DCGM_POLICY_COND_IDX_MAX = 7
# policy enum bitmasks
DCGM_POLICY_COND_DBE = 0x1
DCGM_POLICY_COND_PCI = 0x2
DCGM_POLICY_COND_MAX_PAGES_RETIRED = 0x4
DCGM_POLICY_COND_THERMAL = 0x8
DCGM_POLICY_COND_POWER = 0x10
DCGM_POLICY_COND_NVLINK = 0x20
DCGM_POLICY_COND_XID = 0x40
DCGM_POLICY_COND_MAX = 7
DCGM_POLICY_MODE_AUTOMATED = 0
DCGM_POLICY_MODE_MANUAL = 1
DCGM_POLICY_ISOLATION_NONE = 0
DCGM_POLICY_ACTION_NONE = 0
DCGM_POLICY_ACTION_GPURESET = 1 #Deprecated
DCGM_POLICY_VALID_NONE = 0
DCGM_POLICY_VALID_SV_SHORT = 1
DCGM_POLICY_VALID_SV_MED = 2
DCGM_POLICY_VALID_SV_LONG = 3
DCGM_POLICY_VALID_SV_XLONG = 4
DCGM_POLICY_FAILURE_NONE = 0
DCGM_DIAG_LVL_INVALID = 0
DCGM_DIAG_LVL_SHORT = 10
DCGM_DIAG_LVL_MED = 20
DCGM_DIAG_LVL_LONG = 30
DCGM_DIAG_LVL_XLONG = 40
DCGM_DIAG_RESULT_PASS = 0
DCGM_DIAG_RESULT_SKIP = 1
DCGM_DIAG_RESULT_WARN = 2
DCGM_DIAG_RESULT_FAIL = 3
DCGM_DIAG_RESULT_NOT_RUN = 4
class c_dcgmPolicyConditionParmTypes_t(DcgmUnion):
_fields_ = [
('boolean', c_bool),
('llval', c_longlong),
]
class c_dcgmPolicyConditionParms_t(_PrintableStructure):
_fields_ = [
('tag', c_uint),
('val', c_dcgmPolicyConditionParmTypes_t)
]
class c_dcgmPolicy_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('condition', c_uint), # an OR'd list of DCGM_POLICY_COND_*
('mode', c_uint),
('isolation', c_uint),
('action', c_uint),
('validation', c_uint),
('response', c_uint),
('parms', c_dcgmPolicyConditionParms_t * DCGM_POLICY_COND_MAX)
]
dcgmPolicy_version1 = make_dcgm_version(c_dcgmPolicy_v1, 1)
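# Illustrative sketch (not part of the DCGM bindings): one way to populate a
# c_dcgmPolicy_v1 that watches for double-bit ECC and XID errors in automated mode
# with no isolation or action. Only names defined above are used; leaving the
# per-condition parms at their zeroed defaults is an assumption made for brevity.
def _example_build_policy():
    policy = c_dcgmPolicy_v1()
    policy.version = dcgmPolicy_version1
    policy.condition = DCGM_POLICY_COND_DBE | DCGM_POLICY_COND_XID
    policy.mode = DCGM_POLICY_MODE_AUTOMATED
    policy.isolation = DCGM_POLICY_ISOLATION_NONE
    policy.action = DCGM_POLICY_ACTION_NONE
    policy.validation = DCGM_POLICY_VALID_SV_SHORT
    return policy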
class c_dcgmPolicyConditionPci_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("counter", c_uint) # value of the PCIe replay counter
]
class c_dcgmPolicyConditionDbe_t(_PrintableStructure):
LOCATIONS = {
'L1': 0,
'L2': 1,
'DEVICE': 2,
'REGISTER': 3,
'TEXTURE': 4
}
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("location", c_int), # location of the error (one of self.LOCATIONS)
("numerrors", c_uint) # number of errors
]
class c_dcgmPolicyConditionMpr_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("sbepages", c_uint), # number of pending pages due to SBE
("dbepages", c_uint) # number of pending pages due to DBE
]
class c_dcgmPolicyConditionThermal_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("thermalViolation", c_uint) # Temperature reached that violated policy
]
class c_dcgmPolicyConditionPower_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("powerViolation", c_uint) # Power value reached that violated policyy
]
class c_dcgmPolicyConditionNvlink_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("fieldId", c_ushort), # FieldId of the nvlink error counter
("counter", c_uint) # Error value reached that violated policyy
]
class c_dcgmPolicyConditionXID_t(_PrintableStructure):
_fields_ = [
("timestamp", c_longlong), # timestamp of the error
("errnum", c_uint) # XID error number
]
class c_dcgmPolicyCallbackResponse_v1(_PrintableStructure):
class Value(DcgmUnion):
# implement more of the fields when a test requires them
_fields_ = [
("dbe", c_dcgmPolicyConditionDbe_t), # ECC DBE return structure
("pci", c_dcgmPolicyConditionPci_t), # PCI replay error return structure
("mpr", c_dcgmPolicyConditionMpr_t), # Max retired pages limit return structure
("thermal", c_dcgmPolicyConditionThermal_t), # Thermal policy violations return structure
("power", c_dcgmPolicyConditionPower_t), # Power policy violations return structure
("nvlink", c_dcgmPolicyConditionNvlink_t), # Nvlink policy violations return structure..
("xid", c_dcgmPolicyConditionXID_t) # XID policy violations return structure
]
_fields_ = [
("version", c_uint),
("condition", c_int), # an OR'ed list of DCGM_POLICY_COND_*
("val", Value)
]
class c_dcgmFieldValue_v1_value(DcgmUnion):
_fields_ = [
('i64', c_int64),
('dbl', c_double),
('str', c_char * DCGM_MAX_STR_LENGTH),
('blob', c_byte * DCGM_MAX_BLOB_LENGTH)
]
# This structure is used to represent value for the field to be queried.
class c_dcgmFieldValue_v1(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('fieldId', c_ushort),
('fieldType', c_short),
('status', c_int),
('ts', c_int64),
('value', c_dcgmFieldValue_v1_value)
]
dcgmFieldValue_version1 = make_dcgm_version(c_dcgmFieldValue_v1, 1)
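# Illustrative helper (not part of the DCGM bindings): picking the populated union
# member out of a c_dcgmFieldValue_v1 based on its fieldType. Comparing against both
# the DCGM_FT_* character constants and their ordinals is an assumption about how
# fieldType is encoded; treat this as a sketch, not reference behavior.
def _example_extract_field_value(fv):
    import dcgm_fields
    if fv.fieldType in (dcgm_fields.DCGM_FT_INT64, ord(dcgm_fields.DCGM_FT_INT64)):
        return fv.value.i64
    if fv.fieldType in (dcgm_fields.DCGM_FT_DOUBLE, ord(dcgm_fields.DCGM_FT_DOUBLE)):
        return fv.value.dbl
    if fv.fieldType in (dcgm_fields.DCGM_FT_STRING, ord(dcgm_fields.DCGM_FT_STRING)):
        return fv.value.str
    return bytes(fv.value.blob)  # fall back to the raw blob for binary fields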
# This structure is used to represent value for the field to be queried (version 2)
class c_dcgmFieldValue_v2(_PrintableStructure):
_fields_ = [
# version must always be first
('version', c_uint),
('entityGroupId', c_uint),
('entityId', c_uint),
('fieldId', c_ushort),
('fieldType', c_short),
('status', c_int),
('unused', c_uint),
('ts', c_int64),
('value', c_dcgmFieldValue_v1_value)
]
dcgmFieldValue_version2 = make_dcgm_version(c_dcgmFieldValue_v2, 2)
#Field value flags used by dcgm_agent.dcgmEntitiesGetLatestValues()
DCGM_FV_FLAG_LIVE_DATA = 0x00000001
DCGM_HEALTH_WATCH_PCIE = 0x1
DCGM_HEALTH_WATCH_NVLINK = 0x2
DCGM_HEALTH_WATCH_PMU = 0x4
DCGM_HEALTH_WATCH_MCU = 0x8
DCGM_HEALTH_WATCH_MEM = 0x10
DCGM_HEALTH_WATCH_SM = 0x20
DCGM_HEALTH_WATCH_INFOROM = 0x40
DCGM_HEALTH_WATCH_THERMAL = 0x80
DCGM_HEALTH_WATCH_POWER = 0x100
DCGM_HEALTH_WATCH_DRIVER = 0x200
DCGM_HEALTH_WATCH_NVSWITCH_NONFATAL = 0x400
DCGM_HEALTH_WATCH_NVSWITCH_FATAL = 0x800
DCGM_HEALTH_WATCH_ALL = 0xFFFFFFFF
DCGM_HEALTH_WATCH_COUNT_V1 = 10
DCGM_HEALTH_WATCH_COUNT_V2 = 12
DCGM_HEALTH_RESULT_PASS = 0
DCGM_HEALTH_RESULT_WARN = 10
DCGM_HEALTH_RESULT_FAIL = 20
class c_dcgmDiagErrorDetail_t(_PrintableStructure):
_fields_ = [
('msg', c_char * 1024),
('code', c_uint)
]
DCGM_HEALTH_WATCH_MAX_INCIDENTS = DCGM_GROUP_MAX_ENTITIES
class c_dcgmIncidentInfo_t(_PrintableStructure):
_fields_ = [
('system', c_uint),
('health', c_uint32),
('error', c_dcgmDiagErrorDetail_t),
('entityInfo', c_dcgmGroupEntityPair_t),
]
class c_dcgmHealthResponse_v4(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('overallHealth', c_uint32),
('incidentCount', c_uint32),
('incidents', c_dcgmIncidentInfo_t * DCGM_HEALTH_WATCH_MAX_INCIDENTS),
]
dcgmHealthResponse_version4 = make_dcgm_version(c_dcgmHealthResponse_v4, 4)
class c_dcgmHealthSetParams_v2(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('groupId', c_void_p),
('systems', c_uint32),
('updateInterval', c_int64),
('maxKeepAge', c_double)
]
dcgmHealthSetParams_version2 = make_dcgm_version(c_dcgmHealthSetParams_v2, 2)
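# Illustrative sketch (not part of the DCGM bindings): filling c_dcgmHealthSetParams_v2
# to watch PCIe, memory, and thermal health on a group. The groupId handle comes from
# group creation elsewhere; the interval and age units (microseconds and seconds) are
# assumptions here.
def _example_build_health_params(groupId):
    params = c_dcgmHealthSetParams_v2()
    params.version = dcgmHealthSetParams_version2
    params.groupId = groupId
    params.systems = DCGM_HEALTH_WATCH_PCIE | DCGM_HEALTH_WATCH_MEM | DCGM_HEALTH_WATCH_THERMAL
    params.updateInterval = 30 * 1000000  # assumed to be microseconds
    params.maxKeepAge = 3600.0            # assumed to be seconds
    return params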
#Pid info structs
class c_dcgmStatSummaryInt64_t(_PrintableStructure):
_fields_ = [
('minValue', c_int64),
('maxValue', c_int64),
('average', c_int64)
]
class c_dcgmStatSummaryInt32_t(_PrintableStructure):
_fields_ = [
('minValue', c_int32),
('maxValue', c_int32),
('average', c_int32)
]
class c_dcgmStatSummaryFp64_t(_PrintableStructure):
_fields_ = [
('minValue', c_double),
('maxValue', c_double),
('average', c_double)
]
class c_dcgmProcessUtilInfo_t(_PrintableStructure):
_fields_ = [
('pid', c_uint),
('smUtil', c_double),
('memUtil', c_double)
]
class c_dcgmHealthResponseInfo_t(_PrintableStructure):
_fields_ = [
('system', c_uint),
('health', c_uint)
]
DCGM_MAX_PID_INFO_NUM = 16
class c_dcgmPidSingleInfo_t(_PrintableStructure):
_fields_ = [
('gpuId', c_uint32),
('energyConsumed', c_int64),
('pcieRxBandwidth', c_dcgmStatSummaryInt64_t),
('pcieTxBandwidth', c_dcgmStatSummaryInt64_t),
('pcieReplays', c_int64),
('startTime', c_int64),
('endTime', c_int64),
('processUtilization', c_dcgmProcessUtilInfo_t),
('smUtilization', c_dcgmStatSummaryInt32_t),
('memoryUtilization', c_dcgmStatSummaryInt32_t),
('eccSingleBit', c_uint32), #Deprecated
('eccDoubleBit', c_uint32),
('memoryClock', c_dcgmStatSummaryInt32_t),
('smClock', c_dcgmStatSummaryInt32_t),
('numXidCriticalErrors', c_int32),
('xidCriticalErrorsTs', c_int64 * 10),
('numOtherComputePids', c_int32),
('otherComputePids', c_uint32 * DCGM_MAX_PID_INFO_NUM),
('numOtherGraphicsPids', c_int32),
('otherGraphicsPids', c_uint32 * DCGM_MAX_PID_INFO_NUM),
('maxGpuMemoryUsed', c_int64),
('powerViolationTime', c_int64),
('thermalViolationTime', c_int64),
('reliabilityViolationTime', c_int64),
('boardLimitViolationTime', c_int64),
('lowUtilizationTime', c_int64),
('syncBoostTime', c_int64),
('overallHealth', c_uint),
('incidentCount', c_uint),
('systems', c_dcgmHealthResponseInfo_t * DCGM_HEALTH_WATCH_COUNT_V1)
]
class c_dcgmPidInfo_v2(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('pid', c_uint32),
('unused', c_uint32),
('numGpus', c_int32),
('summary', c_dcgmPidSingleInfo_t),
('gpus', c_dcgmPidSingleInfo_t * DCGM_MAX_NUM_DEVICES)
]
dcgmPidInfo_version2 = make_dcgm_version(c_dcgmPidInfo_v2, 2)
class c_dcgmRunningProcess_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('pid', c_uint32),
('memoryUsed', c_uint64)
]
dcgmRunningProcess_version1 = make_dcgm_version(c_dcgmRunningProcess_v1, 1)
c_dcgmRunningProcess_t = c_dcgmRunningProcess_v1
class c_dcgmGpuUsageInfo_t(_PrintableStructure):
_fields_ = [
('gpuId', c_uint32),
('energyConsumed', c_int64),
('powerUsage', c_dcgmStatSummaryFp64_t),
('pcieRxBandwidth', c_dcgmStatSummaryInt64_t),
('pcieTxBandwidth', c_dcgmStatSummaryInt64_t),
('pcieReplays', c_int64),
('startTime', c_int64),
('endTime', c_int64),
('smUtilization', c_dcgmStatSummaryInt32_t),
('memoryUtilization', c_dcgmStatSummaryInt32_t),
('eccSingleBit', c_uint32), #Deprecated
('eccDoubleBit', c_uint32),
('memoryClock', c_dcgmStatSummaryInt32_t),
('smClock', c_dcgmStatSummaryInt32_t),
('numXidCriticalErrors', c_int32),
('xidCriticalErrorsTs', c_int64 * 10),
('numComputePids', c_int32),
('computePids', c_dcgmProcessUtilInfo_t * DCGM_MAX_PID_INFO_NUM ),
('numGraphicsPids', c_int32),
('graphicsPids', c_dcgmProcessUtilInfo_t * DCGM_MAX_PID_INFO_NUM ),
('maxGpuMemoryUsed', c_int64),
('powerViolationTime', c_int64),
('thermalViolationTime', c_int64),
('reliabilityViolationTime', c_int64),
('boardLimitViolationTime', c_int64),
('lowUtilizationTime', c_int64),
('syncBoostTime', c_int64),
('overallHealth', c_uint),
('incidentCount', c_uint),
('systems', c_dcgmHealthResponseInfo_t * DCGM_HEALTH_WATCH_COUNT_V1)
]
class c_dcgmJobInfo_v3(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('numGpus', c_int32),
('summary', c_dcgmGpuUsageInfo_t),
('gpus', c_dcgmGpuUsageInfo_t * DCGM_MAX_NUM_DEVICES)
]
dcgmJobInfo_version3 = make_dcgm_version(c_dcgmJobInfo_v3, 3)
class c_dcgmDiagTestResult_v2(_PrintableStructure):
_fields_ = [
('result', c_uint),
('error', c_dcgmDiagErrorDetail_t),
('info', c_char * 1024)
]
class c_dcgmDiagResponsePerGpu_v4(_PrintableStructure):
_fields_ = [
('gpuId', c_uint),
('hwDiagnosticReturn', c_uint),
('results', c_dcgmDiagTestResult_v2 * DCGM_PER_GPU_TEST_COUNT_V8)
]
DCGM_SWTEST_COUNT = 10
LEVEL_ONE_MAX_RESULTS = 16
class c_dcgmDiagResponse_v8(_PrintableStructure):
_fields_ = [
('version', c_uint),
('gpuCount', c_uint),
('levelOneTestCount', c_uint),
('levelOneResults', c_dcgmDiagTestResult_v2 * LEVEL_ONE_MAX_RESULTS),
('perGpuResponses', c_dcgmDiagResponsePerGpu_v4 * DCGM_MAX_NUM_DEVICES),
('systemError', c_dcgmDiagErrorDetail_t),
('_unused', c_char * 1024)
]
dcgmDiagResponse_version8 = make_dcgm_version(c_dcgmDiagResponse_v8, 8)
DCGM_AFFINITY_BITMASK_ARRAY_SIZE = 8
class c_dcgmDeviceTopologyPath_t(_PrintableStructure):
_fields_ = [
('gpuId', c_uint32),
('path', c_uint32),
('localNvLinkIds', c_uint32)
]
class c_dcgmDeviceTopology_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('cpuAffinityMask', c_ulong * DCGM_AFFINITY_BITMASK_ARRAY_SIZE),
('numGpus', c_uint32),
('gpuPaths', c_dcgmDeviceTopologyPath_t * (DCGM_MAX_NUM_DEVICES - 1))
]
dcgmDeviceTopology_version1 = make_dcgm_version(c_dcgmDeviceTopology_v1, 1)
class c_dcgmGroupTopology_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('groupCpuAffinityMask', c_ulong * DCGM_AFFINITY_BITMASK_ARRAY_SIZE),
('numaOptimalFlag', c_uint32),
('slowestPath', c_uint32)
]
dcgmGroupTopology_version1 = make_dcgm_version(c_dcgmGroupTopology_v1, 1)
# Maximum number of field groups that can exist
DCGM_MAX_NUM_FIELD_GROUPS = 64
# Maximum number of field IDs that can be in a single field group
DCGM_MAX_FIELD_IDS_PER_FIELD_GROUP = 128
class c_dcgmFieldGroupInfo_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('numFieldIds', c_uint32),
('fieldGroupId', c_void_p),
('fieldGroupName', c_char * DCGM_MAX_STR_LENGTH),
('fieldIds', c_uint16 * DCGM_MAX_FIELD_IDS_PER_FIELD_GROUP)
]
dcgmFieldGroupInfo_version1 = make_dcgm_version(c_dcgmFieldGroupInfo_v1, 1)
class c_dcgmAllFieldGroup_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('numFieldGroups', c_uint32),
('fieldGroups', c_dcgmFieldGroupInfo_v1 * DCGM_MAX_NUM_FIELD_GROUPS)
]
dcgmAllFieldGroup_version1 = make_dcgm_version(c_dcgmAllFieldGroup_v1, 1)
class c_dcgmIntrospectMemory_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('bytesUsed', c_longlong) # The total number of bytes being used to store all of the fields being watched
]
dcgmIntrospectMemory_version1 = make_dcgm_version(c_dcgmIntrospectMemory_v1, 1)
class c_dcgmIntrospectCpuUtil_v1(_PrintableStructure):
_fields_ = [
('version', c_uint32), #!< version number (dcgmIntrospectCpuUtil_version)
('total', c_double), #!< fraction of device's CPU resources that were used
('kernel', c_double), #!< fraction of device's CPU resources that were used in kernel mode
('user', c_double), #!< fraction of device's CPU resources that were used in user mode
]
dcgmIntrospectCpuUtil_version1 = make_dcgm_version(c_dcgmIntrospectCpuUtil_v1, 1)
DCGM_MAX_CONFIG_FILE_LEN = 10000
DCGM_MAX_TEST_NAMES = 20
DCGM_MAX_TEST_NAMES_LEN = 50
DCGM_MAX_TEST_PARMS = 100
DCGM_MAX_TEST_PARMS_LEN = 100
DCGM_GPU_LIST_LEN = 50
DCGM_FILE_LEN = 30
DCGM_PATH_LEN = 128
DCGM_THROTTLE_MASK_LEN = 50
# Flags options for running the GPU diagnostic
DCGM_RUN_FLAGS_VERBOSE = 0x0001
DCGM_RUN_FLAGS_STATSONFAIL = 0x0002
# UNUSED
DCGM_RUN_FLAGS_TRAIN = 0x0004
# UNUSED
DCGM_RUN_FLAGS_FORCE_TRAIN = 0x0008
DCGM_RUN_FLAGS_FAIL_EARLY = 0x0010 # Enable fail early checks for the Targeted Stress, Targeted Power, SM Stress, and Diagnostic tests
class c_dcgmRunDiag_v7(_PrintableStructure):
_fields_ = [
('version', c_uint), # version of this message
('flags', c_uint), # flags specifying binary options for running it. Currently verbose and stats on fail
('debugLevel', c_uint), # 0-5 for the debug level the GPU diagnostic will use for logging
('groupId', c_void_p), # group of GPUs to verify. Cannot be specified together with gpuList.
('validate', c_uint), # 0-3 for which tests to run. Optional.
('testNames', c_char * DCGM_MAX_TEST_NAMES * DCGM_MAX_TEST_NAMES_LEN), # Specified list of test names. Optional.
('testParms', c_char * DCGM_MAX_TEST_PARMS * DCGM_MAX_TEST_PARMS_LEN), # Parameters to set for specified tests in the format: testName.parameterName=parameterValue. Optional.
('fakeGpuList', c_char * DCGM_GPU_LIST_LEN), # Comma-separated list of fake gpus. Cannot be specified with the groupId or gpuList.
('gpuList', c_char * DCGM_GPU_LIST_LEN), # Comma-separated list of gpus. Cannot be specified with the groupId.
('debugLogFile', c_char * DCGM_PATH_LEN), # Alternate name for the debug log file that should be used
('statsPath', c_char * DCGM_PATH_LEN), # Path that the plugin's statistics files should be written to
('configFileContents', c_char * DCGM_MAX_CONFIG_FILE_LEN), # Contents of nvvs config file (likely yaml)
('throttleMask', c_char * DCGM_THROTTLE_MASK_LEN), # Throttle reasons to ignore as either integer mask or csv list of reasons
('pluginPath', c_char * DCGM_PATH_LEN), # Custom path to the diagnostic plugins
('_unusedInt1', c_uint), # Unused
('_unusedInt2', c_uint), # Unused
('_unusedInt3', c_uint), # Unused
('_unusedBuf', c_char * DCGM_PATH_LEN), # Unused
('failCheckInterval', c_uint), # How often the fail early checks should occur when DCGM_RUN_FLAGS_FAIL_EARLY is set.
]
dcgmRunDiag_version7 = make_dcgm_version(c_dcgmRunDiag_v7, 7)
# Latest c_dcgmRunDiag class
c_dcgmRunDiag_t = c_dcgmRunDiag_v7
# Latest version for dcgmRunDiag_t
dcgmRunDiag_version = dcgmRunDiag_version7
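# Illustrative sketch (not part of the DCGM bindings): a minimal c_dcgmRunDiag_v7 for a
# short run on a single GPU. String members are fixed-size char arrays, so byte strings
# are assigned; treating validate=1 as the short suite is an assumption based on the
# "0-3 for which tests to run" note above.
def _example_build_run_diag(gpuId):
    rd = c_dcgmRunDiag_v7()
    rd.version = dcgmRunDiag_version7
    rd.flags = DCGM_RUN_FLAGS_VERBOSE
    rd.validate = 1  # assumed: short validation level
    rd.gpuList = str(gpuId).encode('utf-8')
    return rd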
#Flags for dcgmGetEntityGroupEntities's flags parameter
DCGM_GEGE_FLAG_ONLY_SUPPORTED = 0x00000001 #Only return entities that are supported by DCGM.
#Identifies a GPU NVLink error type returned by DCGM_FI_DEV_GPU_NVLINK_ERRORS
DCGM_GPU_NVLINK_ERROR_RECOVERY_REQUIRED = 1 # NVLink link recovery error occurred
DCGM_GPU_NVLINK_ERROR_FATAL = 2 # NVLink link fatal error occurred
# Topology hints for dcgmSelectGpusByTopology()
DCGM_TOPO_HINT_F_NONE = 0x00000000 # No hints specified
DCGM_TOPO_HINT_F_IGNOREHEALTH = 0x00000001 # Ignore the health of the GPUs when picking GPUs for job execution.
# By default, only healthy GPUs are considered.
class c_dcgmTopoSchedHint_v1(_PrintableStructure):
_fields_ = [
('version', c_uint), # version of this message
('inputGpuIds', c_uint64), # bitmask of the GPU ids to choose from
('numGpus', c_uint32), # the number of GPUs that DCGM should choose
('hintFlags', c_uint64), # Hints to ignore certain factors for the scheduling hint
]
dcgmTopoSchedHint_version1 = make_dcgm_version(c_dcgmTopoSchedHint_v1, 1)
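# Illustrative sketch (not part of the DCGM bindings): asking topology-based GPU
# selection to pick two GPUs out of GPUs 0-3 while ignoring GPU health. inputGpuIds is
# a bitmask, so 0xF selects GPU IDs 0 through 3.
def _example_build_topo_sched_hint():
    hint = c_dcgmTopoSchedHint_v1()
    hint.version = dcgmTopoSchedHint_version1
    hint.inputGpuIds = 0xF
    hint.numGpus = 2
    hint.hintFlags = DCGM_TOPO_HINT_F_IGNOREHEALTH
    return hint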
#DCGM NvLink link states used by c_dcgmNvLinkGpuLinkStatus_v1 & 2 and c_dcgmNvLinkNvSwitchLinkStatus_t's linkState field
DcgmNvLinkLinkStateNotSupported = 0 # NvLink is unsupported by this GPU (Default for GPUs)
DcgmNvLinkLinkStateDisabled = 1 # NvLink is supported for this link but this link is disabled (Default for NvSwitches)
DcgmNvLinkLinkStateDown = 2 # This NvLink link is down (inactive)
DcgmNvLinkLinkStateUp = 3 # This NvLink link is up (active)
# State of NvLink links for a GPU
class c_dcgmNvLinkGpuLinkStatus_v1(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the GPU (gpuId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_GPU_LEGACY1), #Link state of each link of this GPU
]
# State of NvLink links for a GPU
class c_dcgmNvLinkGpuLinkStatus_v2(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the GPU (gpuId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_GPU_LEGACY2), #Link state of each link of this GPU
]
class c_dcgmNvLinkGpuLinkStatus_v3(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the GPU (gpuId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_GPU), #Link state of each link of this GPU
]
#State of NvLink links for a NvSwitch
class c_dcgmNvLinkNvSwitchLinkStatus_v1(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the NvSwitch (physicalId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH_V1) #Link state of each link of this NvSwitch
]
class c_dcgmNvLinkStatus_v2(_PrintableStructure):
'''
NvLink link status for all GPUs and NvSwitches in the system
'''
_fields_ = [
('version', c_uint32), # version of this message. Should be dcgmNvLinkStatus_version2
('numGpus', c_uint32), # Number of GPUs populated in gpus[]
('gpus', c_dcgmNvLinkGpuLinkStatus_v2 * DCGM_MAX_NUM_DEVICES), #Per-GPU NvLink link statuses
('numNvSwitches', c_uint32), # Number of NvSwitches populated in nvSwitches[]
('nvSwitches', c_dcgmNvLinkNvSwitchLinkStatus_v1 * DCGM_MAX_NUM_SWITCHES) #Per-NvSwitch NvLink link statuses
]
dcgmNvLinkStatus_version2 = make_dcgm_version(c_dcgmNvLinkStatus_v2, 2)
#State of NvLink links for a NvSwitch
class c_dcgmNvLinkNvSwitchLinkStatus_v2(_PrintableStructure):
_fields_ = [
('entityId', c_uint32), # Entity ID of the NvSwitch (physicalId)
('linkState', c_uint32 * DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH) #Link state of each link of this NvSwitch
]
class c_dcgmNvLinkStatus_v3(_PrintableStructure):
'''
NvLink link status for all GPUs and NvSwitches in the system
'''
_fields_ = [
('version', c_uint32), # version of this message. Should be dcgmNvLinkStatus_version3
('numGpus', c_uint32), # Number of GPUs populated in gpus[]
('gpus', c_dcgmNvLinkGpuLinkStatus_v3 * DCGM_MAX_NUM_DEVICES), #Per-GPU NvLink link statuses
('numNvSwitches', c_uint32), # Number of NvSwitches populated in nvSwitches[]
('nvSwitches', c_dcgmNvLinkNvSwitchLinkStatus_v2 * DCGM_MAX_NUM_SWITCHES) #Per-NvSwitch NvLink link statuses
]
dcgmNvLinkStatus_version3 = make_dcgm_version(c_dcgmNvLinkStatus_v3, 3)
# Bitmask values for dcgmGetFieldIdSummary
DCGM_SUMMARY_MIN = 0x00000001
DCGM_SUMMARY_MAX = 0x00000002
DCGM_SUMMARY_AVG = 0x00000004
DCGM_SUMMARY_SUM = 0x00000008
DCGM_SUMMARY_COUNT = 0x00000010
DCGM_SUMMARY_INTEGRAL = 0x00000020
DCGM_SUMMARY_DIFF = 0x00000040
DCGM_SUMMARY_SIZE = 7
class c_dcgmSummaryResponse_t(_PrintableStructure):
class ResponseValue(DcgmUnion):
_fields_ = [
('i64', c_int64),
('dbl', c_double),
]
_fields_ = [
('fieldType', c_uint),
('summaryCount', c_uint),
('values', ResponseValue * DCGM_SUMMARY_SIZE),
]
class c_dcgmFieldSummaryRequest_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('fieldId', c_ushort),
('entityGroupType', c_uint32),
('entityId', c_uint),
('summaryTypeMask', c_uint32),
('startTime', c_uint64),
('endTime', c_uint64),
('response', c_dcgmSummaryResponse_t),
]
dcgmFieldSummaryRequest_version1 = make_dcgm_version(c_dcgmFieldSummaryRequest_v1, 1)
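# Illustrative sketch (not part of the DCGM bindings): building a
# c_dcgmFieldSummaryRequest_v1 that asks for the min/max/average of one field on one
# GPU. Using 0 for startTime/endTime to mean "all cached samples" is an assumption.
def _example_build_field_summary_request(fieldId, gpuId):
    import dcgm_fields
    request = c_dcgmFieldSummaryRequest_v1()
    request.version = dcgmFieldSummaryRequest_version1
    request.fieldId = fieldId
    request.entityGroupType = dcgm_fields.DCGM_FE_GPU
    request.entityId = gpuId
    request.summaryTypeMask = DCGM_SUMMARY_MIN | DCGM_SUMMARY_MAX | DCGM_SUMMARY_AVG
    request.startTime = 0
    request.endTime = 0
    return request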
# Module IDs
DcgmModuleIdCore = 0 # Core DCGM
DcgmModuleIdNvSwitch = 1 # NvSwitch Module
DcgmModuleIdVGPU = 2 # VGPU Module
DcgmModuleIdIntrospect = 3 # Introspection Module
DcgmModuleIdHealth = 4 # Health Module
DcgmModuleIdPolicy = 5 # Policy Module
DcgmModuleIdConfig = 6 # Config Module
DcgmModuleIdDiag = 7 # GPU Diagnostic Module
DcgmModuleIdProfiling = 8 # Profiling Module
DcgmModuleIdCount = 9 # 1 greater than largest ID above
# Module Status
DcgmModuleStatusNotLoaded = 0 # Module has not been loaded yet
DcgmModuleStatusDenylisted = 1 # Module has been added to the denylist so it can't be loaded
DcgmModuleStatusFailed = 2 # Loading the module failed
DcgmModuleStatusLoaded = 3 # Module has been loaded
DcgmModuleStatusUnloaded = 4 # Module has been unloaded
DcgmModuleStatusPaused = 5 # Module has been paused. Implies it's been loaded
DCGM_MODULE_STATUSES_CAPACITY = 16
class c_dcgmModuleGetStatusesModule_t(_PrintableStructure):
_fields_ = [
('id', c_uint32), #One of DcgmModuleId*
('status', c_uint32), #One of DcgmModuleStatus*
]
class c_dcgmModuleGetStatuses_v1(_PrintableStructure):
_fields_ = [
('version', c_uint),
('numStatuses', c_uint32),
('statuses', c_dcgmModuleGetStatusesModule_t * DCGM_MODULE_STATUSES_CAPACITY),
]
dcgmModuleGetStatuses_version1 = make_dcgm_version(c_dcgmModuleGetStatuses_v1, 1)
DCGM_PROF_MAX_NUM_GROUPS_V2 = 10 # Maximum number of metric ID groups that can exist in DCGM
DCGM_PROF_MAX_FIELD_IDS_PER_GROUP_V2 = 64 # Maximum number of field IDs that can be in a single DCGM profiling metric group
class c_dcgmProfMetricGroupInfo_v2(_PrintableStructure):
_fields_ = [
('majorId', c_ushort),
('minorId', c_ushort),
('numFieldIds', c_uint32),
('fieldIds', c_ushort * DCGM_PROF_MAX_FIELD_IDS_PER_GROUP_V2),
]
class c_dcgmProfGetMetricGroups_v3(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('unused', c_uint32),
('gpuId', c_uint32),
('numMetricGroups', c_uint32),
('metricGroups', c_dcgmProfMetricGroupInfo_v2 * DCGM_PROF_MAX_NUM_GROUPS_V2),
]
dcgmProfGetMetricGroups_version3 = make_dcgm_version(c_dcgmProfGetMetricGroups_v3, 3)
class c_dcgmVersionInfo_v2(_PrintableStructure):
_fields_ = [
('version', c_uint32),
('rawBuildInfoString', c_char * (DCGM_MAX_STR_LENGTH * 2)),
]
dcgmVersionInfo_version2 = make_dcgm_version(c_dcgmVersionInfo_v2, 2)
dcgmVersionInfo_version = dcgmVersionInfo_version2
| DCGM-master | testing/python3/dcgm_structs.py |
#! /usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
print('Starting DCGM test framework')
# Early check to make sure we're running on a supported version of Python
import sys
print('Python version: {}'.format(sys.version))
def _version_check():
version = sys.version.split()[0] # Discard compilation information
version_tuple = tuple(map(int, version.split('.')))
if version_tuple < (3, 5):
print('DCGM Testing framework requires Python 3.5+')
sys.exit(1)
_version_check()
import os
import platform
import test_utils
import option_parser
import logger
import utils
import shutil
import nvidia_smi_utils
from subprocess import check_output, Popen, CalledProcessError
from run_tests import run_tests
from run_tests import print_test_info
def is_file_binary(FileName):
""" Checks for binary files and skips logging if True """
try:
with open(FileName, 'rb') as f:
# Files with null bytes are binary
if b'\x00' in f.read():
print("\n=========================== " + FileName + " ===========================\n")
print("File is binary, skipping log output!")
return True
else:
return False
except IOError:
pass
def _summarize_tests():
test_root = test_utils.SubTest.get_all_subtests()[0]
tests_ok_count = test_root.stats[test_utils.SubTest.SUCCESS]
tests_fail_count = test_root.stats[test_utils.SubTest.FAILED]
tests_waived_count = test_root.stats[test_utils.SubTest.SKIPPED]
tests_count = tests_ok_count + tests_fail_count
# Dump all log output in Eris
if tests_fail_count > 0 and option_parser.options.eris:
logPath = os.path.join(logger.default_log_dir, logger.log_dir)
logFiles = os.listdir(logPath)
for logFile in logFiles:
logFilePath = os.path.join(logPath,logFile)
if not is_file_binary(logFilePath) and not os.path.isdir(logFilePath):
print("\n=========================== " + logFile + " ===========================\n")
with open(logFilePath, "r", encoding="utf-8-sig") as f:
print(f.read())
logger.info("\n========== TEST SUMMARY ==========\n")
logger.info("Passed: {}".format(tests_ok_count))
logger.info("Failed: {}".format(tests_fail_count))
logger.info("Waived: {}".format(tests_waived_count))
logger.info("Total: {}".format(tests_count))
tests_completed_ratio = 0.0
if tests_count > 0.0:
tests_completed_ratio = float(tests_ok_count) / (float(tests_count) - (float(tests_fail_count / 2)))
logger.info("Score: %.2f" % (100.0 * tests_completed_ratio))
logger.info("==================================\n\n")
warnings_count = logger.messages_level_counts[logger.WARNING]
if warnings_count > 0:
logger.warning("Framework encountered %d warning%s" % (warnings_count, utils.plural_s(warnings_count)))
if tests_ok_count < tests_count:
logger.info()
logger.info("Bug filing instructions:")
logger.info(" * In bug description please include first and last error")
logger.info(" * Also attach %s file (it already contains nvml trace logs, nvidia-bug report and stdout)" % (logger.log_archive_filename))
def _run_burn_in_tests():
file_name = "burn_in_stress.py"
if os.path.exists(file_name):
logger.info("\nRunning a single iteration of Burn-in Stress Test! \nPlease wait...\n")
#updates environment for the child process
env = os.environ.copy()
#remove env. variables below to prevent log file locks
if "__DCGM_DBG_FILE" in env: del env["__DCGM_DBG_FILE"]
if "__NVML_DBG_FILE" in env: del env["__NVML_DBG_FILE"]
if option_parser.options.dvssc_testing:
burn = Popen([sys.executable, file_name, "-t", "3", "--dvssc-testing"], stdout=None, stderr=None, env = env)
else:
burn = Popen([sys.executable, file_name, "-t", "3"], stdout=None, stderr=None, env = env)
if burn.pid is None:
assert False, "Failed to launch Burn-in Tests"
burn.wait()
else:
logger.warning("burn_in_stress.py script not found!")
def do_root_check_possibly_exit():
if not option_parser.options.no_root_check:
if not utils.is_root():
print("The test framework must be run as root to function properly. Switch to root by running 'sudo su'.")
sys.exit(1)
class TestFrameworkSetup(object):
def __enter__(self):
'''Initialize the test framework or exit on failure'''
os.environ['__DCGM_TESTING_FRAMEWORK_ACTIVE'] = '1'
# Make sure that the MPS server is disabled before running the test-suite
if utils.is_mps_server_running():
print('DCGM Testing framework is not interoperable with MPS server. Please disable MPS server.')
sys.exit(1)
# Various setup steps
option_parser.parse_options()
# The test framework must be run as root. Check after initializing the option_parser
do_root_check_possibly_exit()
utils.verify_user_file_permissions()
utils.verify_localhost_dns()
if not option_parser.options.use_running_hostengine:
utils.verify_hostengine_port_is_usable()
utils.verify_dcgm_service_not_active()
if not test_utils.verify_dcgmi_executible_visible_for_all_users():
print('DCGM Testing framework is located in a directory that does not have the proper permissions to run ' \
'tests under an unprivileged service account.')
print('See the logs to understand which part of the path lacks the read+execute permissions')
print('Either run `chmod o+rx <directory>` or move the DCGM Testing framework to another location')
sys.exit(1)
utils.verify_nvidia_fabricmanager_service_active_if_needed()
# Setup logging regardless of if we're going to be logging or not in order to clear out old logs
# and initialize logger classes.
logger.setup_environment()
option_parser.validate()
if not test_utils.is_framework_compatible():
logger.fatal("The test framework and dcgm versions are incompatible. Exiting Test Framework.")
sys.exit(1)
# Directory where DCGM test*.py files reside
test_utils.set_tests_directory('tests')
# Verify that package architecture matches python architecture
if utils.is_64bit():
# ignore this check on ppc64le and armv8 for now
if not (platform.machine() == "ppc64le" or platform.machine() == "aarch64"):
if not os.path.exists(os.path.join(utils.script_dir, "apps/amd64")):
print("Testing package is missing 64bit binaries, are you sure you're using package of correct architecture?")
sys.exit(1)
else:
if not os.path.exists(os.path.join(utils.script_dir, "apps/x86")):
print("Testing package is missing 32bit binaries, are you sure you're using package of correct architecture?")
sys.exit(1)
# Stops the framework if running python 32bits on 64 bits OS
if utils.is_windows():
if os.name == "nt" and "32 bit" in sys.version and platform.machine() == "AMD64":
print("Running Python 32-bit on a 64-bit OS is not supported. Please install Python 64-bit")
sys.exit(1)
if utils.is_linux():
python_exec = str(sys.executable)
python_arch = check_output(["file", "-L", python_exec])
if "32-bit" in python_arch.decode('utf-8') and utils.is_64bit() == True:
print("Running Python 32-bit on a 64-bit OS is not supported. Please install Python 64-bit")
sys.exit(1)
#Tell DCGM how to find our testing package's NVVS
test_utils.set_nvvs_bin_path()
def __exit__(self, type, value, traceback):
logger.close()
del os.environ['__DCGM_TESTING_FRAMEWORK_ACTIVE']
pass
def main():
with TestFrameworkSetup():
if not option_parser.options.no_env_check:
if not test_utils.is_test_environment_sane():
logger.warning("The test environment does not seem to be healthy, test framework cannot continue.")
sys.exit(1)
if not option_parser.options.no_process_check:
if not nvidia_smi_utils.are_gpus_free():
sys.exit(1)
else:
logger.warning("Not checking for other processes using the GPU(s), test failures may occur.")
if option_parser.options.test_info:
print_test_info()
return
testdir = os.path.dirname(os.path.realpath(__file__))
os.environ['GCOV_PREFIX'] = os.path.join(testdir, "_coverage/python")
os.environ['GCOV_PREFIX_STRIP'] = '5'
if test_utils.noLogging:
run_tests()
else:
logger.run_with_coverage(run_tests())
_summarize_tests()
# Runs a single iteration of burn_in_stress test
if option_parser.options.burn:
_run_burn_in_tests()
if __name__ == '__main__':
main()
| DCGM-master | testing/python3/main.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_fields
import dcgm_structs
import logger
import subprocess
import os
import time
import threading
import sys
from apps.app_runner import AppRunner
from DcgmReader import DcgmReader
from dcgm_field_injection_helpers import inject_value
class FieldReader(DcgmReader):
def __init__(self, expectedValue, desiredNumMatches, *args, **kwargs):
super(FieldReader, self).__init__(*args, **kwargs)
self._expectedValue = expectedValue
self._desiredNumMatches = desiredNumMatches
self._mostRecentTs = 0
self.numMatchesSeen = 0
self.passed = False
def CustomFieldHandler(self, gpuId, fieldId, fieldTag, val):
"""
This method is called once for each field for each GPU each
time that its Process() method is invoked, and it will be skipped
for blank values and fields in the ignore list.
fieldTag is the field name, and val is a dcgm_field_helpers.DcgmFieldValue instance.
"""
if val.ts > self._mostRecentTs:
self._mostRecentTs = val.ts
else:
return
if val.value == self._expectedValue:
self.numMatchesSeen += 1
if self.numMatchesSeen == self._desiredNumMatches:
self.passed = True
return
STANDALONE_DENYLIST_SCRIPT_NAME = "denylist_recommendations.py"
def createDenylistApp(numGpus=None, numSwitches=None, testNames=None, instantaneous=False):
args = ["./%s" % STANDALONE_DENYLIST_SCRIPT_NAME]
if numGpus == None or numSwitches == None:
args.append("-d")
else:
args.append("-g")
args.append(str(numGpus))
args.append("-s")
args.append(str(numSwitches))
if instantaneous:
args.append("-i")
elif testNames:
args.append("-r")
args.append(testNames)
else:
args.append("-r")
args.append("memory bandwidth")
return AppRunner(sys.executable, args)
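# Example usage of createDenylistApp (illustrative only; the AppRunner call pattern
# shown here is an assumption):
#
#   app = createDenylistApp(numGpus=4, numSwitches=0, instantaneous=True)
#   app.run()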
## Helper for verifying inserted values
STANDALONE_VALUE_VERIFICATION_SCRIPT_NAME = "verify_field_value.py"
def verify_field_value(gpuId, fieldId, expectedValue, maxWait=2, checkInterval=0.1, numMatches=3):
"""
Verify that DCGM sees the expected value for the specified field ID. Waits a maximum of maxWait seconds to see
the given value.
- numMatches is the number of times the expected value must be seen before the verification is considered successful.
- checkInterval is the update interval for the given fieldId in seconds (this is also roughly the interval
at which the value test is performed).
Returns True on successful verification and False otherwise.
"""
interval_in_usec = int(checkInterval * 1000000)
fr = FieldReader(expectedValue, numMatches, fieldIds=[fieldId], updateFrequency=interval_in_usec, gpuIds=[gpuId])
start = time.time()
while (time.time() - start) < maxWait:
fr.Process()
if fr.passed:
return True
time.sleep(checkInterval)
# If we were unable to see the expected value, log how many times we did see it before failing
logger.info("Saw expected value %s (for field %s) %s times" % (expectedValue, fieldId, fr.numMatchesSeen))
return False
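# Example usage of verify_field_value (illustrative only; the field and GPU IDs below
# are placeholders):
#
#   if not verify_field_value(0, dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, 5, maxWait=5, numMatches=1):
#       raise AssertionError("DCGM never reported the injected value")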
# Verifies that the NVVS process is running. This is used to make tests more deterministic instead of sleeping
# and hoping that the NVVS process has started at the end of the sleep
def check_nvvs_process(want_running, delay=0.5, attempts=20):
"""
Checks status of nvvs process.
If want_running is True, method returns True if nvvs is running.
If want_running is False, method returns True if nvvs is NOT running.
"""
retry_count = 0
debug_output = ''
while retry_count < attempts:
retry_count += 1
time.sleep(delay) # delay for a bit before trying again
try:
# If pgrep is run via a shell, there will be extraneous output caused by the shell command itself
debug_output = subprocess.check_output(["pgrep", "-l", "-f", "apps/nvvs/nvvs"])
if want_running:
return True, debug_output
except subprocess.CalledProcessError:
if not want_running:
return True, debug_output
return False, debug_output
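# Example usage of check_nvvs_process (illustrative only): block until the diagnostic
# has actually launched before proceeding with the test.
#
#   running, debug_output = check_nvvs_process(want_running=True, attempts=50)
#   assert running, "nvvs did not start: %s" % debug_output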
| DCGM-master | testing/python3/dcgm_internal_helpers.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import datetime
import inspect
import ctypes
import platform
import string
import os
import sys
import argparse
import _thread
import threading
import test_utils
import utils
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent
import dcgm_agent_internal
import dcgmvalue
import dcgm_fields
import DcgmiDiag
import nvidia_smi_utils
import option_parser
import subprocess
import shlex
import pydcgm
import apps
import logger
import json
from datetime import date, timedelta
from subprocess import Popen, PIPE, STDOUT, check_output, check_call, CalledProcessError
import version
# Initializing logger
logger.log_dir = os.path.join(os.getcwd(), "_out_runLogs")
script_dir = os.path.realpath(sys.path[0])
# Global variables
DEFAULT_POWER_LIMIT = 0
MAX_POWER_LIMIT = 1
MIN_POWER_LIMIT = 2
TOTAL_TEST_PASSED = 0
TOTAL_TEST_FAILED = 0
TOTAL_TEST_WAIVED = 0
TOTAL_TEST_COUNT = 0
TOTAL_TEST_CYCLES = 0
def get_dcgmi_bin_directory():
"""
Function to return the directory where dcgmi is expected
Example: apps/amd64
"""
path = ""
if platform.machine() == 'x86_64' and platform.system() == 'Linux':
path = "apps/amd64"
elif platform.machine() == 'aarch64' and platform.system() == 'Linux':
path = "apps/aarch64"
elif platform.machine() == 'ppc64le' and platform.system() == 'Linux':
path = "apps/ppc64le"
else:
print("Unsupported platform. Please modify get_dcgmi_bin_directory()")
sys.exit(1)
return path
def get_dcgmi_bin_path():
"""
Function to figure out what dcgmi binary to use based on the platform
"""
# Including future supported architectures
return get_dcgmi_bin_directory() + "/dcgmi"
dcgmi_absolute_path = os.path.join(script_dir, get_dcgmi_bin_path())
def get_newest_field_group_id(dcgmSystem):
field_group_id = ""
maxFieldGroupId = None
maxFieldGroupName = ""
fieldGroups = dcgmSystem.GetAllFieldGroups()
assert fieldGroups.numFieldGroups > 0
for idx in range(0, fieldGroups.numFieldGroups):
fieldGroup = fieldGroups.fieldGroups[idx]
if maxFieldGroupId is None or fieldGroup.fieldGroupId > maxFieldGroupId:
maxFieldGroupId = fieldGroup.fieldGroupId
maxFieldGroupName = fieldGroup.fieldGroupName
print("Most recent field group is ID %d, name %s" % (maxFieldGroupId, maxFieldGroupName))
return maxFieldGroupId
def updateTestResults(result):
"""
Helper to function to update test results count
"""
global TOTAL_TEST_PASSED
global TOTAL_TEST_FAILED
global TOTAL_TEST_WAIVED
global TOTAL_TEST_COUNT
global TOTAL_TEST_CYCLES
if "PASSED" in result:
TOTAL_TEST_PASSED += 1
elif "FAILED" in result:
TOTAL_TEST_FAILED += 1
elif "WAIVED" in result:
TOTAL_TEST_WAIVED += 1
elif "COUNT" in result:
TOTAL_TEST_COUNT += 1
elif "CYCLE" in result:
TOTAL_TEST_CYCLES += 1
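# Example usage of updateTestResults (illustrative only): each test bumps the aggregate
# counters by passing one of the recognized keywords.
#
#   updateTestResults("PASSED")
#   updateTestResults("COUNT")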
def getTestSummary():
"""
Function to print out final results
"""
print("\n============ SUMMARY TEST RESULTS ============\n")
print("Total number of Tests PASS: %d " % TOTAL_TEST_PASSED)
print("Total number of Tests FAILED: %d " % TOTAL_TEST_FAILED)
print("Total number of Tests WAIVED: %d " % TOTAL_TEST_WAIVED)
print("Total number of Tests: %d " % TOTAL_TEST_COUNT)
print("Total number of Cycles: %d " % TOTAL_TEST_CYCLES)
print("\n===============================================\n")
def setupEnvironment():
"""
Function to prepare the test environment
"""
# Set variable indicating we are running tests
os.environ['__DCGM_TESTING_FRAMEWORK_ACTIVE'] = '1'
# Verify if GPUs are free before running the tests
if not nvidia_smi_utils.are_gpus_free():
print("Some GPUs are in use, please make sure that GPUs are free and try again")
sys.exit(1)
if test_utils.is_framework_compatible() == False:
print("burn_in_stress.py found to be a different version than DCGM. Exiting")
sys.exit(1)
else:
print(("Running against Git Commit %s" % version.GIT_COMMIT))
test_utils.set_nvvs_bin_path()
# Collects the output of "nvidia-smi -q" and prints it out on the screen for debugging
print("\n###################### NVSMI OUTPUT FOR DEBUGGING ONLY ##########################")
(message, error) = nvidia_smi_utils.get_output()
if message:
print(message)
if error:
print(error)
print("\n###################### NVSMI OUTPUT FOR DEBUGGING ONLY ##########################\n\n")
print("---------> Enabling persistence mode <------------")
# Enable persistence mode or the tests will fail
(message, error) = nvidia_smi_utils.enable_persistence_mode()
if message:
print(message)
if error:
print(error)
sys.exit(1)
# class to enable printing different colors for terminal output
class bcolors:
PURPLE = '\033[95m' # purple
BLUE = '\033[94m' # blue
GREEN = '\033[92m' # green
YELLOW = '\033[93m' # yellow
RED = '\033[91m' # red
ENDC = '\033[0m' # ends coloring from this point on
BOLD = '\033[1m' # bold
UNDERLINE = '\033[4m' # underline
"""
Usage:
print bcolors.YELLOW + "Warning: No active frommets remain. Continue?" + bcolors.ENDC
The above would print a nice yellow warning
"""
class Logger(object):
def __init__(self):
import socket
self.terminal = sys.stdout
self.timestamp = str(time.strftime('%Y-%m-%d'))
self.logName = "DCGM-BURN-IN_%s_%s.log" % (socket.gethostname(), self.timestamp)
# Early attempt to clean up the old log file
if os.path.exists(self.logName):
try:
os.remove(self.logName)
except IOError:
print("\nUnable to remove older logs file.\n")
self.log = open(self.logName, 'a+')
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
self.terminal.flush()
self.log.flush()
class RunHostEngine(apps.NvHostEngineApp):
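"""Runs nv-hostengine and logs its memory and CPU usage to dated files (see mem_usage/cpu_usage)."""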
def __init__(self, writeDebugFile=False):
self.timestamp = str(time.strftime('%Y-%m-%d'))
self.memlog = "HOST_ENGINE_MEMORY_USAGE_%s.log" % self.timestamp
self.cpulog = "HOST_ENGINE_CPU_USAGE_%s.log" % self.timestamp
super(RunHostEngine, self).__init__()
self.writeDebugFile = writeDebugFile
def mem_usage(self, timeout):
"""
Monitors memory usage of the hostEngine
"""
pid = self.getpid()
vmem = []
rmem = []
hm = open(self.memlog, "a")
loopTime = 3 # Seconds between loops
timeout_start = time.time()
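# Poll /proc/<pid>/status until the timeout elapses; VmPeak/VmRSS are reported in kB and converted to MB below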
while time.time() < timeout_start + float(timeout):
filename = "/proc/%d/status" % pid
try:
fp = open(filename)
lines = fp.readlines()
except IOError as e:
print("Unable to read process file for pid %d: %d msg=%s fn=%s" % (pid, e.errno, e.message, e.filename))
time.sleep(loopTime)
continue
for line in lines:
if "VmPeak" in line:
vmem.append(float(line.split()[1])/1024)
if "VmRSS" in line:
rmem.append(float(line.split()[1])/1024)
if len(vmem) < 1 or len(rmem) < 1:
print("VmPeak or VmRSS not found in %d lines of %s" % (len(lines), filename))
time.sleep(loopTime)
continue
virtual_min = min(vmem)
virtual_max = max(vmem)
virtual_avg = sum(vmem) / len(vmem)
resident_min = min(rmem)
resident_max = max(rmem)
resident_avg = sum(rmem) / len(rmem)
hm.write("%s\n" % time.asctime())
hm.write("Virtual Memory info in MB, Min: %.4f, Max: %.4f, Avg: %.4f\n" % (virtual_min, virtual_max, virtual_avg))
hm.write("Resident Memory info in MB, Min: %.4f, Max: %.4f, Avg: %.4f\n" % (resident_min, resident_max, resident_avg))
hm.write("\n.........................................................\n\n")
time.sleep(loopTime)
hm.close()
def cpu_usage(self, timeout):
"""
Monitors cpu usage of the hostEngine
"""
pid = self.getpid()
cpu = []
hc = open(self.cpulog, "a")
timeout_start = time.time()
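# Sample the host engine's CPU usage via 'ps' every 3 seconds; the second line of the output holds the %cpu value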
while time.time() < timeout_start + float(timeout):
cmd = check_output(shlex.split("ps -p %d -o %%cpu" % pid)).decode('utf-8')
cpu.append(float(cmd.split("\n")[1]))
time.sleep(3)
cpu_min = min(cpu)
cpu_max = max(cpu)
cpu_avg = sum(cpu) / len(cpu)
hc.write("%s\n" % time.asctime())
hc.write("CPU %% used for HostEngine, Min: %.4f, Max: %.4f, Avg: %.4f\n" % (cpu_min, cpu_max, cpu_avg))
hc.write("\n.........................................................\n\n")
hc.close()
class BurnInHandle(object):
"""
This class is used to communicate with the host engine from the burn-in tests
hostEngineIp is the IP address of the running host engine. None=start embedded.
burnInCfg is the parsed command-line parameters. Note that we're not using the IP address from these
"""
def __init__(self, hostEngineIp, burnInCfg):
self.dcgmHandle = None
self.dcgmSystem = None
self.burnInCfg = burnInCfg
self.hostEngineIp = hostEngineIp
self.Connect()
def Connect(self):
self.dcgmHandle = pydcgm.DcgmHandle(ipAddress=self.hostEngineIp, opMode=dcgm_structs.DCGM_OPERATION_MODE_AUTO)
self.dcgmSystem = self.dcgmHandle.GetSystem()
def __del__(self):
if self.dcgmSystem is not None:
del(self.dcgmSystem)
self.dcgmSystem = None
if self.dcgmHandle is not None:
del(self.dcgmHandle)
self.dcgmHandle = None
def GetGpuIds(self):
"""
Function to get a list of all GPU IDs. We are looking at the default group's GPU IDs rather than
dcgmGetAllDevices()'s since the default group includes GPU IDs DCGM cares about
"""
dcgmGroup = self.dcgmSystem.GetDefaultGroup()
groupGpuIds = dcgmGroup.GetGpuIds()
assert len(groupGpuIds) > 0, "DCGM doesn't see any enabled GPUs. Set __DCGM_WL_BYPASS=1 in your environment to bypass DCGM's allowlist"
# See if the user provided the GPU IDs they care about. If so, return those
if len(self.burnInCfg.onlyGpuIds) < 1:
return groupGpuIds
for gpuId in self.burnInCfg.onlyGpuIds:
assert(gpuId in groupGpuIds), "User-specified GPU ID %d is not known by the system. System GPU IDs: %s" % (gpuId, str(groupGpuIds))
return self.burnInCfg.onlyGpuIds
def GetGpuAttributes(self, gpuIds=None):
"""
Get an array of dcgm_structs.c_dcgmDeviceAttributes_v3 entries for the passed in gpuIds. None=all devices DCGM knows about
"""
retList = []
if gpuIds is None:
gpuIds = self.GetGpuIds()
for gpuId in gpuIds:
retList.append(self.dcgmSystem.discovery.GetGpuAttributes(gpuId))
return retList
def GetBusIds(self):
"""
Get the PCI-E Bus IDs of the attached devices.
Returns an array of strings like ["0:1:0", "0:2:0"]
"""
busIdList = []
attributeList = self.GetGpuAttributes()
for attributeElem in attributeList:
busIdList.append(attributeElem.identifiers.pciBusId)
return busIdList
def GetValueForFieldId(self, gpuId, fieldId):
'''
Watch and get the value of a fieldId
Returns a dcgmFieldValue_v1 instance
'''
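# Watch the field before reading it (arguments: update period in microseconds, max sample age in seconds, max sample count)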
dcgm_agent_internal.dcgmWatchFieldValue(self.dcgmHandle.handle, gpuId, fieldId, 60000000, 3600.0, 0)
self.dcgmSystem.UpdateAllFields(1)
values = dcgm_agent_internal.dcgmGetLatestValuesForFields(self.dcgmHandle.handle, gpuId, [fieldId, ])
return values[0]
def GpuSupportsEcc(self, gpuId):
'''
Returns whether (True) or not (False) a gpu supports ECC
'''
value = self.GetValueForFieldId(gpuId, dcgm_fields.DCGM_FI_DEV_ECC_CURRENT)
if dcgmvalue.DCGM_INT64_IS_BLANK(value.value.i64):
return False
else:
return True
def GetGpuIdsGroupedBySku(self):
'''
Get the GPU IDs DCGM supports, grouped by SKU. The return value will be a list of lists like:
[[gpu0, gpu1], [gpu2, gpu3]]
In the above example, gpu0 and gpu1 are the same sku, and gpu2 and gpu3 are the same sku
'''
gpuIds = self.GetGpuIds()
listBySku = test_utils.group_gpu_ids_by_sku(self.dcgmHandle.handle, gpuIds)
return listBySku
class RunCudaCtxCreate:
def __init__(self, gpuIds, burnInHandle, runTimeSeconds, timeoutSeconds):
self.burnInHandle = burnInHandle
self.runTimeSeconds = runTimeSeconds
self.timeoutSeconds = timeoutSeconds
# Gets gpuId list
# gpuIds = self.burnInHandle.GetGpuIds()
self._apps = []
if gpuIds is None or len(gpuIds) < 1:
gpuIds = self.burnInHandle.GetGpuIds()
deviceAttribs = self.burnInHandle.GetGpuAttributes(gpuIds)
for deviceAttrib in deviceAttribs:
busId = deviceAttrib.identifiers.pciBusId
assert len(busId) > 0, ("Failed to get busId for device %d" % deviceAttrib.identifiers.gpuId)
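# cuda_ctx_create args: create a context on this bus ID and keep the GPU busy for runTimeSeconds (converted to milliseconds)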
args = ["--ctxCreate", busId, "--busyGpu", busId, str(self.runTimeSeconds * 1000)]
app = apps.CudaCtxCreateAdvancedApp(args)
app.busId = busId
self._apps.append(app)
def start(self):
for app in self._apps:
print("Generating Cuda Workload for GPU %s " % app.busId + " at %s \n" % time.asctime())
app.start(timeout=self.timeoutSeconds)
def wait(self):
for app in self._apps:
app.wait()
def terminate(self):
for app in self._apps:
app.terminate()
app.validate()
def getpids(self):
pids = []
for app in self._apps:
pids.append(app.getpid())
return pids
def get_host_ip(burnInCfg):
if burnInCfg.remote:
return burnInCfg.srv
return "127.0.0.1"
# Helper Class to do group operations
class GroupsOperationsHelper:
def __init__(self, burnInHandle):
self.group_id = None
self.burnInHandle = burnInHandle
self.dcgmi_path = get_dcgmi_bin_path()
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def __safe_checkcall(self, args):
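# Run dcgmi with the given args; non-zero exit codes are swallowed so one failing command does not abort the burn-in run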
try:
check_call([self.dcgmi_path]+args)
except CalledProcessError:
pass
return True
def __safe_checkoutput(self, args):
rc = None
try:
rc = check_output([self.dcgmi_path]+args)
except CalledProcessError:
pass
return rc
def create_group(self, groupName, gpuIds):
if groupName is None:
groupName = "Group"
args = ["group", "--host", self.host_ip, "-c", groupName]
output = self.__safe_checkoutput(args)
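# The new group id is parsed from the last token of dcgmi's output; -1 indicates the command failed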
self.group_id = int(output.strip().split()[-1]) if output is not None else -1
for gpuId in gpuIds:
self.add_device(gpuId)
return self.group_id
def list_group(self):
args = ["group", "--host", self.host_ip, "-l"]
self.__safe_checkcall(args)
def get_group_info(self):
assert self.group_id is not None
args = ["group", "--host", self.host_ip, "-g", str(self.group_id), "-i"]
return self.__safe_checkcall(args)
def add_device(self, gpuId):
assert self.group_id is not None
args = ["group", "--host", self.host_ip, "-g", str(self.group_id), "-a", str(gpuId)]
return self.__safe_checkcall(args)
def remove_device(self, gpuId):
assert self.group_id is not None
args = ["group", "--host", self.host_ip, "-g", str(self.group_id), "-r", str(gpuId)]
return self.__safe_checkcall(args)
def delete_group(self):
assert self.group_id is not None
args = ["group", "--host", self.host_ip, "-d", str(self.group_id)]
return self.__safe_checkcall(args)
# Run the GROUPS subsystem tests
class GroupTests:
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
print("The hostEngine IP is %s\n" % self.host_ip)
def test1_create_group(self, gpuIds):
""" Test to create groups using the subsystem groups """
#Intentionally create the group as empty so that test4_add_device_to_group doesn't fail
self.group_id = self.groups_op.create_group(None, [])
print("Creating a default test group: %s" % self.group_id)
return self.group_id > 0
def test2_list_group(self, gpuIds):
""" Test to list existing groups using the subsystem groups """
args = ["group", "--host", self.host_ip, "-l"]
print("Listing the test group: %s" % args)
return args
def test3_get_group_info(self, gpuIds):
""" Test to get info about exiting groups using the subsystem groups """
args = ["group", "--host", self.host_ip, "-g", str(self.group_id), "-i"]
print("Showing info about the test group: %s" % args)
return args
def test5_remove_device_from_group(self, gpuIds):
""" Test to remove a GPU from a group using the subsystem groups """
# Removes a GPU from the test group
args = ["group", "--host", self.host_ip, "-g", str(self.group_id), "-r", str(gpuIds[0])]
print("Removing GPU %s from the test group: %s" % (gpuIds[0], args))
return args
def test4_add_device_to_group(self, gpuIds):
""" Test to add a GPU to a groups using the subsystem groups """
args = ["group", "--host", self.host_ip, "-g", str(self.group_id), "-a", str(gpuIds[0])]
print("Adding GPU %s to the test group: %s" % (gpuIds[0], args))
return args
def test6_delete_group(self, gpuIds):
""" Test deleting existing groups using the subsystem groups """
print("Deleting the default test group: %s" % self.group_id)
return self.groups_op.delete_group()
# Run the CONFIG subsystem tests
class ConfigTests:
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_create_group(self, gpuIds):
""" Creates a group for testing the subsystem config """
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def _set_compute_mode_helper(self, gpuIds, comp):
""" Test --set compute mode values on "config" subsystem """
args = ["config", "--host", self.host_ip, "-g", str(self.group_id), "--set", "-c", comp]
print("Setting different compute modes: %s" % args)
return args
# Runs with each possible compute mode
@test_utils.run_only_if_mig_is_disabled()
def test2_set_compute_mode_value_0(self, gpuIds):
return self._set_compute_mode_helper(gpuIds, "2")
@test_utils.run_only_if_mig_is_disabled()
def test2_set_compute_mode_value_1(self, gpuIds):
return self._set_compute_mode_helper(gpuIds, "1")
@test_utils.run_only_if_mig_is_disabled()
def test2_set_compute_mode_value_2(self, gpuIds):
return self._set_compute_mode_helper(gpuIds, "0")
def get_power_power_limit(self, pLimitType, gpuIds):
"""
Helper function to get power limit for the device
"""
deviceAttrib = self.burnInHandle.dcgmSystem.discovery.GetGpuAttributes(gpuIds[0])
if pLimitType == DEFAULT_POWER_LIMIT:
pwrLimit = str(deviceAttrib.powerLimits.defaultPowerLimit)
elif pLimitType == MAX_POWER_LIMIT:
pwrLimit = str(deviceAttrib.powerLimits.maxPowerLimit)
elif pLimitType == MIN_POWER_LIMIT:
pwrLimit = str(deviceAttrib.powerLimits.minPowerLimit)
return pwrLimit
def _set_power_limit_helper(self, kind, pwr):
""" Test --set power limit values on "config" subsystem """
args = ["config", "--host", self.host_ip, "-g", str(self.group_id), "--set", "-P", pwr]
print("Setting %s power limit for devices: %s" % (kind, args))
return args
# Runs and tries to set the minimum power limit supported
def test3_set_min_power_limit(self, gpuIds):
defPowerLimit = self.get_power_power_limit(DEFAULT_POWER_LIMIT, gpuIds)
minPowerLimit = self.get_power_power_limit(MIN_POWER_LIMIT, gpuIds)
if defPowerLimit == minPowerLimit:
print("Only the default power limit is available for this device, skipping minimum power limit test")
RunDcgmi.print_test_footer(inspect.currentframe().f_code.co_name, "SKIPPED", bcolors.PURPLE)
return ""
else:
minPowerLimit = int(minPowerLimit) + 1 # +1 to address fractions lost in data type conversion
return self._set_power_limit_helper("minimum", str(minPowerLimit))
def test4_set_max_power_limit(self, gpuIds):
defPowerLimit = self.get_power_power_limit(DEFAULT_POWER_LIMIT, gpuIds)
maxPowerLimit = self.get_power_power_limit(MAX_POWER_LIMIT, gpuIds)
maxPowerLimit = int(maxPowerLimit) - 1 # -1 to address fractions lost in data type conversion
return self._set_power_limit_helper("maximum", str(maxPowerLimit))
def test5_set_default_power_limit(self, gpuIds):
defPowerLimit = self.get_power_power_limit(DEFAULT_POWER_LIMIT, gpuIds)
return self._set_power_limit_helper("default", defPowerLimit)
def _set_application_clocks_helper(self, mem, sm):
""" Test --set application clocks on "config" subsystem """
args = ["config", "--host", self.host_ip, "-g", str(self.group_id), "--set", "-a", "%s,%s" % (mem, sm)]
print("Setting application clocks values for \"mem,proc\": %s" % args)
return args
# Runs and tries to set the application clocks to the first supported mem/SM clock pair
def test6_set_application_clocks_values(self, gpuIds):
deviceAttrib = self.burnInHandle.dcgmSystem.discovery.GetGpuAttributes(gpuIds[0])
sm_clk = str(deviceAttrib.clockSets.clockSet[0].smClock)
mem_clk = str(deviceAttrib.clockSets.clockSet[0].memClock)
return self._set_application_clocks_helper(mem_clk, sm_clk)
def test7_enforce_values(self, gpuIds):
""" Test on "config" subsystem using the "enforce" operation """
# Trying to enforce previous "--set" configurations for each device
args = ["config", "--host", self.host_ip, "-g", str(self.group_id), "--enforce"]
print("Trying to enforce last configuration used via \"--set\": %s" % args)
time.sleep(1)
return args
def _set_sync_boost_helper(self, gpuIds, val):
""" Test --set syncboost on "config" subsystem """
args = ["config", "--host", self.host_ip, "-g", str(self.group_id), "--set", "-s", val]
print("Enable/Disable syncboost feature on device: %s" % args)
return args
#These tests aren't valid as long as we're running on one GPU at a time
#Runs and tries to enable and disable sync_boost
#def test8_set_sync_boost_value_0(self, dev): return self._set_sync_boost_helper(dev, "0")
#def test8_set_sync_boost_value_1(self, dev): return self._set_sync_boost_helper(dev, "1")
def _set_ecc_helper(self, gpuIds, val):
""" Test --set ecc on "config" subsystem """
for gpuId in gpuIds:
if not self.burnInHandle.GpuSupportsEcc(gpuId):
print("Skipping ECC tests for GPU %d that doesn't support ECC" % gpuId)
RunDcgmi.print_test_footer("ECC tests", "SKIPPED", bcolors.PURPLE)
return ""
args = ["config", "--host", self.host_ip, "-g", str(self.group_id), "--set", "-e", val]
print("Enable/Disable ecc on device: %s" % args)
return args
# Runs and tries to enable and disable ecc / DISABLED until GPU Reset issue can be resolved
# def test9_set_ecc_value_0(self, gpuIds): return self._set_ecc_helper(gpuIds, "0")
# def test9_set_ecc_value_1(self, gpuIds): return self._set_ecc_helper(gpuIds, "1")
def test11_get_values(self, gpuIds):
""" Test getting values on "config" subsystem using the get operation """
args = ["config", "--host", self.host_ip, "-g", str(self.group_id), "--get"]
print("Getting subsystem \"config\" information for a group: %s" % args)
time.sleep(1)
return args
def test12_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Run the DISCOVERY subsystem tests
class DiscoveryTests():
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_create_group(self, gpuIds):
""" Creates a group for testing the subsystem discovery """
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def test2_discovery_gpus(self, gpuIds):
""" Test to list existing GPUs """
args = ["discovery", "--host", self.host_ip, "-l"]
print("Querying existing gpus: %s" % args)
return args
def _discovery_device_info_helper(self, gpuIds, flag):
""" Test to get discovery info for device per feature """
args = ["discovery", "--host", self.host_ip, "-g", str(self.group_id), "-i", flag]
print("Querying info in group: %s" % args)
return args
def test3_discovery_device_info_a(self, gpuIds): return self._discovery_device_info_helper(gpuIds, "a")
def test3_discovery_device_info_p(self, gpuIds): return self._discovery_device_info_helper(gpuIds, "p")
def test3_discovery_device_info_t(self, gpuIds): return self._discovery_device_info_helper(gpuIds, "t")
def test3_discovery_device_info_c(self, gpuIds): return self._discovery_device_info_helper(gpuIds, "c")
def test4_discovery_device(self, gpuIds):
""" Test to discovery info of each GPU """
args = ["discovery", "--host", self.host_ip, "--gpuid", str(gpuIds[0]), "-i", "aptc"]
print("Querying info for GPU %s: %s" % (gpuIds[0], args))
return args
def test5_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Run the HEALTH subsystem tests
class HealthTests:
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_create_group(self, gpuIds):
""" Creates a group for testing the subsystem health """
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def _set_health_watches_helper(self, gpuIds, flag):
""" Test to set health watches """
args = ["health", "--host", self.host_ip, "-g", str(self.group_id), "--set", flag]
print("Setting health watch %s: %s" % (flag, args))
return args
def test2_health_set_watches_t(self, gpuIds): return self._set_health_watches_helper(gpuIds, "t")
def test2_health_set_watches_p(self, gpuIds): return self._set_health_watches_helper(gpuIds, "p")
def test2_health_set_watches_m(self, gpuIds): return self._set_health_watches_helper(gpuIds, "m")
def test2_health_set_watches_a(self, gpuIds): return self._set_health_watches_helper(gpuIds, "a")
def test2_health_set_watches_i(self, gpuIds): return self._set_health_watches_helper(gpuIds, "i")
def test2_health_set_watches_n(self, gpuIds): return self._set_health_watches_helper(gpuIds, "n")
def test3_health_fetch_watchers_status(self, gpuIds):
""" Test to fetch watcher list """
args = ["health", "--host", self.host_ip, "-g", str(self.group_id), "--fetch"]
print("Fetching Health Watches: %s" % args)
return args
def test4_health_check(self, gpuIds):
""" Test to check the overall health """
args = ["health", "--host", self.host_ip, "-g", str(self.group_id), "--check"]
print("Checking overall health: %s" % args)
return args
def test5_health_clear_watches(self, gpuIds):
""" Test to clear all watches """
args = ["health", "--host", self.host_ip, "-g", str(self.group_id), "--clear"]
print("Clearing all Health Watches: %s" % args)
return args
def test6_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Runs the DIAG subsystem tests
class DiagnosticsTests:
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_create_group(self, gpuIds):
""" Creates a group for testing the subsystem diag """
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def _set_diag_helper(self, gpuIds, flag):
""" Test to run diag tests """
args = ["diag", "--host", self.host_ip, "-g", str(self.group_id), "--run", flag]
print("Running Diagnostic Test : %s" % args)
return args
def test2_diag1_short(self, gpuIds): return self._set_diag_helper(gpuIds, "1")
def test2_diag2_medium(self, gpuIds): return self._set_diag_helper(gpuIds, "2")
def test2_diag3_long(self, gpuIds): return self._set_diag_helper(gpuIds, "3")
def test3_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Run the TOPO subsystem tests
class TopologyTests:
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_create_group(self, gpuIds):
""" Creates a group for testing the subsystem diag """
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def test2_query_topology_by_groupId(self, gpuIds):
""" Test to read topology by group id """
args = ["topo", "--host", self.host_ip, "-g", str(self.group_id)]
print("Reading topology by group Id")
return args
def test3_query_topology_by_gpuId(self, gpuIds):
""" Test to read topology by gpu id """
args = ["topo", "--host", self.host_ip, "--gpuid", str(gpuIds[0])]
print("Reading topology by GPU Id %s: %s" % (gpuIds[0], args))
return args
def test4_delete_group(self, dev):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Run the POLICY subsystem tests
class PolicyTests:
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_create_group(self, gpuIds):
""" Creates a group for testing the subsystem policy """
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def test2_get_current_policy_violation_by_groupId(self, gpuIds):
""" Get the current violation policy by group id """
args = ["policy", "--host", self.host_ip, "-g", str(self.group_id), "--get"]
print("Getting current violation policy list: %s " % args)
return args
def test3_set_pcierrors_policy_violation(self, gpuIds):
""" Set the policy violation by group id """
actions = ["0","1"] # 0->None, 1->GPU Reset
validations = ["0","1","2","3"] # 0->None, 1-> NVVS(short), 2-> NVVS(medium), 3-> NVVS(long)
for action in actions:
for val in validations:
args = ["policy", "--host", self.host_ip, "-g", str(self.group_id), \
"--set", "%s,%s" % (action,val), "-p"]
print("Setting PCI errors sviolation policy list action %s, validation %s: %s " % (action, val, args))
return args
def test4_set_eccerrors_policy_violation(self, gpuIds):
""" Set the policy violation by group id """
actions = ["0","1"] # 0->None, 1->GPU Reset
validations = ["0","1","2","3"] # 0->None, 1-> NVVS(short), 2-> NVVS(medium), 3-> NVVS(long)
for action in actions:
for val in validations:
args = ["policy", "--host", self.host_ip, "-g", str(self.group_id), \
"--set", "%s,%s" % (action,val), "-e"]
print("Setting ECC errors violation policy list action %s, validation %s: %s " % (action, val, args))
return args
def test5_set_power_temperature_values_policy(self, gpuIds):
""" Get the violation policy for all devices at once """
deviceAttrib = self.burnInHandle.dcgmSystem.discovery.GetGpuAttributes(gpuIds[0])
max_temp = str(deviceAttrib.thermalSettings.slowdownTemp)
max_pwr = str(deviceAttrib.powerLimits.maxPowerLimit)
max_pages = str(60)
args = ["policy", "--host", self.host_ip, "-g", str(self.group_id), "--set", "1,1", "-T", max_temp, "-P", max_pwr, "-M", max_pages]
print("Setting max power and max temperature for policy list: %s " % args)
return args
def test6_get_detailed_policy_violation(self, gpuIds):
""" Get the violation policy for all devices at once """
args = ["policy", "--host", self.host_ip, "-g", str(self.group_id), "--get", "-v"]
print("Getting detailed violation policy list: %s " % args)
return args
def test7_clear_policy_values(self, gpuIds):
""" Clear the violation policy settings for the group """
args = ["policy", "--host", self.host_ip, "-g", str(self.group_id), "--clear"]
print("Clearing settings for policy list: %s " % args)
return args
def test8_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Run the STATS subsystem tests
class ProcessStatsTests:
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_create_group(self, gpuIds):
""" Creates a group for testing the subsystem diag """
self.gpuIds = gpuIds
self.group_id = self.groups_op.create_group(None, self.gpuIds)
return self.group_id > 1
@test_utils.run_only_if_mig_is_disabled()
def test2_enable_system_watches(self, gpuIds):
""" Enable watches for process stats """
args = ["stats", "--host", self.host_ip, "-g", str(self.group_id), "--enable"]
print("Enabling system watches for process stats: %s" % args)
return args
@test_utils.run_only_if_mig_is_disabled()
def test3_get_pid_stats(self, gpuIds):
""" Gets the process stats using the stats subsystem """
app = RunCudaCtxCreate(self.gpuIds, self.burnInHandle, runTimeSeconds=10, timeoutSeconds=(20 * len(gpuIds)))
app.start()
pids = app.getpids()
for pid in pids:
stats_msg = "--> Generating Data for Process Stats - PID %d <--" % pid
updateTestResults("CYCLE")
print(bcolors.PURPLE + stats_msg + bcolors.ENDC)
args = ["stats", "--host", self.host_ip, "-g", str(self.group_id), "--pid", str(pid), "-v"]
print("Collecting process stats information: %s" % args)
sys.stdout.flush()
app.wait()
app.terminate()
return args
@test_utils.run_only_if_mig_is_disabled()
def test4_disable_system_watches(self, gpuIds):
""" Enable watches for process stats """
args = ["stats", "--host", self.host_ip, "-g", str(self.group_id), "--disable"]
print("Disabling system watches for process stats: %s" % args)
return args
def test5_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Run the NVLINK subsystem tests
class NvlinkTests:
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_create_group(self, gpuIds):
""" Creates a group for testing the subsystem nvlink """
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def test2_check_nvlink_status(self, gpuIds):
""" Gets the nvlink error counts for various links and GPUs on the system """
args = ["nvlink", "--host", self.host_ip, "-g", str(self.group_id), "-s"]
print("Reporting current nvlink status")
return args
def test3_query_nvlink_errors(self, gpuIds):
for gpuId in gpuIds:
args = ["nvlink", "--host", self.host_ip, "-g", str(gpuId), "-e"]
print("Running queries to get errors for various nvlinks on the system: %s" % args)
return args
def test4_query_nvlink_errors_json_output(self, gpuIds):
for gpuId in gpuIds:
args = ["nvlink", "--host", self.host_ip, "-g", str(gpuId), "-e", "-j"]
print("Running queries to get errors for various nvlinks on the system in Json format: %s" % args)
return args
def test5_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Run the Introspection subsystem tests
class IntrospectionTests:
def __init__(self, burnInHandle):
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_show_hostengine_stats(self, gpuIds):
""" Prints out hostengine statistics """
args = ["introspect", "--host", self.host_ip, "--show", "--hostengine"]
print("Showing introspection information for hostengine: %s" % args)
return args
# Run the Fieldgroups subsystem tests
class FieldGroupsTests():
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
self.dcgmHandle = pydcgm.DcgmHandle(ipAddress=self.host_ip)
self.dcgmSystem = self.dcgmHandle.GetSystem()
self.numFieldGroupsAdded = 0
def test1_create_group(self, gpuIds):
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def test2_create_fieldgroup(self, gpuIds):
# Get the field IDs of all of the field groups that exist so far. This is assumed to be the default groups
# DCGM_INTERNAL_30SEC, HOURLY, JOB...etc.
allFieldIds = []
fieldGroups = self.dcgmSystem.GetAllFieldGroups()
for fieldGroup in fieldGroups.fieldGroups[:fieldGroups.numFieldGroups]:
allFieldIds.extend(fieldGroup.fieldIds[:fieldGroup.numFieldIds])
print("Found %d fieldIds in %d field groups" % (len(allFieldIds), fieldGroups.numFieldGroups))
#Make sure we only use as many as what the API can handle
if len(allFieldIds) > dcgm_structs.DCGM_MAX_FIELD_IDS_PER_FIELD_GROUP:
allFieldIds = allFieldIds[:dcgm_structs.DCGM_MAX_FIELD_IDS_PER_FIELD_GROUP-1]
allFieldsStr = ",".join(map(str,allFieldIds))
fieldGroupName = "testFg_%d" % self.numFieldGroupsAdded
args = ["fieldgroup", "--host", self.host_ip, "-c", fieldGroupName, "-f", "%s" % allFieldsStr]
self.numFieldGroupsAdded += 1
print("Creating a field groups: %s" % args)
return args
def test3_list_existing_field_groups(self, gpuIds):
args = ["fieldgroup", "--host", self.host_ip, "-l"]
print("Listing available field IDs: %s" % args)
return args
def test4_list_json_format_field_groups(self, gpuIds):
args = ["fieldgroup", "--host", self.host_ip, "-l", "-j"]
print("Listing available field IDs in Json format: %s" % args)
return args
def test5_get_fieldgroup_info(self, gpuIds):
fieldGroupId = get_newest_field_group_id(self.dcgmSystem)
args = ["fieldgroup", "--host", self.host_ip, "-i", "-g", "%d" % fieldGroupId]
print("Listing field ID information: %s" % args)
return args
def test6_delete_fieldgroup(self, gpuIds):
fieldGroupId = get_newest_field_group_id(self.dcgmSystem)
args = ["fieldgroup", "--host", self.host_ip, "-d", "-g", "%d" % fieldGroupId]
print("Deleting a field group: %s" % args)
return args
def test7_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Run the Modules subsystem tests
class ModulesTests():
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
def test1_create_group(self, gpuIds):
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def test2_list_modules(self, gpuIds):
args = ["modules", "--host", self.host_ip, "-l"]
print("Listing existing modules: %s" % args)
return args
def test3_list_modules_json(self, gpuIds):
args = ["modules", "--host", self.host_ip, "-l", "-j"]
print("Listing existing modules Json format: %s" % args)
return args
def test4_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
# Run the dmon subsystem tests
class DmonTests:
def __init__(self, burnInHandle):
self.groups_op = GroupsOperationsHelper(burnInHandle)
self.group_id = None
self.burnInHandle = burnInHandle
self.host_ip = get_host_ip(self.burnInHandle.burnInCfg)
self.dcgmHandle = pydcgm.DcgmHandle(ipAddress=self.host_ip)
self.dcgmSystem = self.dcgmHandle.GetSystem()
def test1_create_group(self, gpuIds):
""" Creates a group for testing the subsystem dmon """
self.group_id = self.groups_op.create_group(None, gpuIds)
return self.group_id > 1
def test2_list_long_short_name_field_ids_dmon(self, gpuIds):
""" Prints out list of available items for dmon"""
args = ["dmon", "--host", self.host_ip, "-l"]
print("Listing available items for dmon to monitor: %s" % args)
return args
def test3_dmon_with_various_field_ids_per_device(self, gpuIds):
""" Runs dmon to get field group info for each device """
print_header = False
fieldIdsStr = "%d,%d" % (dcgm_fields.DCGM_FI_DEV_SM_CLOCK, dcgm_fields.DCGM_FI_DEV_MEM_CLOCK)
for gpuId in gpuIds:
cmd = "%s dmon --host %s -i %s -e %s -c 10 -d 100" % (dcgmi_absolute_path, self.host_ip, str(gpuId), fieldIdsStr)
if print_header:
RunDcgmi.print_test_header(inspect.currentframe().f_code.co_name)
print("Running dmon on a single GPU with field ids %s: %s" % (fieldIdsStr, shlex.split(cmd)))
try:
check_output(cmd, shell=True)
RunDcgmi.print_test_footer(inspect.currentframe().f_code.co_name, "PASSED", bcolors.BLUE)
updateTestResults("PASSED")
updateTestResults("COUNT")
except CalledProcessError:
print("Failed to get dmon data for GPU %s " % str(gpuId))
RunDcgmi.print_test_footer(inspect.currentframe().f_code.co_name, "FAILED", bcolors.RED)
updateTestResults("FAILED")
updateTestResults("COUNT")
except Exception:
print("Unexpected exception %s " % str(gpuId))
RunDcgmi.print_test_footer(inspect.currentframe().f_code.co_name, "FAILED", bcolors.RED)
updateTestResults("FAILED")
updateTestResults("COUNT")
print_header = True
return ""
def test4_dmon_field_group_dcgm_internal_30sec(self, gpuIds):
args = ["dmon", "--host", self.host_ip, "-f", "1", "-c", "10", "-d", "100"]
print("Running dmon on a group to monitor data on field group DCGM_INTERNAL_30SEC: %s" % args)
return args
def test5_dmon_field_group_dcgm_internal_hourly(self, gpuIds):
"dcgmi dmon -f 3 -c 10 -d 100"
args = ["dmon", "--host", self.host_ip, "-f", "2", "-c", "10", "-d", "100"]
print("Running dmon on a group to monitor data on field group DCGM_INTERNAL_HOURLY: %s" % args)
return args
def test6_dmon_field_group_dcgm_internal_job(self, gpuIds):
"dcgmi dmon -f 3 -c 10 -d 100"
args = ["dmon", "--host", self.host_ip, "-f", "3", "-c", "10", "-d", "100"]
print("Running dmon on a group to monitor data on field group DCGM_INTERNAL_JOB: %s" % args)
return args
def test7_delete_group(self, gpuIds):
""" Removes group used for testing """
return self.groups_op.delete_group()
def IsDiagTest(testname):
if testname == 'test2_diag1_short':
return 1
elif testname == 'test2_diag2_medium':
return 2
elif testname == 'test2_diag3_long':
return 3
return 0
class RunDcgmi():
"""
Class to launch an instance of the dcgmi client for each gpu
and control its execution
"""
forbidden_strings = [
# None of these error strings should ever be printed by dcgmi
"Unknown Error",
"Uninitialized",
"Invalid Argument",
"Already Initialized",
"Insufficient Size",
"Driver Not Loaded",
"Timeout",
"DCGM Shared Library Not Found",
"Function Not Found",
"(null)", # e.g. from printing %s from null ptr
]
def __init__(self, burnInHandle):
self.burnInHandle = burnInHandle
self.dcgmi_path = get_dcgmi_bin_path()
self.timestamp = str(time.strftime('%Y-%m-%d'))
self._timer = None # to implement timeout
self._subprocess = None
self._retvalue = None # stored return code or string when the app was terminated
self._lock = threading.Lock() # to implement thread safe timeout/terminate
self.log = open('DCGMI-CLIENT_%s.log' % self.timestamp, 'a+')
self.stdout_lines = []
self.stderr_lines = []
self.group_tests = GroupTests(burnInHandle)
self.config_tests = ConfigTests(burnInHandle)
self.health_tests = HealthTests(burnInHandle)
self.policy_tests = PolicyTests(burnInHandle)
self.discovery_tests = DiscoveryTests(burnInHandle)
self.diag_tests = DiagnosticsTests(burnInHandle)
self.stats_tests = ProcessStatsTests(burnInHandle)
self.topo_tests = TopologyTests(burnInHandle)
self.nvlink_tests = NvlinkTests(burnInHandle)
self.introspection_tests = IntrospectionTests(burnInHandle)
self.field_groups_tests = FieldGroupsTests(burnInHandle)
self.modules_tests = ModulesTests(burnInHandle)
self.dmon_tests = DmonTests(burnInHandle)
def _get_sorted_tests(self, obj):
""" Helper function to get test elements from each class and sort them in ascending order
Lambda breakdown:
sorted(filter(lambda x:x[0].startswith("test"), inspect.getmembers(obj)) -> Filters the class members whose names start with "test" and sorts them
key=lambda x:int(x[0][4:x[0].find('_')])) -> the key compares items by the integer between "test" and the first "_" in the name
"""
return sorted([x for x in inspect.getmembers(obj) if x[0].startswith("test")], key=lambda x:int(x[0][4:x[0].find('_')]))
def get_group_tests(self):
return self._get_sorted_tests(self.group_tests)
def get_config_tests(self):
return self._get_sorted_tests(self.config_tests)
def get_discovery_tests(self):
return self._get_sorted_tests(self.discovery_tests)
def get_health_tests(self):
return self._get_sorted_tests(self.health_tests)
def get_diag_tests(self):
return self._get_sorted_tests(self.diag_tests)
def get_stats_tests(self):
return self._get_sorted_tests(self.stats_tests)
def get_policy_tests(self):
return self._get_sorted_tests(self.policy_tests)
def get_topo_tests(self):
return self._get_sorted_tests(self.topo_tests)
def get_nvlink_tests(self):
return self._get_sorted_tests(self.nvlink_tests)
def get_introspection_tests(self):
return self._get_sorted_tests(self.introspection_tests)
def get_fieldgroups_tests(self):
return self._get_sorted_tests(self.field_groups_tests)
def get_modules_tests(self):
return self._get_sorted_tests(self.modules_tests)
def get_dmon_tests(self):
return self._get_sorted_tests(self.dmon_tests)
@staticmethod
def print_test_header(testName):
print(("&&&& RUNNING " + testName + "\n"))
@staticmethod
def print_test_footer(testName, statusText, color):
#Don't include colors for eris
if option_parser.options.dvssc_testing or option_parser.options.eris:
print(("&&&& " + statusText + " " + testName + "\n"))
else:
print(color + "&&&& " + statusText + " " + testName + bcolors.ENDC + "\n")
def start(self, timeout=None, server=None):
"""
Launches dcgmi application.
"""
assert self._subprocess is None
# checks if the file has executable permission
if os.path.exists(self.dcgmi_path):
assert os.access(self.dcgmi_path, os.X_OK), "Application binary %s is not executable! Make sure that the testing archive has been correctly extracted." % (self.dcgmi_path)
timeout_start = time.time()
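# Keep cycling through the full dcgmi test suite (once per set of same-SKU GPUs) until the requested runtime elapses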
while time.time() < timeout_start + timeout:
# Gets gpuId list
gpuIdLists = self.burnInHandle.GetGpuIdsGroupedBySku()
# Starts a process to run dcgmi
for gpuIds in gpuIdLists:
fout = open('DCGMI-RUN_%s.log' % self.timestamp, 'a+')
# Creates a list of lists from the return of each function
all_tests = [
self.get_group_tests(),
self.get_config_tests(),
self.get_discovery_tests(),
self.get_health_tests(),
self.get_stats_tests(),
self.get_topo_tests(),
self.get_policy_tests(),
self.get_nvlink_tests(),
self.get_introspection_tests(),
self.get_fieldgroups_tests(),
self.get_modules_tests(),
self.get_dmon_tests(),
self.get_diag_tests()
]
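# The diagnostics (EUD) tests are last in the list so they can be dropped when the EUD run is disabled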
if not self.burnInHandle.burnInCfg.eud:
all_tests.pop()
for test in all_tests:
for (testName, testMethod) in test:
try:
RunDcgmi.print_test_header(testName)
exec_test = testMethod(gpuIds)
if exec_test is None:
# The test was not run (likely prevented by
# decorator).
RunDcgmi.print_test_footer(testName, "SKIPPED", bcolors.PURPLE)
continue
if type(exec_test) == str:
# The test itself reported.
continue
if type(exec_test) == bool:
if exec_test:
RunDcgmi.print_test_footer(testName, "PASSED", bcolors.BLUE)
updateTestResults("PASSED")
else:
RunDcgmi.print_test_footer(testName, "FAILED", bcolors.RED)
updateTestResults("FAILED")
updateTestResults("COUNT")
continue
# We need to check pass / fail differently for the diag. If it finds a legitimate problem
# we want to consider the test waived.
dd = None
diagPassed = False
diagTest = IsDiagTest(testName)
if diagTest:
nsc = nvidia_smi_utils.NvidiaSmiJob()
nsc.start()
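# Diag parameters: skip the NVLink status check and lower the required PCIe link width to 2 for the PCIe subtests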
paramsStr = "pcie.test_nvlink_status=false"
paramsStr += ";pcie.h2d_d2h_single_unpinned.min_pci_width=2"
paramsStr += ";pcie.h2d_d2h_single_pinned.min_pci_width=2"
dd = DcgmiDiag.DcgmiDiag(dcgmiPrefix=get_dcgmi_bin_directory(), runMode=diagTest, gpuIds=gpuIds, paramsStr=paramsStr)
diagPassed = not dd.Run()
if dd.failed_list:
"""
Diag may have passed if all errors are
DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
"""
diagPassed = True
logCooling = False
for failure in dd.failed_list:
if failure.m_fieldId == dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS:
logCooling = True
else:
diagPassed = False
if logCooling:
self.log.write("Please check cooling on this machine.")
nsc.m_shutdownFlag.set()
nsc.join()
throttleReasons = nsc.GetAnyThermalThrottlingReasons()
if len(throttleReasons):
self.log.write("Ignoring diagnostic failure due to thermal throttling.")
for throttleReason in throttleReasons:
self.log.write("Found thermal throttling: %s" % str(throttleReasons))
diagPassed = True
fout.write(str(dd.lastStdout))
fout.write(str(dd.lastStderr))
rc = dd.diagRet
else:
self._subprocess = Popen([self.dcgmi_path]+exec_test, stdout=fout, stderr=fout)
self._subprocess.wait()
rc = self._subprocess.returncode
#DCGM returns an underflowed int8 as the status. So -3 is returned as 253. Convert it to the negative value
if rc != 0:
rc -= 256
print("Got rc %d" % rc)
if rc == 0:
RunDcgmi.print_test_footer(testName, "PASSED", bcolors.BLUE)
updateTestResults("PASSED")
updateTestResults("COUNT")
elif rc == dcgm_structs.DCGM_ST_NOT_SUPPORTED:
RunDcgmi.print_test_footer(testName, "WAIVED", bcolors.YELLOW)
updateTestResults("WAIVED")
updateTestResults("COUNT")
elif diagPassed == True:
# If we reach here, it means the diag detected a problem we consider legitimate.
# Mark this test as waived instead of failed
RunDcgmi.print_test_footer(testName, "WAIVED", bcolors.YELLOW)
print("Waiving test due to errors we believe to be legitimate detected by the diagnostic")
if dd is not None:
dd.PrintFailures()
updateTestResults("WAIVED")
updateTestResults("COUNT")
else:
RunDcgmi.print_test_footer(testName, "FAILED", bcolors.RED)
if dd is not None:
dd.PrintFailures()
updateTestResults("FAILED")
updateTestResults("COUNT")
except test_utils.TestSkipped as err:
RunDcgmi.print_test_footer(testName, "SKIPPED", bcolors.PURPLE)
print(err)
updateTestResults("SKIPPED")
return ("\n".join(self.stdout_lines), self._subprocess.returncode), str(gpuIds)
def retvalue(self):
"""
Returns code/string if application finished or None otherwise.
"""
if self._subprocess.poll() is not None:
self.wait()
return self._retvalue
def _process_finish(self):
# if still alive, kill process
if self._subprocess.returncode is None:
self._subprocess.terminate()
self._subprocess.wait()
# Check if child process has terminated.
if self._subprocess.returncode is not None:
if self._subprocess.returncode == 0:
message = ("PASSED - DCGMI is running normally, returned %d\n" % self._subprocess.returncode)
self.log.writelines(message)
else:
message = ("FAILED - DCGMI failed with returned non-zero %s \n") % self._subprocess.returncode
self.log.writelines(message)
# Verify that dcgmi doesn't print any strings that should never be printed on a working system
stdout = "\n".join(self.stdout_lines)
for forbidden_text in RunDcgmi.forbidden_strings:
assert stdout.find(forbidden_text) == -1, "dcgmi printed \"%s\", this should never happen!" % forbidden_text
return self._retvalue
def wait(self):
"""
Wait for application to finish and return the app's error code/string
"""
if self._retvalue is not None:
return self._retvalue
with self._lock: # set ._retvalue in thread safe way. Make sure it wasn't set by timeout already
if self._retvalue is None:
self._retvalue = self._subprocess.returncode
self._process_finish()
return self._retvalue
def _trigger_timeout(self):
"""
Function called by timeout routine. Kills the app in a thread safe way.
"""
with self._lock: # set ._retvalue in thread safe way. Make sure that app wasn't terminated already
if self._retvalue is not None:
return self._retvalue
self._subprocess.kill()
self._process_finish()
return self._retvalue
# Start host engine on the head node
def run_local_host_engine(burnInCfg):
#Starting HostEngine
host_engine = RunHostEngine(burnInCfg.writeHostEngineDebugFile)
host_engine.start(int(burnInCfg.runtime)+5)
try:
_thread.start_new_thread(lambda: host_engine.mem_usage(burnInCfg.runtime), ())
_thread.start_new_thread(lambda: host_engine.cpu_usage(burnInCfg.runtime), ())
except:
print("Error: unable to create thread")
time.sleep(1)
return host_engine
# Start cuda workload
def run_cuda_workload(burnInCfg):
burnInHandle = BurnInHandle(burnInCfg.ip, burnInCfg)
#Creates cuda workload
cuda_workload = RunCudaCtxCreate(None, burnInHandle, int(burnInCfg.runtime)+4, timeoutSeconds=10)
cuda_workload.start()
print("\nGenerating Cuda Workload on Clients...\n")
cuda_workload.wait()
# Start dcgmi testing
def run_dcgmi_client(burnInCfg):
burnInHandle = BurnInHandle(burnInCfg.ip, burnInCfg)
dcgmi_client = RunDcgmi(burnInHandle)
dcgmi_client.start(int(burnInCfg.runtime)+1, burnInCfg.srv)
time.sleep(2)
# Copy packages to test nodes
def copy_files_to_targets(ip):
# Gets current user name
user = os.getlogin()
# Compress current directory, copies the package to target systems
print("Creating burning-package.tar.gz package...")
time.sleep(2)
package = "burning-package.tar.gz"
abscwd=os.path.abspath(os.getcwd())
os.system("tar zcvf /tmp/%s -C %s testing" % (package, os.path.dirname(abscwd)))
print("\n...................................................\n")
print("Copying package to test systems... %s" % ip)
for address in ip:
# Sends package to remote's /home/$USER folder
os.system("scp -rp /tmp/" + package + " " + user + "@" + address + ":")
time.sleep(1)
# Unpacking package on remote nodes
os.system("ssh " + user + "@" + address + " tar zxf " + package)
time.sleep(1)
# Run the tests on the remote nodes
def run_remote(runtime, address, srv, nodes):
# Gets current user name
user = os.getlogin()
py_cmd = sys.executable + " burn_in_stress.py"
# Run the tests on remote systems
cmd="ssh %s@%s \"MODULEPATH=%s;cd testing; LD_LIBRARY_PATH=~/testing %s -t %s -s %s \"" % (user, address, os.environ["MODULEPATH"], py_cmd, runtime, srv)
return Popen(cmd.split())
# Run the tests on the local (single) node
def run_tests(burnInCfg):
color = bcolors()
# Start an embedded host engine first to make sure we even have GPUs configured
burnInHandle = BurnInHandle(None, burnInCfg)
# Gets gpuId list
gpuIds = burnInHandle.GetGpuIds()
if len(gpuIds) < 1:
print("\n...................................................\n")
print("At least one GPU is required to run the burn_in stress test on a single node.\n")
print("Set __DCGM_WL_BYPASS=1 in your environment to bypass the DCGM allowlist\n")
sys.exit(1)
else:
for gpuId in gpuIds:
print("The available devices are: " + color.BOLD + "GPU %d" % gpuId + color.ENDC)
# Disconnect from embedded host engine
del(burnInHandle)
burnInHandle = None
start_timestamp = time.asctime()
host_engine = None
if not test_utils.is_hostengine_running():
# Starting HostEngine
host_engine = RunHostEngine(burnInCfg.writeHostEngineDebugFile)
host_engine.start(int(burnInCfg.runtime)+5)
try:
_thread.start_new_thread(lambda: host_engine.mem_usage(burnInCfg.runtime), ())
_thread.start_new_thread(lambda: host_engine.cpu_usage(burnInCfg.runtime), ())
except:
print("Error: unable to create thread")
time.sleep(1)
else:
print("\nHostengine detected, using existing nv-hostegine...")
# We're now running a host engine daemon. Connect to it using TCP/IP
burnInHandle = BurnInHandle("127.0.0.1", burnInCfg)
dcgmi_client = RunDcgmi(burnInHandle)
time.sleep(2)
dcgmi_client.start(int(burnInCfg.runtime)+1)
time.sleep(2)
print("\nStart timestamp: %s" % start_timestamp)
# Finish when done
dcgmi_client.wait()
if host_engine is not None:
host_engine.wait()
host_engine.terminate()
host_engine.validate()
#Disconnect from host engine
del(burnInHandle)
burnInHandle = None
def validate_ip(s):
# Function to validate ip addresses
a = s.split('.')
if len(a) != 4:
return False
for x in a:
if not x.isdigit():
return False
i = int(x)
if i < 0 or i > 255:
return False
color = bcolors()
print(color.GREEN + "IP Validate successfully..." + color.ENDC)
return True
# Class for holding global configuration information for this test module
class BurnInGlobalConfig:
def __init__(self):
self.remote = False #Are we connecting to a remote server? True = Yes. False = No
self.eud = True #Should we run the EUD? True = Yes
self.runtime = 0 #How long to run the tests in seconds
self.onlyGpuIds = [] #Only GPU IDs this framework should run on
self.dvssc_testing = False #Display color on output
#Undocumented globals I'm pulling out of the global namespace for sanity's sake
self.srv = None
self.ip = None
self.nodes = []
self.server = []
def parseCommandLine():
burnInCfg = BurnInGlobalConfig()
color = bcolors()
# Script supports following args
# -t time to run in seconds
# -s (after that, one or more server names/IPs, default is localhost)
# -a (after that one or more client names, default is localhost) or
# -n (one or more client names from a text file)
# -i GPU ids separated by commas
# Parsing arguments
parser = argparse.ArgumentParser(description="BURN-IN STRESS TEST")
parser.add_argument("-t", "--runtime", required=True, help="Number of seconds to keep the test running")
#nargs="+" means one or more arguments
parser.add_argument("-a", "--address", nargs="+", help="One or more IP addresses separated by spaces where DCGMI Clients will run on ")
parser.add_argument("-n", "--nodesfile", help="File with list of IP address or hostnames where DCGMI Clients will run on")
parser.add_argument("-s", "--server", help="Server IP address where DCGM HostEngines will running on")
parser.add_argument("-ne", "--noeud", action="store_true", help="Runs without the EUD Diagnostics test")
parser.add_argument("-i", "--indexes", nargs=1, help="One or more GPU IDs to run the burn-in tests on, separated by commas. These come from 'dcgmi discovery -l'")
parser.add_argument("-d", "--debug", action="store_true", help="Write debug logs from nv-hostengine")
parser.add_argument("-D", "--dvssc-testing", action="store_true", help="DVSCC Testing disable colors in output")
args = parser.parse_args()
# passes stdout to the Logger Class
sys.stdout = Logger()
# Parsing color
burnInCfg.dvssc_testing = args.dvssc_testing
# pass on to option_parser for use in logging.py
option_parser.options.dvssc_testing = burnInCfg.dvssc_testing
# Parsing time to run argument
if args.runtime:
burnInCfg.runtime = str(args.runtime)
if args.runtime.isdigit():
print("\n...................................................")
print(color.YELLOW + " ##### DCGM BURN-IN STRESS TEST ##### " + color.ENDC)
print("...................................................\n")
else:
print(color.RED + "\nPlease enter a decimal number for the time option.\n" + color.ENDC)
sys.exit(1)
# Runs without EUD tests
if args.noeud:
burnInCfg.eud = False
print(color.PURPLE + "## Running without Diagnostics Tests ##\n" + color.ENDC)
# Parsing server arguments
burnInCfg.server = []
if args.server:
burnInCfg.remote = True
burnInCfg.srv = args.server
if validate_ip(burnInCfg.srv):
burnInCfg.server.append(burnInCfg.srv)
else:
print(color.RED + "\n%s is invalid. Please enter a valid IP address.\n" + color.ENDC)
sys.exit(1)
# Parsing IP address arguments
burnInCfg.ip = []
burnInCfg.nodes = []
if args.address:
if not args.server:
print(color.RED + "The server IP was not informed" + color.ENDC)
sys.exit(1)
if "," in args.address:
print(color.RED + "Comma separator is not allowed ok" + color.ENDC)
sys.exit(1)
for add in args.address:
if validate_ip(add):
burnInCfg.ip.append(add)
else:
print(color.RED + "\nFailed to validate IP Address %s\n" % add + color.ENDC)
sys.exit(1)
print(color.PURPLE + "IP address to run on: %s " % str(burnInCfg.ip) + color.ENDC)
print("\n...................................................\n")
else:
if args.nodesfile:
burnInCfg.remote = True
if not args.server:
print(color.RED + "The server IP was not informed" + color.ENDC)
sys.exit(1)
# Parsing hostfile argument
location = os.path.abspath(os.path.join(args.nodesfile))
if os.path.exists(location):
print(color.PURPLE + "Nodes list file used \"%s\"\n" % str(location).split() + color.ENDC)
# Reads the node list file and removes newlines from each line
f = open(location, 'r')
for lines in f.readlines():
burnInCfg.nodes.append(lines[:-1])
print(color.PURPLE + "IP address to run on: %s " % burnInCfg.nodes + color.ENDC)
print("\n...................................................\n")
if args.indexes:
burnInCfg.onlyGpuIds = []
for gpuId in args.indexes[0].split(','):
if not gpuId.isdigit():
print(color.RED + "GPU ID '%s' must be a number" % gpuId + color.ENDC)
sys.exit(1)
burnInCfg.onlyGpuIds.append(int(gpuId))
if args.debug:
burnInCfg.writeHostEngineDebugFile = True
else:
burnInCfg.writeHostEngineDebugFile = False
return burnInCfg
def cleanup():
'''
Clean up our environment before exit
'''
apps.AppRunner.clean_all()
def main_wrapped():
# Initialize the framework's option parser so we can use framework classes
option_parser.initialize_as_stub()
if not utils.is_root():
sys.exit("\nOnly root can run this script\n")
#Parse the command line
burnInCfg = parseCommandLine()
setupEnvironment()
#initialize the DCGM library globally ONCE
try:
dcgm_structs._dcgmInit(utils.get_testing_framework_library_path())
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_LIBRARY_NOT_FOUND):
print("DCGM Library hasn't been found in the system, is the driver correctly installed?", file=sys.stderr)
sys.exit(1)
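# Dispatch based on the parsed configuration: run everything locally, act as a remote client (generate cuda load
# and dcgmi traffic against the head node), or act as the head node that copies the package to targets and drives remote runs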
if not burnInCfg.remote:
run_tests(burnInCfg)
elif len(burnInCfg.ip)==0:
run_cuda_workload(burnInCfg)
run_dcgmi_client(burnInCfg)
else:
copy_files_to_targets(burnInCfg.ip)
run_local_host_engine(burnInCfg)
remotes=[]
for address in burnInCfg.ip:
remotes.append(run_remote(burnInCfg.runtime, address, burnInCfg.srv, burnInCfg.nodes))
for remote in remotes:
remote.wait()
getTestSummary()
def main():
try:
main_wrapped()
except Exception as e:
print(("Got exception " + str(e)))
raise
finally:
cleanup()
if __name__ == "__main__":
main()
| DCGM-master | testing/python3/burn_in_stress.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import xml.etree.ElementTree as ET
import time
import option_parser
import logger
import pydcgm
import dcgm_structs
import dcgm_fields
import dcgm_agent
import shlex
import subprocess
################################################################################
### XML tags to search for
################################################################################
THROTTLE_FN = "clocks_throttle_reasons"
ECC_FN = "ecc_errors"
ECC_ENABLED_FN = "ecc_mode"
GPU_FN = "gpu"
MINOR_NUM_FN = "minor_number"
ECC_CURRENT_FN = "current_ecc"
RETIRED_PAGES_FN = "retired_pages"
RETIRED_COUNT_FN = "retired_count"
RETIRED_SBE_FN = "multiple_single_bit_retirement"
RETIRED_DBE_FN = "double_bit_retirement"
VOLATILE_FN = "volatile"
AGGREGATE_FN = "aggregate"
TOTAL_FN = "total"
DB_FN = 'double_bit'
SB_FN = 'single_bit'
PCI_FN = "pci"
PCI_DEVICE_ID_FN = "pci_device_id"
# list of relevant throttle reasons
relevant_throttling = ["clocks_throttle_reason_hw_slowdown",
"clocks_throttle_reason_hw_thermal_slowdown",
"clocks_throttle_reason_hw_power_brake_slowdown",
"clocks_throttle_reason_sw_thermal_slowdown"]
################################################################################
### Supported field ids
### Each is a tuple of the field id, the type of check for error, and the ideal
### value. Set each of these values in order to add support for a new field.
################################################################################
CHECKER_ANY_VALUE = 0
CHECKER_MAX_VALUE = 1
CHECKER_LAST_VALUE = 2
CHECKER_INFOROM = 3
supportedFields = [ (dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS, CHECKER_ANY_VALUE, ''),
(dcgm_fields.DCGM_FI_DEV_ECC_CURRENT, CHECKER_ANY_VALUE, 'Active'),
(dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION, CHECKER_MAX_VALUE, 0),
(dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, CHECKER_LAST_VALUE, 0),
(dcgm_fields.DCGM_FI_DEV_INFOROM_CONFIG_VALID, CHECKER_INFOROM, True),
]
# field ids where any value is an error
anyCheckFields = [ dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS, dcgm_fields.DCGM_FI_DEV_ECC_CURRENT ]
# field ids where the max value should be returned as an error
maxCheckFields = [ dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION ]
# field ids where the last value should be returned as an error
lastCheckFields = [ dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL ]
# field ids where the ideal value is 0
zeroIdealField = [ dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION ]
# field ids where the ideal value is an empty string
emptyStrIdealField = [dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS ]
# field ids where False is the ideal value
falseIdealField = [ dcgm_fields.DCGM_FI_DEV_INFOROM_CONFIG_VALID ]
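# Illustrative note (hypothetical addition): to support another field, append a
# (fieldId, checker type, ideal value) tuple to supportedFields above, e.g.
#     (dcgm_fields.DCGM_FI_DEV_XID_ERRORS, CHECKER_LAST_VALUE, 0)
# and, where applicable, add the field id to the matching helper lists
# (lastCheckFields and zeroIdealField for this example).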
def parse_int_from_nvml_xml(text):
if text == 'N/A':
return 0
try:
return int(text)
except ValueError as e:
return 0
################################################################################
class NvidiaSmiJob(threading.Thread):
################################################################################
### Constructor
################################################################################
def __init__(self):
threading.Thread.__init__(self)
self.m_shutdownFlag = threading.Event()
self.m_data = {}
self.m_sleepInterval = 1
self.m_inforomCorrupt = None
self.m_supportedFields = {}
self.InitializeSupportedFields()
################################################################################
### Map the fieldId to the information for supporting that field id
################################################################################
def InitializeSupportedFields(self):
for fieldInfo in supportedFields:
self.m_supportedFields[fieldInfo[0]] = fieldInfo
################################################################################
### Sets the sleep interval between querying nvidia-smi
################################################################################
def SetIterationInterval(self, interval):
self.m_sleepInterval = interval
################################################################################
### Looks at the volatile XML node to find the total double bit errors
################################################################################
def ParseEccErrors(self, ecc_subnode, gpudata, isVolatile):
for child in ecc_subnode:
if child.tag == DB_FN:
for grandchild in child:
if grandchild.tag == TOTAL_FN:
if isVolatile:
gpudata[dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL] = parse_int_from_nvml_xml(grandchild.text)
else:
gpudata[dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL] = parse_int_from_nvml_xml(grandchild.text)
elif child.tag == SB_FN:
for grandchild in child:
if grandchild.tag == TOTAL_FN:
if isVolatile:
gpudata[dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL] = parse_int_from_nvml_xml(grandchild.text)
else:
gpudata[dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL] = parse_int_from_nvml_xml(grandchild.text)
def ParseRetiredPagesCount(self, retired_sbe_node, gpudata, fieldId):
for child in retired_sbe_node:
if child.tag == RETIRED_COUNT_FN:
gpudata[fieldId] = parse_int_from_nvml_xml(child.text)
break
################################################################################
### Reads the common failure conditions from the XML for this GPU
### All non-error values are set to None to make it easier to read the map
################################################################################
def ParseSingleGpuDataFromXml(self, gpuxml_node):
gpudata = {}
gpu_id = -1
for child in gpuxml_node:
if child.tag == THROTTLE_FN:
reasons = ''
for grandchild in child:
if grandchild.tag in relevant_throttling and grandchild.text == 'Active':
if not reasons:
reasons = grandchild.tag
else:
reasons += ",%s" % (grandchild.tag)
gpudata[dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS] = reasons
elif child.tag == MINOR_NUM_FN:
gpu_id = parse_int_from_nvml_xml(child.text)
gpudata[dcgm_fields.DCGM_FI_DEV_NVML_INDEX] = gpu_id
elif child.tag == ECC_ENABLED_FN:
for grandchild in child:
if grandchild.tag == ECC_CURRENT_FN:
gpudata[dcgm_fields.DCGM_FI_DEV_ECC_CURRENT] = grandchild.text
elif child.tag == ECC_FN:
for grandchild in child:
if grandchild.tag == VOLATILE_FN:
self.ParseEccErrors(grandchild, gpudata, True)
elif grandchild.tag == AGGREGATE_FN:
self.ParseEccErrors(grandchild, gpudata, False)
elif child.tag == RETIRED_PAGES_FN:
for grandchild in child:
if grandchild.tag == RETIRED_SBE_FN:
self.ParseRetiredPagesCount(grandchild, gpudata, dcgm_fields.DCGM_FI_DEV_RETIRED_SBE)
elif grandchild.tag == RETIRED_DBE_FN:
self.ParseRetiredPagesCount(grandchild, gpudata, dcgm_fields.DCGM_FI_DEV_RETIRED_DBE)
elif child.tag == PCI_FN:
for grandchild in child:
if grandchild.tag == PCI_DEVICE_ID_FN:
gpudata[dcgm_fields.DCGM_FI_DEV_PCI_COMBINED_ID] = grandchild.text
if gpu_id not in self.m_data:
self.m_data[gpu_id] = {}
for key in gpudata:
if key not in self.m_data[gpu_id]:
self.m_data[gpu_id][key] = []
self.m_data[gpu_id][key].append(gpudata[key])
################################################################################
### Finds each GPU's xml entry and passes it off to be read
################################################################################
def ParseDataFromXml(self, root):
for child in root:
if child.tag == GPU_FN:
self.ParseSingleGpuDataFromXml(child)
################################################################################
### Reads some common failure condition values from nvidia-smi -q -x
### Returns an XML ElementTree object on success
### None on failure
################################################################################
def QueryNvidiaSmiXml(self, parseData=None):
if parseData is None:
parseData = True
output = ""
nvsmi_cmd = "nvidia-smi -q -x"
try:
runner = subprocess.Popen(nvsmi_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(output, error) = runner.communicate()
root = ET.fromstring(output)
if parseData:
self.ParseDataFromXml(root)
return root
except OSError as e:
logger.error("Failed to run nvidia-smi.\nError: %s" % e)
return None
except ET.ParseError as e:
logger.error("Got exception %s while parsing XML: %s" % (str(e), str(output)))
return None
################################################################################
### Reads thermal violation values from nvidia-smi stats
################################################################################
def QueryNvidiaSmiStats(self):
nvsmi_cmd = "nvidia-smi stats -c 1 -d violThm"
# Initialize lines as an empty list so we don't do anything if IO fails
lines = []
try:
runner = subprocess.Popen(nvsmi_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(output_buf, error_buf) = runner.communicate()
output = output_buf.decode('utf-8')
error = error_buf.decode('utf-8')
lines = output.split('\n')
except OSError as e:
logger.error("Failed to query nvidia-smi stats.\nError: %s" % e)
for line in lines:
if not line:
continue
try:
values = line.split(',')
if len(values) != 4:
continue
gpu_id = int(values[0])
violation = int(values[-1].strip())
except ValueError as e:
# Sometimes there is output with comments in it from nvidia-smi. These will throw
# exceptions that we should ignore.
continue
if gpu_id not in self.m_data:
self.m_data[gpu_id] = {}
if dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION not in self.m_data[gpu_id]:
self.m_data[gpu_id][dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION] = []
self.m_data[gpu_id][dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION].append(violation)
################################################################################
def GetAnyThermalThrottlingReasons(self):
throttling = []
for gpuId in self.m_data:
if dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION in self.m_data[gpuId]:
throttling.append(self.m_data[gpuId][dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION])
return throttling
################################################################################
def CheckInforom(self):
nvsmi_cmd = "nvidia-smi"
try:
runner = subprocess.Popen(nvsmi_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
(outputbytes, errorbytes) = runner.communicate()
output = outputbytes.decode('utf-8')
error = errorbytes.decode('utf-8')
if output.find("infoROM is corrupted") != -1:
return True
else:
return False
except OSError as e:
logger.error("Failed to query for corrupt inforom.\nError: %s" % e)
return None
################################################################################
def run(self):
while not self.m_shutdownFlag.is_set():
self.QueryNvidiaSmiXml(parseData=True)
self.QueryNvidiaSmiStats()
time.sleep(self.m_sleepInterval)
################################################################################
def CheckField(self, fieldId, values):
# Return None for an empty list or an unsupported field id
if not values or fieldId not in self.m_supportedFields:
return None
if self.m_supportedFields[fieldId][1] == CHECKER_ANY_VALUE:
for val in values:
if val:
return val
return None
elif self.m_supportedFields[fieldId][1] == CHECKER_MAX_VALUE:
maxVal = values[0]
for num, val in enumerate(values, start=1):
if val > maxVal:
maxVal = val
return maxVal
elif self.m_supportedFields[fieldId][1] == CHECKER_LAST_VALUE:
return values[-1]
else:
return None
################################################################################
### Determine if the NvidiaSmiChecker object also found an error for the specified
### gpuId and fieldId. If so, return a valid value.
### Returning None means no error was found for that fieldId and gpuId
################################################################################
def GetErrorValue(self, gpuId, fieldId):
if not gpuId:
if fieldId == dcgm_fields.DCGM_FI_DEV_INFOROM_CONFIG_VALID:
if self.m_inforomCorrupt is None:
self.m_inforomCorrupt = self.CheckInforom()
# Only valid if we're looking for inforom errors
return self.m_inforomCorrupt, self.GetCorrectValue(fieldId)
elif gpuId in self.m_data:
if fieldId in self.m_data[gpuId]:
return self.CheckField(fieldId, self.m_data[gpuId][fieldId]), self.GetCorrectValue(fieldId)
return None, None
def GetCorrectValue(self, fieldId):
if fieldId not in self.m_supportedFields:
return 'Unknown'
else:
return self.m_supportedFields[fieldId][2]
################################################################################
### Checks for multiple page retirement issues within the nvidia-smi xml output
### returns True if there are page retirement issues according to criteria
### described in JIRA DCGM-1009
################################################################################
def CheckPageRetirementErrors(self):
elemTree = self.QueryNvidiaSmiXml()
if elemTree is None:
logger.warning("We were unable to query nvidia-smi XML successfully. Ignoring the page retirement check.")
return False
totals = {}
totals[dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL] = 0
totals[dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL] = 0
totals[dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL] = 0
totals[dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL] = 0
totals[dcgm_fields.DCGM_FI_DEV_RETIRED_SBE] = 0
totals[dcgm_fields.DCGM_FI_DEV_RETIRED_DBE] = 0
for gpuId in self.m_data:
for fieldId in totals:
if fieldId in self.m_data[gpuId]:
if self.m_data[gpuId][fieldId]:
totals[fieldId] += self.m_data[gpuId][fieldId][-1]
if (totals[dcgm_fields.DCGM_FI_DEV_RETIRED_SBE] + totals[dcgm_fields.DCGM_FI_DEV_RETIRED_DBE]) > 64:
logger.warning("More than 64 page retirements were found")
return True
if totals[dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL] + totals[dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL] > 50000:
logger.warning("Too many ECC errors found: %d volatile SBE and %d volatile DBE errors" % \
(totals[dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL], \
totals[dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL]))
return True
if totals[dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL] + totals[dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL] > 50000:
logger.warning("Too many ECC errors found: %d aggregate SBE and %d aggregate DBE errors" % \
(totals[dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL], \
totals[dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL]))
return True
return False
def are_gpus_free():
"""
Parses nvidia-smi xml output and discovers if any processes are using the GPUs,
returns whether or not the GPUs are in use or not. True = GPUs are not being used.
False = GPUs are in use by one or more processes
"""
cmd = "nvidia-smi -q -x"
try:
nvsmiData = subprocess.check_output(shlex.split(cmd)).decode('utf-8')
except subprocess.CalledProcessError:
logger.info("The nvidia-smi XML output was malformed.")
return True
nvsmiData = subprocess.check_output(shlex.split(cmd)).decode('utf-8')
try:
tree = ET.fromstring(nvsmiData)
except ET.ParseError:
logger.info("The nvidia-smi XML output was malformed.")
return True
pidList = []
processList = []
# Goes deep into the XML Element Tree to get PID and Process Name
for node in tree.iter('gpu'):
for proc in node.iterfind('processes'):
for pr in proc.iterfind('process_info'):
for pid in pr.iterfind('pid'):
pidList.append(pid.text)
for name in pr.iterfind('process_name'):
processList.append(name.text)
if len(pidList) != 0:
logger.warning("UNABLE TO CONTINUE, GPUs ARE IN USE! MAKE SURE THAT THE GPUS ARE FREE AND TRY AGAIN!")
logger.info("Gpus are being used by processes below: ")
logger.info("Process ID: %s" % pidList)
logger.info("Process Name: %s" % processList)
logger.info()
return False
return True
def get_output():
"""
Executes nvidia-smi -q and returns the output
"""
cmd = "nvidia-smi -q"
try:
nvsmiData = subprocess.check_output(shlex.split(cmd)).decode('utf-8')
return (nvsmiData, None)
except subprocess.CalledProcessError as e:
print(("Unable to collect \"nvidia-smi -q\" output. Error:\n%s" % e))
return (None, e.output)
def enable_persistence_mode():
"""
Attempts to enable persistence mode via nvidia-smi
"""
cmd = "nvidia-smi -pm 1"
try:
nvsmiData = subprocess.check_output(shlex.split(cmd)).decode('utf-8')
return (nvsmiData, None)
except subprocess.CalledProcessError as e:
print(("Failed to enable persistence mode. Error:\n%s" % e))
return (None, e.output)
################################################################################
### Simple function to print out some values from nvidia-smi commands
################################################################################
def main():
#sc = check_sanity_nvml.SanityChecker()
j = NvidiaSmiJob()
j.start()
time.sleep(20)
j.m_shutdownFlag.set()
j.join()
print("Data:")
print((j.m_data))
if __name__ == '__main__':
main()
| DCGM-master | testing/python3/nvidia_smi_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import signal, os
import pydcgm
import dcgm_structs
import threading
import dcgm_fields
import sys
import logging
defaultFieldIds = [
dcgm_fields.DCGM_FI_DEV_POWER_USAGE,
dcgm_fields.DCGM_FI_DEV_GPU_TEMP,
dcgm_fields.DCGM_FI_DEV_SM_CLOCK,
dcgm_fields.DCGM_FI_DEV_GPU_UTIL,
dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING,
dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_FREE,
dcgm_fields.DCGM_FI_DEV_FB_USED,
dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION,
dcgm_fields.DCGM_FI_DEV_XID_ERRORS,
dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_MEM_CLOCK,
dcgm_fields.DCGM_FI_DEV_MEMORY_TEMP,
dcgm_fields.DCGM_FI_DEV_TOTAL_ENERGY_CONSUMPTION,
dcgm_fields.DCGM_FI_DEV_MEM_COPY_UTIL,
dcgm_fields.DCGM_FI_DEV_NVLINK_BANDWIDTH_TOTAL,
dcgm_fields.DCGM_FI_DEV_PCIE_TX_THROUGHPUT,
dcgm_fields.DCGM_FI_DEV_PCIE_RX_THROUGHPUT
]
def entity_group_id_to_string(entityGroupId):
if entityGroupId == dcgm_fields.DCGM_FE_GPU:
return 'GPU'
elif entityGroupId == dcgm_fields.DCGM_FE_VGPU:
return 'VGPU'
elif entityGroupId == dcgm_fields.DCGM_FE_SWITCH:
return 'NVSWITCH'
elif entityGroupId == dcgm_fields.DCGM_FE_GPU_I:
return 'GPU INSTANCE'
elif entityGroupId == dcgm_fields.DCGM_FE_GPU_CI:
return 'COMPUTE INSTANCE'
elif entityGroupId == dcgm_fields.DCGM_FE_LINK:
return 'LINK'
else:
return ''
class DcgmReader(object):
###########################################################################
'''
This function can be implemented as a callback in the class that inherits from DcgmReader
to handle each field individually.
    By default, it prints a string with the gpu, field tag, and value.
@params:
gpuId : the id of the GPU this field is reporting on
fieldId : the id of the field (ignored by default, may be useful for children)
fieldTag : the string representation of the field id
val : the value class that comes from DCGM (v.value is the value for the field)
'''
def CustomFieldHandler(self, gpuId, fieldId, fieldTag, val):
print("GPU %s field %s=%s" % (str(gpuId), fieldTag, str(val.value)))
###########################################################################
'''
This function can be implemented as a callback in the class that inherits from DcgmReader
to handle each field individually.
    By default, it prints a string with the entity, field tag, and value.
@params:
entityGroupId : the type of entity this field is reporting on
entityId : the id of the entity this field is reporting on
fieldId : the id of the field (ignored by default, may be useful for children)
fieldTag : the string representation of the field id
val : the value class that comes from DCGM (v.value is the value for the field)
'''
def CustomFieldHandler_v2(self, entityGroupId, entityId, fieldId, fieldTag, val):
print("%s %s field %s=%s" % (entity_group_id_to_string(entityGroupId), str(entityId), fieldTag, str(val.value)))
###########################################################################
'''
This function can be implemented as a callback in the class that inherits from DcgmReader
to handle all of the data queried from DCGM.
By default, it will simply print the field tags and values for each GPU
@params:
fvs : Data in the format entityGroupId -> entityId -> values (dictionary of dictionaries)
'''
def CustomDataHandler_v2(self,fvs):
for entityGroupId in list(fvs.keys()):
entityGroup = fvs[entityGroupId]
for entityId in list(entityGroup.keys()):
entityFv = entityGroup[entityId]
for fieldId in list(entityFv.keys()):
if fieldId in self.m_dcgmIgnoreFields:
continue
val = entityFv[fieldId][-1]
if val.isBlank:
continue
fieldTag = self.m_fieldIdToInfo[fieldId].tag
self.CustomFieldHandler_v2(entityGroupId, entityId, fieldId, fieldTag, val)
###########################################################################
'''
This function can be implemented as a callback in the class that inherits from DcgmReader
to handle all of the data queried from DCGM.
By default, it will simply print the field tags and values for each GPU
@params:
fvs : Dictionary with gpuID as key and values as Value
'''
def CustomDataHandler(self, fvs):
for gpuId in list(fvs.keys()):
gpuFv = fvs[gpuId]
for fieldId in list(gpuFv.keys()):
if fieldId in self.m_dcgmIgnoreFields:
continue
val = gpuFv[fieldId][-1]
if val.isBlank:
continue
fieldTag = self.m_fieldIdToInfo[fieldId].tag
self.CustomFieldHandler(gpuId, fieldId, fieldTag, val)
###########################################################################
def SetupGpuIdUUIdMappings(self):
'''
Populate the m_gpuIdToUUId map
'''
gpuIds = self.m_dcgmGroup.GetGpuIds()
for gpuId in gpuIds:
gpuInfo = self.m_dcgmSystem.discovery.GetGpuAttributes(gpuId)
self.m_gpuIdToUUId[gpuId] = gpuInfo.identifiers.uuid
###########################################################################
'''
Constructor
@params:
hostname : Address:port of the host to connect. Defaults to localhost
fieldIds : List of the field ids to publish. If it isn't specified, our default list is used.
    updateFrequency : Frequency of update in microseconds. Defaults to 10 seconds (10000000 microseconds)
maxKeepAge : Max time to keep data from NVML, in seconds. Default is 3600.0 (1 hour)
ignoreList : List of the field ids we want to query but not publish.
gpuIds : List of GPU IDs to monitor. If not provided, DcgmReader will monitor all GPUs on the system
fieldIntervalMap: Map of intervals to list of field numbers to monitor. Takes precedence over fieldIds and updateFrequency if not None.
'''
def __init__(self, hostname='localhost', fieldIds=None, updateFrequency=10000000,
maxKeepAge=3600.0, ignoreList=None, fieldGroupName='dcgm_fieldgroupData', gpuIds=None,
entities=None, fieldIntervalMap=None):
fieldIds = fieldIds or defaultFieldIds
ignoreList = ignoreList or []
self.m_dcgmHostName = hostname
self.m_updateFreq = updateFrequency # default / redundant
self.m_fieldGroupName = fieldGroupName
self.m_fieldGroup = None
self.m_publishFields = {}
if fieldIntervalMap is not None:
self.m_publishFields = fieldIntervalMap
else:
self.m_publishFields[self.m_updateFreq] = fieldIds
self.m_requestedGpuIds = gpuIds
self.m_requestedEntities = entities
self.m_dcgmIgnoreFields = ignoreList #Fields not to publish
self.m_maxKeepAge = maxKeepAge
self.m_dcgmHandle = None
self.m_dcgmSystem = None
self.m_dcgmGroup = None
self.m_closeHandle = False
self.m_gpuIdToBusId = {} #GpuID => PCI-E busId string
self.m_gpuIdToUUId = {} # FieldId => dcgm_fields.dcgm_field_meta_t
self.m_fieldIdToInfo = {} #FieldId => dcgm_fields.dcgm_field_meta_t
self.m_lock = threading.Lock() #DCGM connection start-up/shutdown is not thread safe. Just lock pessimistically
self.m_debug = False
# For GetAllSinceLastCall* calls. We cache the value for these objects
# after first retrieval, so initializing them to None lets us know if
# we've made a first retrieval. The first retrieval is based on a
# "since" timestamp of 0, so it gets data in which we are not
# interested in. The second retrieval gets data since the first one, in
# which we ARE interested. The practical upshot of this is that actual
# reporting of data is delayed one collectd sampling interval -- as if
# the sampling was actually started one collectd sampling interval
# later. We expect this is not an issue.
self.fvs = None
self.dfvc = None
self.dfvec = None
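    # Illustration (hypothetical values): a fieldIntervalMap of
    #     {1000000:  [dcgm_fields.DCGM_FI_DEV_POWER_USAGE],
    #      60000000: [dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL]}
    # asks DCGM to sample power usage every second and double-bit ECC totals
    # every minute, overriding the fieldIds/updateFrequency arguments.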
###########################################################################
'''
Define what should happen to this object at the beginning of a with
block. In this case, nothing more is needed since the constructor should've
been called.
'''
def __enter__(self):
return self
###########################################################################
'''
Define the cleanup
'''
def __exit__(self, type, value, traceback):
self.Shutdown()
###########################################################################
'''
    This function initializes DCGM from the specified directory and connects to
the host engine.
'''
def InitWrapped(self, path=None):
dcgm_structs._dcgmInit(libDcgmPath=path)
self.Reconnect()
###########################################################################
'''
    This function tries to connect to the hostengine and calls InitWrapped to
    initialize DCGM.
'''
def Init(self, libpath=None):
with self.m_lock:
try:
self.InitWrapped(path=libpath)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID):
self.LogError("Can't connect to nv-hostengine. Is it down?")
self.SetDisconnected()
###########################################################################
'''
Delete the DCGM group, DCGM system and DCGM handle and clear the attributes
on shutdown.
'''
def SetDisconnected(self):
#Force destructors since DCGM currently doesn't support more than one client connection per process
if self.m_dcgmGroup is not None:
del(self.m_dcgmGroup)
self.m_dcgmGroup = None
if self.m_dcgmSystem is not None:
del(self.m_dcgmSystem)
self.m_dcgmSystem = None
if self.m_dcgmHandle is not None:
del(self.m_dcgmHandle)
self.m_dcgmHandle = None
##########################################################################
'''
This function calls the SetDisconnected function which disconnects from
DCGM and clears DCGM handle and DCGM group.
'''
def Shutdown(self):
with self.m_lock:
if self.m_closeHandle == True:
self.SetDisconnected()
############################################################################
'''
Turns debugging output on
'''
def AddDebugOutput(self):
self.m_debug = True
############################################################################
'''
'''
def InitializeFromHandle(self):
self.m_dcgmSystem = self.m_dcgmHandle.GetSystem()
if not self.m_requestedGpuIds and not self.m_requestedEntities:
self.m_dcgmGroup = self.m_dcgmSystem.GetDefaultGroup()
else:
groupName = "dcgmreader_%d" % os.getpid()
if self.m_requestedGpuIds:
self.m_dcgmGroup = self.m_dcgmSystem.GetGroupWithGpuIds(groupName, self.m_requestedGpuIds)
if self.m_requestedEntities:
for entity in self.m_requestedEntities:
self.m_dcgmGroup.AddEntity(entity.entityGroupId, entity.entityId)
else:
self.m_dcgmGroup = self.m_dcgmSystem.GetGroupWithEntities(groupName, self.m_requestedEntities)
self.SetupGpuIdBusMappings()
self.SetupGpuIdUUIdMappings()
self.GetFieldMetadata()
self.AddFieldWatches()
############################################################################
'''
Has DcgmReader use but not own a handle. Currently for the unit tests.
'''
def SetHandle(self, handle):
self.m_dcgmHandle = pydcgm.DcgmHandle(handle)
self.InitializeFromHandle()
############################################################################
'''
Reconnect function checks if connection handle is present. If the handle is
none, it creates the handle and gets the default DCGM group. It then maps
gpuIds to BusID, set the meta data of the field ids and adds watches to the
field Ids mentioned in the idToWatch list.
'''
def Reconnect(self):
if self.m_dcgmHandle is not None:
return
self.LogDebug("Connection handle is None. Trying to reconnect")
self.m_dcgmHandle = pydcgm.DcgmHandle(None, self.m_dcgmHostName, dcgm_structs.DCGM_OPERATION_MODE_AUTO)
self.m_closeHandle = True
self.LogDebug("Connected to nv-hostengine")
self.InitializeFromHandle()
###########################################################################
'''
    Populate the m_gpuIdToBusId map. This map contains the mapping from
    gpuId to the PCI-E bus ID.
'''
def SetupGpuIdBusMappings(self):
self.m_gpuIdToBusId = {}
gpuIds = self.m_dcgmGroup.GetGpuIds()
for gpuId in gpuIds:
gpuInfo = self.m_dcgmSystem.discovery.GetGpuAttributes(gpuId)
self.m_gpuIdToBusId[gpuId] = gpuInfo.identifiers.pciBusId
###########################################################################
'''
Add watches to the fields which are passed in init function in idToWatch
list. It also updates the field values for the first time.
'''
def AddFieldWatches(self):
maxKeepSamples = 0 #No limit. Handled by m_maxKeepAge
for interval, fieldGroup in self.m_fieldGroups.items():
self.LogDebug("AddWatchFields: interval = " + str(interval) + "\n")
self.m_dcgmGroup.samples.WatchFields(fieldGroup, interval, self.m_maxKeepAge, maxKeepSamples)
self.m_dcgmSystem.UpdateAllFields(1)
self.LogDebug("AddWatchFields exit\n")
###########################################################################
'''
    If the field group already exists, we delete it and create a new field group with
    the fields mentioned in idToWatch. Then the information for each field is acquired from its id.
'''
def GetFieldMetadata(self):
self.m_fieldIdToInfo = {}
self.m_fieldGroups = {}
self.m_fieldGroup = None
allFieldIds = []
# Initialize groups for all field intervals.
self.LogDebug("GetFieldMetaData:\n")
intervalIndex = 0
for interval, fieldIds in self.m_publishFields.items():
self.LogDebug("sampling interval = " + str(interval) + ":\n")
for fieldId in fieldIds:
self.LogDebug(" fieldId: " + str(fieldId) + "\n")
intervalIndex += 1
fieldGroupName = self.m_fieldGroupName + "_" + str(intervalIndex)
findByNameId = self.m_dcgmSystem.GetFieldGroupIdByName(fieldGroupName)
self.LogDebug("fieldGroupName: " + fieldGroupName + "\n")
# Remove our field group if it exists already
if findByNameId is not None:
self.LogDebug("fieldGroupId: " + findByNameId + "\n")
delFieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle=self.m_dcgmHandle, fieldGroupId=findByNameId)
delFieldGroup.Delete()
del(delFieldGroup)
self.m_fieldGroups[interval] = pydcgm.DcgmFieldGroup(self.m_dcgmHandle, fieldGroupName, fieldIds)
for fieldId in fieldIds:
if fieldId not in allFieldIds:
allFieldIds += [fieldId]
self.m_fieldIdToInfo[fieldId] = self.m_dcgmSystem.fields.GetFieldById(fieldId)
if self.m_fieldIdToInfo[fieldId] == 0 or self.m_fieldIdToInfo[fieldId] == None:
self.LogError("Cannot get field tag for field id %d. Please check dcgm_fields to see if it is valid." % (fieldId))
raise dcgm_structs.DCGMError(dcgm_structs.DCGM_ST_UNKNOWN_FIELD)
# Initialize a field group of ALL fields.
fieldGroupName = self.m_fieldGroupName
findByNameId = self.m_dcgmSystem.GetFieldGroupIdByName(fieldGroupName)
# Remove our field group if it exists already
if findByNameId is not None:
delFieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle=self.m_dcgmHandle, fieldGroupId=findByNameId)
delFieldGroup.Delete()
del(delFieldGroup)
if len(allFieldIds) > 0:
self.m_fieldGroup = pydcgm.DcgmFieldGroup(self.m_dcgmHandle, fieldGroupName, allFieldIds)
###########################################################################
'''
This function attempts to connect to DCGM and calls the implemented
CustomDataHandler in the child class with field values.
@params:
self.m_dcgmGroup.samples.GetLatest(self.m_fieldGroup).values : The field
values for each field. This dictionary contains fieldInfo for each field id
requested to be watched.
'''
def Process(self):
with self.m_lock:
try:
self.Reconnect()
# The first call just clears the collection set.
if not self.m_requestedEntities:
self.dfvc = self.m_dcgmGroup.samples.GetAllSinceLastCall(self.dfvc, self.m_fieldGroup)
self.CustomDataHandler(self.dfvc.values)
self.dfvc.EmptyValues()
else:
self.dfvec = self.m_dcgmGroup.samples.GetAllSinceLastCall_v2(self.dfvec, self.m_fieldGroup)
self.CustomDataHandler_v2(self.dfvec.values)
self.dfvec.EmptyValues()
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID):
self.LogError("Can't connect to nv-hostengine. Is it down?")
self.SetDisconnected()
###########################################################################
def LogInfo(self, msg):
logging.info(msg)
###########################################################################
def LogDebug(self, msg):
logging.debug(msg)
###########################################################################
def LogError(self, msg):
logging.error(msg)
###########################################################################
'''
This function gets each value as a dictionary of dictionaries. The dictionary
    returned is each gpu id mapped to a dictionary of its field values. Each
field value dictionary is the field name mapped to the value or the field
id mapped to value depending on the parameter mapById.
'''
def GetLatestGpuValuesAsDict(self, mapById):
systemDictionary = {}
with self.m_lock:
try:
self.Reconnect()
fvs = self.m_dcgmGroup.samples.GetLatest(self.m_fieldGroup).values
for gpuId in list(fvs.keys()):
systemDictionary[gpuId] = {} # initialize the gpu's dictionary
gpuFv = fvs[gpuId]
for fieldId in list(gpuFv.keys()):
val = gpuFv[fieldId][-1]
if val.isBlank:
continue
if mapById == False:
fieldTag = self.m_fieldIdToInfo[fieldId].tag
                            systemDictionary[gpuId][fieldTag] = val.value.decode('utf-8') if isinstance(val.value, bytes) else val.value
else:
                            systemDictionary[gpuId][fieldId] = val.value.decode('utf-8') if isinstance(val.value, bytes) else val.value
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID):
self.LogError("Can't connection to nv-hostengine. Please verify that it is running.")
self.SetDisconnected()
return systemDictionary
###########################################################################
'''
    This function gets values as a dictionary of dictionaries of lists. The
    dictionary returned is each gpu id mapped to a dictionary of its field
value lists. Each field value dictionary is the field name mapped to the
list of values or the field id mapped to list of values depending on the
parameter mapById. The list of values are the values for each field since
the last retrieval.
'''
def GetAllGpuValuesAsDictSinceLastCall(self, mapById):
systemDictionary = {}
with self.m_lock:
try:
self.Reconnect()
report = self.fvs is not None
self.fvs = self.m_dcgmGroup.samples.GetAllSinceLastCall(self.fvs, self.m_fieldGroup)
if report:
for gpuId in list(self.fvs.values.keys()):
systemDictionary[gpuId] = {} # initialize the gpu's dictionary
gpuFv = self.fvs.values[gpuId]
for fieldId in list(gpuFv.keys()):
for val in gpuFv[fieldId]:
if val.isBlank:
continue
if mapById == False:
fieldTag = self.m_fieldIdToInfo[fieldId].tag
if not fieldTag in systemDictionary[gpuId]:
systemDictionary[gpuId][fieldTag] = []
systemDictionary[gpuId][fieldTag].append(val)
else:
if not fieldId in systemDictionary[gpuId]:
systemDictionary[gpuId][fieldId] = []
systemDictionary[gpuId][fieldId].append(val)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID):
self.LogError("Can't connection to nv-hostengine. Please verify that it is running.")
self.SetDisconnected()
if self.fvs is not None:
self.fvs.EmptyValues()
return systemDictionary
###########################################################################
def GetLatestGpuValuesAsFieldIdDict(self):
return self.GetLatestGpuValuesAsDict(True)
###########################################################################
def GetLatestGpuValuesAsFieldNameDict(self):
return self.GetLatestGpuValuesAsDict(False)
###########################################################################
def GetAllGpuValuesAsFieldIdDictSinceLastCall(self):
return self.GetAllGpuValuesAsDictSinceLastCall(True)
###########################################################################
def GetAllGpuValuesAsFieldNameDictSinceLastCall(self):
return self.GetAllGpuValuesAsDictSinceLastCall(False)
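###########################################################################
# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes a local nv-hostengine is reachable and shows the intended flow:
# subclass DcgmReader, override the per-field callback, then Init(),
# Process() periodically, and Shutdown(). The field ids and the 1-second
# update frequency below are arbitrary example choices.
###########################################################################
if __name__ == '__main__':
    import time

    class PrintingReader(DcgmReader):
        # Override the per-field callback to customize how samples are consumed.
        def CustomFieldHandler(self, gpuId, fieldId, fieldTag, val):
            print("gpu %d: %s = %s" % (gpuId, fieldTag, str(val.value)))

    reader = PrintingReader(fieldIds=[dcgm_fields.DCGM_FI_DEV_GPU_TEMP,
                                      dcgm_fields.DCGM_FI_DEV_POWER_USAGE],
                            updateFrequency=1000000)
    reader.Init()
    try:
        for _ in range(3):
            time.sleep(2)
            reader.Process()  # invokes CustomFieldHandler for each new sample
    finally:
        reader.Shutdown()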
| DCGM-master | testing/python3/DcgmReader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import subprocess
import signal
import os
import re
import sys
dir_path = os.path.dirname(os.path.realpath(__file__))
parent_dir_path = os.path.abspath(os.path.join(dir_path, os.pardir))
sys.path.insert(0, parent_dir_path)
import dcgm_fields_collectd
import pydcgm
import dcgm_fields
import dcgm_structs
import threading
from DcgmReader import DcgmReader
if 'DCGM_TESTING_FRAMEWORK' in os.environ:
try:
import collectd_tester_api as collectd
except:
import collectd
else:
import collectd
# Set default values for the hostname and the library path
g_dcgmLibPath = '/usr/lib'
g_dcgmHostName = 'localhost'
# Add overriding through the environment instead of hard coded.
if 'DCGM_HOSTNAME' in os.environ:
g_dcgmHostName = os.environ['DCGM_HOSTNAME']
if 'DCGMLIBPATH' in os.environ:
g_dcgmLibPath = os.environ['DCGMLIBPATH']
c_ONE_SEC_IN_USEC = 1000000
g_intervalSec = 10 # Default
g_dcgmIgnoreFields = [dcgm_fields.DCGM_FI_DEV_UUID] # Fields not to publish
g_publishFieldIds = [
dcgm_fields.DCGM_FI_DEV_UUID, #Needed for plugin instance
dcgm_fields.DCGM_FI_DEV_POWER_USAGE,
dcgm_fields.DCGM_FI_DEV_GPU_TEMP,
dcgm_fields.DCGM_FI_DEV_SM_CLOCK,
dcgm_fields.DCGM_FI_DEV_GPU_UTIL,
dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING,
dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_FREE,
dcgm_fields.DCGM_FI_DEV_FB_USED,
dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION,
dcgm_fields.DCGM_FI_DEV_XID_ERRORS,
dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_MEM_CLOCK,
dcgm_fields.DCGM_FI_DEV_MEMORY_TEMP,
dcgm_fields.DCGM_FI_DEV_TOTAL_ENERGY_CONSUMPTION,
dcgm_fields.DCGM_FI_DEV_MEM_COPY_UTIL,
dcgm_fields.DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL,
dcgm_fields.DCGM_FI_DEV_NVLINK_BANDWIDTH_TOTAL,
dcgm_fields.DCGM_FI_DEV_PCIE_TX_THROUGHPUT,
dcgm_fields.DCGM_FI_DEV_PCIE_RX_THROUGHPUT
]
g_fieldIntervalMap = None
g_parseRegEx = None
g_fieldRegEx = None
# We build up a regex to match field IDs. These can be numeric IDs, or
# names. We start with field_regex, which matches either form as a string
# (it also accepts names that start with digits, but we do not worry about
# this over-generation of valid IDs at this point).
#
# Basically a field is an integral number or a textual name. A field
# list is a field, or a list of fields separated by commas and enclosed
# in parenthssis. A field list may be optionally followed by a colon,
# indicating a possible non-default interval if also followed by a
# floating point interval value. This is a complete field list.
# Multiple complete field lists may appear, separated by commas.
#
# For example: (1001,tensor_active):5,1002:10
#
# This specifies that fields 1001 and tensor_active are to be sampled
# at a rate of every 5 seconds, and 1002 every ten seconds.
#
# For example: (1001,tensor_active):5,1002:
#
# This is the same, but field 1002 is to be sampled at the default rate
# (and the colon is entirely unnecessary, but not illegal).
field_regex = r"[0-9a-zA-Z_]+"
g_fieldRegEx = re.compile("((" + field_regex + "),?)")
# We now generate a list of field regular expressions, separated by a
# comma, and enclosed with parenthesis, for grouping.
fields_regex = r"\(" + field_regex + "(," + field_regex + ")*" + r"\)"
# This is an optional interval specification, allowing an optional :,
# followed by an optional floating point dcgm sampling interval. If any
# are missing, the default collectd sampling interval is used.
interval_regex = r"(:[0-9]*(\.[0-9]+)?)?,?"
# Here, we combine a field regex or field list regex with an optional
# interval regex. Multiple of these may appear in succession.
g_parseRegEx = re.compile("((" + field_regex + "|(" + fields_regex + "))" + interval_regex + ")")
class DcgmCollectdPlugin(DcgmReader):
###########################################################################
def __init__(self):
global c_ONE_SEC_IN_USEC
collectd.debug('Initializing DCGM with interval={}s'.format(g_intervalSec))
DcgmReader.__init__(self, fieldIds=g_publishFieldIds, ignoreList=g_dcgmIgnoreFields, fieldGroupName='collectd_plugin', updateFrequency=g_intervalSec*c_ONE_SEC_IN_USEC, fieldIntervalMap = g_fieldIntervalMap)
###########################################################################
def CustomDataHandler(self, fvs):
global c_ONE_SEC_IN_USEC
value = collectd.Values(type='gauge') # pylint: disable=no-member
value.plugin = 'dcgm_collectd'
for gpuId in list(fvs.keys()):
gpuFv = fvs[gpuId]
uuid = self.m_gpuIdToUUId[gpuId]
collectd.debug('CustomDataHandler uuid: ' + '%s' % (uuid) + '\n')
value.plugin_instance = '%s' % (uuid)
typeInstance = str(gpuId)
for fieldId in list(gpuFv.keys()):
# Skip ignore list
if fieldId in self.m_dcgmIgnoreFields:
continue
fieldTag = self.m_fieldIdToInfo[fieldId].tag
lastValTime = float("inf")
# Filter out times too close together (< 1.0 sec) but always
# include latest one.
for val in gpuFv[fieldId][::-1]:
# Skip blank values. Otherwise, we'd have to insert a placeholder blank value based on the fieldId
if val.isBlank:
continue
valTimeSec1970 = (val.ts / c_ONE_SEC_IN_USEC) #Round down to 1-second for now
if (lastValTime - valTimeSec1970) < 1.0:
collectd.debug("DCGM sample for field ID %d too soon at %f, last one sampled at %f" % (fieldId, valTimeSec1970, lastValTime))
val.isBlank = True # Filter this one out
continue
lastValTime = valTimeSec1970
i = 0
for val in gpuFv[fieldId]:
# Skip blank values. Otherwise, we'd have to insert a placeholder blank value based on the fieldId
if val.isBlank:
continue
# Round down to 1-second for now
valTimeSec1970 = (val.ts / c_ONE_SEC_IN_USEC)
valueArray = [val.value, ]
value.dispatch(type=fieldTag, type_instance=typeInstance, time=valTimeSec1970, values=valueArray, plugin=value.plugin)
collectd.debug(" gpuId %d, tag %s, sample %d, value %s, time %s" % (gpuId, fieldTag, i, str(val.value), str(val.ts))) # pylint: disable=no-member
i += 1
###########################################################################
def LogInfo(self, msg):
collectd.info(msg) # pylint: disable=no-member
###########################################################################
def LogError(self, msg):
collectd.error(msg) # pylint: disable=no-member
###############################################################################
##### Parse supplied collectd configuration object.
###############################################################################
def parse_config(config):
global c_ONE_SEC_IN_USEC
global g_intervalSec
global g_fieldIntervalMap
global g_parseRegEx
global g_fieldRegEx
g_fieldIntervalMap = {}
for node in config.children:
if node.key == 'Interval':
g_intervalSec = float(node.values[0])
elif node.key == 'FieldIds':
fieldIds = node.values[0]
# And we parse out the field ID list with this regex.
field_set_list = g_parseRegEx.finditer(fieldIds)
for field_set in field_set_list:
# We get the list of fields...
fields = field_set.group(2)
# ... and the optional interval.
interval_str = field_set.group(5)
# We figure out if the default collectd sampling interval is
# to be used, or a different one.
if (interval_str == None) or (interval_str == ":"):
interval = int(g_intervalSec * c_ONE_SEC_IN_USEC)
else:
interval = int(float(interval_str[1:]) * c_ONE_SEC_IN_USEC) # strip :
# Here we parse out either multiple fields sharing an
# interval, or a single field.
if fields[0:1] == "(": # a true field set
fields = fields[1:-1]
field_list = g_fieldRegEx.finditer(fields)
for field_group in field_list:
# We map any field names to field numbers, and add
# them to the list for the interval
field = dcgm_fields_collectd.GetFieldByName(field_group.group(2))
if (field >= 0):
# We keep a set of fields for each unique interval
if interval not in g_fieldIntervalMap.keys():
g_fieldIntervalMap[interval] = []
g_fieldIntervalMap[interval] += [field]
else:
collectd.error("Field " + field_group.group(2) + " does not exist.")
else: # just one field
# Map field name to number.
field = dcgm_fields_collectd.GetFieldByName(fields)
if (field >= 0):
# We keep a set of fields for each unique interval
if interval not in g_fieldIntervalMap.keys():
g_fieldIntervalMap[interval] = []
g_fieldIntervalMap[interval] += [field]
else:
collectd.error("Field " + fields + " does not exist.")
###############################################################################
##### Wrapper the Class methods for collectd callbacks
###############################################################################
def config_dcgm(config = None):
"""
collectd config for dcgm is in the form of a dcgm.conf file, usually
installed in /etc/collectd/collectd.conf.d/dcgm.conf.
An example is:
LoadPlugin python
<Plugin python>
ModulePath "/usr/lib64/collectd/dcgm"
LogTraces true
Interactive false
Import "dcgm_collectd_plugin"
<Module dcgm_collectd_plugin>
Interval 2
FieldIds "(1001,tensor_active):5,1002:10,1004:.1,1005:"
FieldIds "1013"
</Module>
</Plugin>
ModulePath indicates where the plugin and supporting files are installed
(generally copied from /usr/local/dcgm/bindings/python3).
Interval is the default collectd sampling interval in seconds.
FieldIds may appear several times. One is either a field ID by name or
number. A field ID list is either a single field ID or a list of same,
separated by commas (,) and bounded by parenthesis ( ( and ) ). Each field
ID list can be followed by an optional colon (:) and a floating point
DCGM sampling interval. If no sampling interval is specified the default
collectd sampling interval is used (and the colon is redundant but not
illegal). Multiple field ID lists can appear on one FieldIds entry,
separated by commas (,). FieldIDs are strings and must be enclosed in
quotes ("). Multiple FieldIds lines are permitted.
DCGM will sample the fields at the interval(s) indicated, and collectd will
collect the samples asynchronously at the Interval specified. Because this
is asynchronous sometimes one less than expected will be collected and other
times one more than expected will be collected.
"""
# If we throw an exception here, collectd config will terminate loading the
# plugin.
if config is not None:
parse_config(config)
# Register the read function with the default collectd sampling interval.
collectd.register_read(read_dcgm, interval=g_intervalSec) # pylint: disable=no-member
###############################################################################
def init_dcgm():
global g_dcgmCollectd
# restore default SIGCHLD behavior to avoid exceptions with new processes
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
g_dcgmCollectd = DcgmCollectdPlugin()
g_dcgmCollectd.Init()
###############################################################################
def shutdown_dcgm():
g_dcgmCollectd.Shutdown()
###############################################################################
def read_dcgm(data=None):
if g_dcgmCollectd.m_fieldGroup is None:
collectd.error('No DCGM fields collected: Did you forget FieldIds collectd DCGM config entries or not start nv-hostengine?')
else:
g_dcgmCollectd.Process()
def register_collectd_callbacks():
collectd.register_config(config_dcgm, name="dcgm_collectd_plugin") # pylint: disable=no-member
# config_dcgm registers read since it needs to parse the sampling interval.
collectd.register_init(init_dcgm) # pylint: disable=no-member
collectd.register_shutdown(shutdown_dcgm) # pylint: disable=no-member
###############################################################################
##### Main
###############################################################################
register_collectd_callbacks()
| DCGM-master | testing/python3/dcgm_collectd_plugin.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../')
import dcgm_structs
import dcgm_fields
import dcgm_agent
import dcgmvalue
from threading import Thread
from time import sleep
## Look at __name__ == "__main__" for entry point to the script
class RunDCGM():
def __init__(self, ip, opMode):
self.ip = ip
self.opMode = opMode
def __enter__(self):
dcgm_structs._dcgmInit()
self.handle = dcgm_agent.dcgmInit()
return self.handle
def __exit__(self, eType, value, traceback):
dcgm_agent.dcgmShutdown()
## Helper method to convert enum to system name
def helper_convert_system_enum_to_sytem_name(system):
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_PCIE):
return "PCIe"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_NVLINK):
return "NvLink"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_PMU):
return "PMU"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_MCU):
return "MCU"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_MEM):
return "MEM"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_SM):
return "SM"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_INFOROM):
return "Inforom"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_THERMAL):
return "Thermal"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_POWER):
return "Power"
if system & (1 << dcgm_structs.DCGM_HEALTH_WATCH_DRIVER):
return "Driver"
## Helper method to convert a health result to a string for display purposes
def convert_overall_health_to_string(health):
if health == dcgm_structs.DCGM_HEALTH_RESULT_PASS:
return "Pass"
elif health == dcgm_structs.DCGM_HEALTH_RESULT_WARN:
return "Warn"
elif health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL:
return "Fail"
else :
return "N/A"
## Worker function
def agent_worker_function(dcgmHandle, groupId):
NUM_ITERATIONS = 5
count = 0
groupId = groupId
## Add the health watches
newSystems = dcgm_structs.DCGM_HEALTH_WATCH_ALL
dcgm_agent.dcgmHealthSet(dcgmHandle, groupId, newSystems)
while True:
dcgm_agent.dcgmUpdateAllFields(dcgmHandle, 1)
try:
## Invoke Health checks
group_health = dcgm_agent.dcgmHealthCheck(dcgmHandle, groupId)
print("Overall Health for the group: %s" % convert_overall_health_to_string(group_health.overallHealth))
for index in range (0, group_health.gpuCount):
print("GPU ID : %d" % group_health.gpu[index].gpuId)
for incident in range (0, group_health.gpu[index].incidentCount):
print("system tested : %d" % group_health.gpu[index].systems[incident].system)
print("system health : %s" % convert_overall_health_to_string(group_health.gpu[index].systems[incident].health))
print("system health err : %s" % group_health.gpu[index].systems[incident].errorString)
print("\n")
except dcgm_structs.DCGMError as e:
errorCode = e.value
print("dcgmEngineHelathCheck returned error: %d" % errorCode)
sys.exc_clear()
count = count + 1
if count == NUM_ITERATIONS:
break
sleep(2)
## Main
def main():
    ## Initialize the DCGM Engine in manual operation mode. This implies that its execution is
## controlled by the monitoring agent. The user has to periodically call APIs such as
## dcgmEnginePolicyTrigger and dcgmEngineUpdateAllFields which tells DCGM to wake up and
## perform data collection and operations needed for policy management.
with RunDCGM('127.0.0.1', dcgm_structs.DCGM_OPERATION_MODE_MANUAL) as handle:
## Create a default group. (Default group is comprised of all the GPUs on the node)
## Let's call the group as "all_gpus_group". The method returns an opaque handle (groupId) to
## identify the newly created group.
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "all_gpus_group")
## Invoke method to get information on the newly created group
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
## Create reference to DCGM status handler which can be used to get the statuses for multiple
## operations on one or more devices present in the group
status_handle = dcgm_agent.dcgmStatusCreate()
## The worker function can be executed as a separate thread or as part of the main thread.
## Executed as a separate thread here
thread = Thread(target = agent_worker_function, args = (handle, groupId))
thread.start()
##########################################
# Any other useful work can be placed here
##########################################
thread.join()
print("Worker thread completed")
## Destroy the group
try:
dcgm_agent.dcgmGroupDestroy(handle, groupId)
except dcgm_structs.DCGMError as e:
print("Failed to remove the test group, error: %s" % e, file=sys.stderr)
sys.exit(1)
## Destroy the status handle
try:
dcgm_agent.dcgmStatusDestroy(status_handle)
except dcgm_structs.DCGMError as e:
print("Failed to remove status handler, error: %s" % e, file=sys.stderr)
sys.exit(1)
if __name__ == '__main__':
main()
| DCGM-master | testing/python3/isv_scripts/dcgm_health_check.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../')
import dcgm_structs
import dcgm_fields
import dcgm_agent
import dcgmvalue
from threading import Thread
from time import sleep
## Look at __name__ == "__main__" for entry point to the script
class RunDCGM():
def __init__(self, ip, opMode):
self.ip = ip
self.opMode = opMode
def __enter__(self):
dcgm_structs._dcgmInit()
self.handle = dcgm_agent.dcgmInit()
return self.handle
def __exit__(self, eType, value, traceback):
dcgm_agent.dcgmShutdown()
## Helper method to convert DCGM value to string
def convert_value_to_string(value):
v = dcgmvalue.DcgmValue(value)
try:
if (v.IsBlank()):
return "N/A"
else:
return v.__str__()
except:
        ## Exception is generally thrown when int32 is
## passed as an input. Use additional methods to fix it
v = dcgmvalue.DcgmValue(0)
v.SetFromInt32(value)
if (v.IsBlank()):
return "N/A"
else:
return v.__str__()
## Helper method to investigate the status handler
def helper_investigate_status(statusHandle):
"""
Helper method to investigate status handle
"""
errorCount = 0;
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
while (errorInfo != None):
errorCount += 1
print("Error%d" % errorCount)
print((" GPU Id: %d" % errorInfo.gpuId))
print((" Field ID: %d" % errorInfo.fieldId))
print((" Error: %d" % errorInfo.status))
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
## Worker Function to get Configuration for a dcgm group
def agent_worker_function(handle, groupId, groupInfo, status_handle):
NUM_ITERATIONS = 5
count = 0
while True:
dcgm_agent.dcgmUpdateAllFields(handle, 1)
## Get the current configuration for the group
config_values = dcgm_agent.dcgmConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle)
        ## Since this is a group operation, check the status codes in case any of the properties failed
helper_investigate_status(status_handle)
dcgm_agent.dcgmStatusClear(status_handle)
## Display current configuration for the group
for x in range(0,groupInfo.count):
print("GPU Id : %d" % (config_values[x].gpuId))
print("Ecc Mode : %s" % (convert_value_to_string(config_values[x].mEccMode)))
print("Auto Boost : %s" % (convert_value_to_string(config_values[x].mPerfState.autoBoost)))
print("Sync Boost : %s" % (convert_value_to_string(config_values[x].mPerfState.autoBoost)))
print("Mem Clock : %s" % (convert_value_to_string(config_values[x].mPerfState.minVPState.memClk)))
print("SM Clock : %s" % (convert_value_to_string(config_values[x].mPerfState.minVPState.procClk)))
print("Power Limit : %s" % (convert_value_to_string(config_values[x].mPowerLimit.val)))
print("Compute Mode: %s" % (convert_value_to_string(config_values[x].mComputeMode)))
print("\n")
count = count + 1
if count == NUM_ITERATIONS:
break
sleep(2)
## Entry point for this script
if __name__ == "__main__":
    ## Initialize the DCGM Engine in manual operation mode. This implies that its execution is
## controlled by the monitoring agent. The user has to periodically call APIs such as
## dcgmEnginePolicyTrigger and dcgmEngineUpdateAllFields which tells DCGM to wake up and
## perform data collection and operations needed for policy management.
with RunDCGM('127.0.0.1', dcgm_structs.DCGM_OPERATION_MODE_MANUAL) as handle:
## Create a default group. (Default group is comprised of all the GPUs on the node)
## Let's call the group as "all_gpus_group". The method returns an opaque handle (groupId) to
## identify the newly created group.
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "all_gpus_group")
## Invoke method to get information on the newly created group
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
## Create reference to DCGM status handler which can be used to get the statuses for multiple
## operations on one or more devices present in the group
status_handle = dcgm_agent.dcgmStatusCreate()
## The worker function can be executed as a separate thread or as part of the main thread.
## Executed as a separate thread here
thread = Thread(target = agent_worker_function, args = (handle, groupId, groupInfo, status_handle))
thread.start()
##########################################
# Any other useful work can be placed here
##########################################
thread.join()
print("Worker thread completed")
## Destroy the group
ret = dcgm_agent.dcgmGroupDestroy(handle, groupId)
assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to remove the test group, error: %s" % ret
## Destroy the status handle
ret = dcgm_agent.dcgmStatusDestroy(status_handle)
assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to remove status handler, error: %s" % ret
| DCGM-master | testing/python3/isv_scripts/dcgm_config_settings.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DCGM-master | testing/python3/isv_scripts/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0, '../')
import dcgm_structs
import dcgm_fields
import dcgm_agent
import dcgmvalue
from ctypes import *
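# ctypes prototype for the DCGM policy callback: it returns nothing and receives a single opaque pointer.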
C_FUNC = CFUNCTYPE(None, c_void_p)
def callback_function(data):
print("Received a callback from the policy manager")
c_callback = C_FUNC(callback_function)
class RunDCGM():
def __init__(self, ip, opMode):
self.ip = ip
self.opMode = opMode
def __enter__(self):
dcgm_structs._dcgmInit()
self.handle = dcgm_agent.dcgmInit()
return self.handle
def __exit__(self, eType, value, traceback):
dcgm_agent.dcgmShutdown()
## Main
if __name__ == "__main__":
    ## Initialize the DCGM Engine in manual operation mode. This implies that its execution is
    ## controlled by the monitoring agent. The user has to periodically call APIs such as
    ## dcgmEnginePolicyTrigger and dcgmEngineUpdateAllFields which tell DCGM to wake up and
    ## perform the data collection and operations needed for policy management.
with RunDCGM('127.0.0.1', dcgm_structs.DCGM_OPERATION_MODE_MANUAL) as handle:
# The validate information should be packed in the dcgmRunDiag object
runDiagInfo = dcgm_structs.c_dcgmRunDiag_v7()
runDiagInfo.version = dcgm_structs.dcgmRunDiag_version7
## Create a default group. (Default group is comprised of all the GPUs on the node)
## Let's call the group as "all_gpus_group". The method returns an opaque handle (groupId) to
## identify the newly created group.
runDiagInfo.groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "all_gpus_group")
## Invoke method to get information on the newly created group
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, runDiagInfo.groupId)
## define the actions and validations for those actions to take place
runDiagInfo.validate = dcgm_structs.DCGM_POLICY_VALID_SV_SHORT
## This will go ahead and perform a "prologue" diagnostic
## to make sure everything is ready to run
## currently this calls an outside diagnostic binary but eventually
## that binary will be merged into the DCGM framework
## The "response" is a dcgmDiagResponse structure that can be parsed for errors
response = dcgm_agent.dcgmActionValidate_v2(handle, runDiagInfo)
## This will perform an "eiplogue" diagnostic that will stress the system
## Currently commented out because it takes several minutes to execute
# runDiagInfo.validate = dcgm_structs.DCGM_POLICY_VALID_SV_LONG
#response = dcgm_agent.dcgmActionValidate_v2(handle, dcgmRunDiagInfo)
## prime the policy manager to look for ECC, PCIe events
## if a callback occurs the function above is called. Currently the data returned
## corresponds to the error that occurred (PCI, DBE, etc.) but in the future it will be a
## dcgmPolicyViolation_t or similar
ret = dcgm_agent.dcgmPolicyRegister(handle, runDiagInfo.groupId, dcgm_structs.DCGM_POLICY_COND_PCI | dcgm_structs.DCGM_POLICY_COND_DBE, None, c_callback)
## trigger the policy loop
## typically this would be looped in a separate thread or called on demand
ret = dcgm_agent.dcgmPolicyTrigger(handle)
## Destroy the group
try:
dcgm_agent.dcgmGroupDestroy(handle, runDiagInfo.groupId)
except dcgm_structs.DCGMError as e:
print("Failed to remove the test group, error: %s" % e, file=sys.stderr)
sys.exit(1)
| DCGM-master | testing/python3/isv_scripts/dcgm_diagnostic.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0,'..')
import dcgm_agent
import dcgm_agent_internal
import dcgm_structs
import dcgm_fields
import dcgmvalue
import dcgm_structs_internal
from ctypes import *
import time
def helper_get_status_list(statusHandle):
"""
Helper method to get status list from the provided status handle
"""
errorList = list()
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
while (errorInfo != None):
errorList.append(errorInfo)
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
return errorList
def helper_verify_config_values_standalone(handle, groupId, expected_power, expected_ecc, \
expected_proc_clock, expected_mem_clock, expected_compute_mode, \
expected_sync_boost, expected_auto_boost):
"""
Helper Method to verify all the values for the current configuration are as expected
"""
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId, dcgm_structs.c_dcgmGroupInfo_version2)
status_handle = dcgm_agent.dcgmStatusCreate()
config_values = dcgm_agent.dcgmConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle)
assert len(config_values) > 0, "Failed to get configuration using dcgmConfigGet"
for x in range(0,groupInfo.count):
assert config_values[x].mPowerLimit.type == dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL, \
"The power limit type for gpuId %d is incorrect. Returned: %d Expected :%d" \
% (x, config_values[x].mPowerLimit.type, dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL)
assert config_values[x].mPowerLimit.val == expected_power, "The power limit value for gpuID %d is incorrect. Returned: %d Expected: %d" \
% (x, config_values[x].mPowerLimit.val, expected_power)
assert config_values[x].mPerfState.syncBoost == expected_sync_boost, "The syncboost value for gpuID %d is incorrect."\
" Returned: %d Expected: %d" \
% (x, config_values[x].mPerfState.syncBoost, expected_sync_boost)
assert config_values[x].mPerfState.autoBoost == expected_auto_boost, "The autoboost value for gpuID %d is incorrect."\
" Returned: %d Expected: %d" \
% (x, config_values[x].mPerfState.autoBoost, expected_auto_boost)
assert config_values[x].mPerfState.minVPState.memClk == expected_mem_clock, "The min mem clock value for gpuID %d is incorrect."\
" Returned: %d Expected: %d" \
            % (x, config_values[x].mPerfState.minVPState.memClk, expected_mem_clock)
assert config_values[x].mPerfState.minVPState.procClk == expected_proc_clock, "The min proc clock value for gpuID %d is incorrect."\
" Returned: %d Expected: %d" \
% (x, config_values[x].mPerfState.minVPState.procClk , expected_proc_clock)
assert config_values[x].mComputeMode == expected_compute_mode, "The compute mode value for gpuID %d is incorrect."\
" Returned: %d Expected: %d" \
% (x, config_values[x].mComputeMode, expected_compute_mode)
assert config_values[x].mEccMode == expected_ecc, "The ecc mode value for gpuID %d is incorrect."\
" Returned: %d Expected: %d" \
% (x, config_values[x].mEccMode, expected_ecc)
pass
ret = dcgm_agent.dcgmStatusDestroy(status_handle)
assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to remove status handler, error: %s" % ret
dcgm_structs._LoadDcgmLibrary()
handle = dcgm_agent.dcgmInit()
devices = dcgm_agent.dcgmGetAllDevices(handle)
validDevices = list()
for x in devices:
fvSupported = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, x, [dcgm_fields.DCGM_FI_DEV_RETIRED_DBE, ])
if (fvSupported[0].value.i64 != dcgmvalue.DCGM_INT64_NOT_SUPPORTED):
validDevices.append(x)
if (len(validDevices) == 0):
print("Can only run if at least one GPU with ECC is present")
sys.exit(1)
print("Number of valid devices: %d" % len(validDevices))
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_EMPTY, "test1")
statusHandle = dcgm_agent.dcgmStatusCreate()
for device in validDevices:
ret = dcgm_agent.dcgmGroupAddDevice(handle, groupId, device)
assert (ret == dcgm_structs.DCGM_ST_OK)
## Get attributes for all the devices
attributesForDevices = list()
for device in validDevices:
attributes = dcgm_agent.dcgmGetDeviceAttributes(handle, device)
attributesForDevices.append(attributes)
assert len(attributesForDevices) != 0, "Can't get attributes for all the devices"
device0_name = attributesForDevices[0].identifiers.deviceName
for attribute in attributesForDevices:
if attribute.identifiers.deviceName != device0_name:
print("Can only run test if all the GPUs are same")
sys.exit(1)
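## Pick a target power limit halfway between the min and max limits, but only if the GPU reports a power management limit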
powerLimit_set = dcgmvalue.DCGM_INT32_BLANK
fvSupported = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, x, [dcgm_fields.DCGM_FI_DEV_POWER_MGMT_LIMIT, ])
if (fvSupported[0].value.i64 != dcgmvalue.DCGM_INT64_NOT_SUPPORTED):
powerLimit_set = (attributesForDevices[0].powerLimits.maxPowerLimit + attributesForDevices[0].powerLimits.minPowerLimit)/2
print("configure power limit")
autoBoost_set = dcgmvalue.DCGM_INT32_BLANK
fvSupported = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, x, [dcgm_fields.DCGM_FI_DEV_AUTOBOOST, ])
if (fvSupported[0].value.i64 != dcgmvalue.DCGM_INT64_NOT_SUPPORTED):
autoBoost_set = 1
print("configure autobost")
assert attributesForDevices[0].vpStates.count > 0, "Can't find clocks for the device"
total_clocks = attributesForDevices[0].vpStates.count
proc_clk_set = attributesForDevices[0].vpStates.vpState[total_clocks//2].procClk
mem_clk_set = attributesForDevices[0].vpStates.vpState[total_clocks//2].memClk
## Always Switch the ecc mode
ecc_set = 1
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId, dcgm_structs.c_dcgmGroupInfo_version2)
config_values = dcgm_agent.dcgmConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, 0)
assert len(config_values) > 0, "Failed to work with NULL status handle"
eccmodeOnGroupExisting = config_values[0].mEccMode
if eccmodeOnGroupExisting == 0:
ecc_set = 1
else:
ecc_set = 0
syncboost_set = 1
compute_set = dcgm_structs.DCGM_CONFIG_COMPUTEMODE_DEFAULT
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = ecc_set
config_values.mPerfState.syncBoost = syncboost_set
config_values.mPerfState.autoBoost = autoBoost_set
config_values.mPerfState.minVPState.memClk = mem_clk_set
config_values.mPerfState.minVPState.procClk = proc_clk_set
config_values.mPerfState.maxVPState.memClk = mem_clk_set
config_values.mPerfState.maxVPState.procClk = proc_clk_set
config_values.mComputeMode = compute_set
config_values.mPowerLimit.type = dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL
config_values.mPowerLimit.val = powerLimit_set
## Set Config and verify the value
ret = dcgm_agent.dcgmConfigSet(handle, groupId, config_values, statusHandle)
errors = helper_get_status_list(statusHandle)
ecc_to_verify = ecc_set
if len(errors) > 0:
## Possible that reset failed. Check the error codes
for error in errors:
if error.fieldId == dcgm_fields.DCGM_FI_DEV_ECC_CURRENT:
ecc_to_verify = eccmodeOnGroupExisting
#assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to set configuration for the group: %s" % ret
dcgm_agent.dcgmStatusClear(statusHandle)
helper_verify_config_values_standalone(handle, groupId, powerLimit_set, ecc_to_verify, proc_clk_set, mem_clk_set, compute_set, syncboost_set, autoBoost_set)
print("Verification Successful")
ret = dcgm_agent.dcgmGroupDestroy(handle, groupId)
assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to remove the test group, error: %s" % ret
ret = dcgm_agent.dcgmStatusDestroy(statusHandle)
assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to remove status handler, error: %s" % ret
dcgm_agent.dcgmShutdown()
| DCGM-master | testing/python3/burn-in-individuals/test_config.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0,'..')
import dcgm_agent
import dcgm_agent_internal
import dcgm_structs
import dcgm_fields
import dcgmvalue
import dcgm_structs_internal
from ctypes import *
import time
global callbackCalled
callbackCalled = False
C_FUNC = CFUNCTYPE(None, c_void_p)
def helper_verify_power_value_standalone(handle, groupId, expected_power):
"""
Helper Method to verify power value
"""
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId, dcgm_structs.c_dcgmGroupInfo_version2)
status_handle = dcgm_agent.dcgmStatusCreate()
config_values = dcgm_agent.dcgmConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle)
assert len(config_values) > 0, "Failed to get configuration using dcgmConfigGet"
for x in range(0,groupInfo.count):
if (config_values[x].mPowerLimit.val != dcgmvalue.DCGM_INT32_NOT_SUPPORTED):
assert config_values[x].mPowerLimit.type == dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL, \
"The power limit type for gpuId %d is incorrect. Returned: %d Expected :%d" \
% (x, config_values[x].mPowerLimit.type, dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL)
assert config_values[x].mPowerLimit.val == expected_power, "The power limit value for gpuID %d is incorrect. Returned: %d Expected: %d" \
% (x, config_values[x].mPowerLimit.val, expected_power)
pass
ret = dcgm_agent.dcgmStatusDestroy(status_handle)
assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to remove status handler, error: %s" % ret
def callback_function(data):
global callbackCalled
callbackCalled = True
c_callback = C_FUNC(callback_function)
dcgm_structs._LoadDcgmLibrary()
newPolicy = dcgm_structs.c_dcgmPolicy_v1()
handle = dcgm_agent.dcgmInit()
newPolicy.version = dcgm_structs.dcgmPolicy_version1
newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_MAX_PAGES_RETIRED
newPolicy.action = dcgm_structs.DCGM_POLICY_ACTION_GPURESET
newPolicy.validation = dcgm_structs.DCGM_POLICY_VALID_SV_SHORT
newPolicy.parms[2].tag = 1
newPolicy.parms[2].val.llval = 5
# find a GPU that supports retired pages (otherwise internal test will ignore it)
devices = dcgm_agent.dcgmGetAllDevices(handle)
validDevice = -1
for x in devices:
fvSupported = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, x, [dcgm_fields.DCGM_FI_DEV_RETIRED_DBE, ])
if (fvSupported[0].value.i64 != dcgmvalue.DCGM_INT64_NOT_SUPPORTED):
validDevice = x
break
if (validDevice == -1):
print("Can only run if at least one GPU with ECC is present")
sys.exit(1)
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_EMPTY, "test1")
statusHandle = dcgm_agent.dcgmStatusCreate()
ret = dcgm_agent.dcgmGroupAddDevice(handle, groupId, validDevice)
assert (ret == dcgm_structs.DCGM_ST_OK)
## Add Configuration to be different than default
## Get Min and Max Power limit on the group
attributes = dcgm_agent.dcgmGetDeviceAttributes(handle, validDevice)
## Verify that power is supported on the GPUs in the group
powerLimit_set = (attributes.powerLimits.maxPowerLimit + attributes.powerLimits.minPowerLimit)/2
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.autoBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.minVPState.memClk = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.minVPState.procClk = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.maxVPState.memClk = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.maxVPState.procClk = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.type = dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL
config_values.mPowerLimit.val = powerLimit_set
## Set Config and verify the value
ret = dcgm_agent.dcgmConfigSet(handle, groupId, config_values, statusHandle)
assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to set configuration for the group: %s" % ret
dcgm_agent.dcgmStatusClear(statusHandle)
helper_verify_power_value_standalone(handle, groupId, powerLimit_set)
ret = dcgm_agent.dcgmPolicySet(handle, groupId, newPolicy, statusHandle)
assert (ret == dcgm_structs.DCGM_ST_OK)
time.sleep(5) # give the policy manager a chance to start
requestId = dcgm_agent.dcgmPolicyRegister(handle, groupId, dcgm_structs.DCGM_POLICY_COND_MAX_PAGES_RETIRED, c_callback, c_callback)
assert(requestId != None)
# inject an error into page retirement
field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
field.fieldId = dcgm_fields.DCGM_FI_DEV_RETIRED_DBE
field.status = 0
field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
field.ts = int((time.time()+11) * 1000000.0) # set the injected data into the future
field.value.i64 = 10
ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, validDevice, field)
assert (ret == dcgm_structs.DCGM_ST_OK)
now = time.time()
while not callbackCalled:
    if time.time() >= now + 60: # wait 60 seconds
print("Timeout waiting for callback")
sys.exit(1)
print("Callback successfully received.")
## Verify that configuration is auto-enforced after GPU reset
#dcgm_agent.dcgmStatusClear(statusHandle)
#ret = dcgm_agent.dcgmConfigEnforce(handle, groupId, statusHandle)
helper_verify_power_value_standalone(handle, groupId, powerLimit_set)
print("Config enforce verification successful")
ret = dcgm_agent.dcgmGroupDestroy(handle, groupId)
assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to remove the test group, error: %s" % ret
ret = dcgm_agent.dcgmStatusDestroy(statusHandle)
assert(ret == dcgm_structs.DCGM_ST_OK), "Failed to remove status handler, error: %s" % ret
dcgm_agent.dcgmShutdown()
| DCGM-master | testing/python3/burn-in-individuals/test_pr.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.insert(0,'..')
import test_utils
import apps
import time
import dcgm_agent
import dcgm_agent_internal
import dcgm_structs
import dcgm_fields
import logger
import option_parser
import random
from dcgm_structs import dcgmExceptionClass
g_processesPerSecond = 5 #How many processes should we launch per second
g_processRunTime = 0.1 #How long should each process run in seconds
g_runHostEngineLockStep = False #Should we run the host engine in lock step or auto mode?
g_embeddedMode = True #Run the host engine embedded in python = True. Remote = False
if g_runHostEngineLockStep:
g_engineMode = dcgm_structs.DCGM_OPERATION_MODE_MANUAL
else:
g_engineMode = dcgm_structs.DCGM_OPERATION_MODE_AUTO
class ProcessStatsStressPid:
def __init__(self):
self.pid = 0
self.gpuId = 0
self.appObj = None
class ProcessStatsStressGpu:
def __init__(self):
self.gpuId = -1 #DCGM gpu ID
self.busId = "" #Bus ID string
class ProcessStatsStress:
def __init__(self, embeddedMode, heHandle):
self.gpus = [] #Array of ProcessStatsStressGpu objects
self.groupName = "pss_group"
self.groupId = None
self.addedPids = []
self.embeddedMode = embeddedMode
self.heHandle = heHandle
def __del__(self):
if self.groupId is not None:
dcgm_agent.dcgmGroupDestroy(self.heHandle, self.groupId)
self.groupId = None
self.heHandle = None
def Log(self, strVal):
print(strVal) #Just print for now. Can do more later
def GetGpus(self):
"""
Populate self.gpus
"""
self.groupId = dcgm_agent.dcgmGroupCreate(self.heHandle, dcgm_structs.DCGM_GROUP_DEFAULT, self.groupName)
groupInfo = dcgm_agent.dcgmGroupGetInfo(self.heHandle, self.groupId, dcgm_structs.c_dcgmGroupInfo_version2)
gpuIds = groupInfo.gpuIdList[0:groupInfo.count]
self.Log("Running on %d GPUs" % len(gpuIds))
for gpuId in gpuIds:
newGpu = ProcessStatsStressGpu()
newGpu.gpuId = gpuId
self.gpus.append(newGpu)
#Get the busid of the GPU
fieldId = dcgm_fields.DCGM_FI_DEV_PCI_BUSID
updateFreq = 100000
maxKeepAge = 3600.0 #one hour
maxKeepEntries = 0 #no limit
dcgm_agent_internal.dcgmWatchFieldValue(self.heHandle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries)
#Update all of the new watches
dcgm_agent.dcgmUpdateAllFields(self.heHandle, 1)
for gpu in self.gpus:
            values = dcgm_agent_internal.dcgmGetLatestValuesForFields(self.heHandle, gpu.gpuId, [fieldId,])
busId = values[0].value.str.decode('utf-8')
gpu.busId = busId
self.Log(" GPUID %d, busId %s" % (gpu.gpuId, gpu.busId))
def WatchProcessStats(self):
#watch the process info fields
updateFreq = 1000000
maxKeepAge = 3600.0
maxKeepEntries = 0
dcgm_agent.dcgmWatchPidFields(self.heHandle, self.groupId, updateFreq, maxKeepAge, maxKeepEntries)
def StartAppOnGpus(self):
for gpu in self.gpus:
pidObj = ProcessStatsStressPid()
appTimeout = int(1000 * g_processRunTime)
            #Start a cuda app so we have something to be accounted for
appParams = ["--ctxCreate", gpu.busId,
"--busyGpu", gpu.busId, str(appTimeout),
"--ctxDestroy", gpu.busId]
app = apps.CudaCtxCreateAdvancedApp(appParams)
app.start(appTimeout*2)
pidObj.pid = app.getpid()
pidObj.appObj = app
self.addedPids.append(pidObj)
self.Log("Started PID %d. Runtime %d ms" % (pidObj.pid, appTimeout))
def LoopOneIteration(self):
for i in range(g_processesPerSecond):
self.StartAppOnGpus()
#How many PIDs should we buffer by? Below is 3 seconds worth
pidBuffer = (3 * g_processesPerSecond * len(self.gpus)) + 1
#Do we have any pids that have finished yet? Clean them up
while len(self.addedPids) > pidBuffer:
#Look up PID info on a random PID that should be done. Assuming 3 seconds is enough
pidIndex = random.randint(0, len(self.addedPids) - pidBuffer)
pidObj = self.addedPids[pidIndex]
try:
processStats = dcgm_agent.dcgmGetPidInfo(self.heHandle, self.groupId, pidObj.pid)
self.Log("Found pid stats for pid %d. gpuId %d. returned pid %d" % (pidObj.pid, pidObj.gpuId, processStats.pid))
except dcgmExceptionClass(dcgm_structs.DCGM_ST_NO_DATA):
self.Log("Pid %d hasn't finished yet. Sleeping to allow cuda to catch up" % pidObj.pid)
time.sleep(1.0)
break
#Finalize the resources the app object watches
pidObj.appObj.wait()
#Delete the found pid so we don't run out of file handles
del self.addedPids[pidIndex]
pidObj = None
def RunLoop(self):
while True:
loopStart = time.time()
#Do the loop work
self.LoopOneIteration()
loopEnd = time.time()
loopDuration = loopEnd - loopStart
if loopDuration > 1.0:
self.Log("Warning: Loop took %.2f seconds" % loopDuration)
continue #Keep on going
sleepFor = 1.0 - loopDuration
time.sleep(sleepFor)
def Run(self):
self.GetGpus()
self.WatchProcessStats()
self.RunLoop()
def processMatchFn(stdoutStr):
'''
Callback passed to HostEngineApp.stdout_readtillmatch to see if the host engine has started
'''
if stdoutStr.find("Host Engine Listener Started") >= 0:
return True
else:
return False
def main():
#Make sure logging stuff is bootstrapped
try:
option_parser.parse_options()
option_parser.options.no_logging = True #Don't log anything
heHandle = None
heAppRunner = None
dcgm_structs._LoadDcgmLibrary()
if g_embeddedMode:
host = 0
else:
#Start host engine
heAppRunner = apps.NvHostEngineApp()
heAppRunner.start(timeout=1000000000)
time.sleep(2.0)
host = "127.0.0.1"
heHandle = dcgm_agent.dcgmInit()
pssObj = ProcessStatsStress(g_embeddedMode, heHandle)
pssObj.Run()
del(pssObj) #Force destructor
        if heAppRunner is not None:
            heAppRunner.wait()
except Exception as e:
raise
finally:
apps.AppRunner.clean_all()
if heHandle is not None:
dcgm_agent.dcgmShutdown()
if __name__ == "__main__":
main()
| DCGM-master | testing/python3/burn-in-individuals/process_stats_stress.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_fields
import dcgm_agent_internal
import dcgm_structs
import time
dcgmHandle = pydcgm.DcgmHandle(ipAddress="127.0.0.1")
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetDefaultGroup()
#Discover which fieldIds are valid
g_fieldTags = {}
for fieldId in range(1, dcgm_fields.DCGM_FI_MAX_FIELDS):
fieldMeta = dcgm_fields.DcgmFieldGetById(fieldId)
if fieldMeta is None:
continue
g_fieldTags[fieldId] = fieldMeta.tag
#print("Found field tags: " + str(g_fieldTags))
fieldIds = sorted(g_fieldTags.keys())
gpuIds = dcgmGroup.GetGpuIds()
totalSampleCount = 0
cycleCount = 0
while True:
cycleCount += 1
print(("Cycle %d. Fields that updated in the last 60 seconds" % cycleCount))
driverTimeByFieldId = {}
watchIntervalByFieldId = {}
for gpuId in gpuIds:
for fieldId in fieldIds:
watchInfo = None
try:
watchInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(dcgmHandle.handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
except:
pass
if watchInfo is None:
continue
nowTs = int(time.time() * 1000000)
oneMinuteAgoTs = nowTs - 60000000
if watchInfo.newestTimestamp < oneMinuteAgoTs:
continue
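            # Average driver time spent per fetch of this field, in microseconds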
perUpdate = 0
if watchInfo.fetchCount > 0:
perUpdate = watchInfo.execTimeUsec / watchInfo.fetchCount
if fieldId not in driverTimeByFieldId:
driverTimeByFieldId[fieldId] = [perUpdate, ]
else:
driverTimeByFieldId[fieldId].append(perUpdate)
lastUpdateSec = (nowTs - watchInfo.newestTimestamp) / 1000000.0
if fieldId not in watchIntervalByFieldId:
watchIntervalByFieldId[fieldId] = watchInfo.monitorIntervalUsec
else:
watchIntervalByFieldId[fieldId] = max(watchInfo.monitorIntervalUsec, watchIntervalByFieldId[fieldId])
#print("gpuId %d, fieldId %d (%s). lastUpdate: %f s, execTime %d, fetchCount %d, perUpdate: %d" %
# (gpuId, fieldId, g_fieldTags[fieldId], lastUpdateSec,
# watchInfo.execTimeUsec, watchInfo.fetchCount, perUpdate))
totalDriverTime = 0
for fieldId in list(driverTimeByFieldId.keys()):
numGpus = len(driverTimeByFieldId[fieldId])
fieldDriverTime = sum(driverTimeByFieldId[fieldId])
totalDriverTime += fieldDriverTime
driverTimeAvg = fieldDriverTime / numGpus
print(("fieldId %d (%s), numGpus %u, driverTimePerUpdate %d usec, watchInterval %d usec" %
(fieldId, g_fieldTags[fieldId], numGpus, driverTimeAvg, watchIntervalByFieldId[fieldId])))
print(("Total Driver Time: %d usec" % totalDriverTime))
print("")
time.sleep(5.0)
| DCGM-master | testing/python3/internal_scripts/printWatchTable.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import pydcgm
except ImportError:
print("Unable to find pydcgm. You need to add the location of "
"pydcgm.py to your environment as PYTHONPATH=$PYTHONPATH:[path-to-pydcgm.py]")
import sys
import os
import time
import dcgm_field_helpers
import dcgm_fields
import dcgm_structs
class NvSwitchErrorMonitor:
def __init__(self, hostname):
self._pidPostfix = "_" + str(os.getpid()) #Add this to any names so we can run multiple instances
self._updateIntervalSecs = 5.0 #How often to print out new rows
self._hostname = hostname
self._InitFieldLists()
self._InitHandles()
def _InitFieldLists(self):
#NVSwitch error field Ids
self._nvSwitchErrorFieldIds = []
self._nvSwitchErrorFieldIds.append(dcgm_fields.DCGM_FI_DEV_NVSWITCH_FATAL_ERRORS)
self._nvSwitchErrorFieldIds.append(dcgm_fields.DCGM_FI_DEV_NVSWITCH_NON_FATAL_ERRORS)
#GPU error field Ids
self._gpuErrorFieldIds = []
self._gpuErrorFieldIds.append(dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL)
self._gpuErrorFieldIds.append(dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL)
self._gpuErrorFieldIds.append(dcgm_fields.DCGM_FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL)
self._gpuErrorFieldIds.append(dcgm_fields.DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL)
self._gpuErrorFieldIds.append(dcgm_fields.DCGM_FI_DEV_XID_ERRORS)
self._gpuErrorFieldIds.append(dcgm_fields.DCGM_FI_DEV_GPU_NVLINK_ERRORS)
#self._gpuErrorFieldIds.append(dcgm_fields.DCGM_FI_DEV_GPU_TEMP) #Will always generate output
def _InitHandles(self):
self._dcgmHandle = pydcgm.DcgmHandle(ipAddress=self._hostname)
groupName = "error_mon_gpus" + self._pidPostfix
self._allGpusGroup = pydcgm.DcgmGroup(self._dcgmHandle, groupName=groupName, groupType=dcgm_structs.DCGM_GROUP_DEFAULT)
print(("Found %d GPUs" % (len(self._allGpusGroup.GetEntities()))))
groupName = "error_mon_nvswitches" + self._pidPostfix
self._allNvSwitchesGroup = pydcgm.DcgmGroup(self._dcgmHandle, groupName=groupName, groupType=dcgm_structs.DCGM_GROUP_DEFAULT_NVSWITCHES)
print(("Found %d NvSwitches" % len(self._allNvSwitchesGroup.GetEntities())))
fgName = "error_mon_nvswitches" + self._pidPostfix
self._nvSwitchErrorFieldGroup = pydcgm.DcgmFieldGroup(self._dcgmHandle, name=fgName, fieldIds=self._nvSwitchErrorFieldIds)
fgName = "error_mon_gpus" + self._pidPostfix
self._gpuErrorFieldGroup = pydcgm.DcgmFieldGroup(self._dcgmHandle, name=fgName, fieldIds=self._gpuErrorFieldIds)
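        # Watch at half the print interval; DCGM expects the update frequency in microseconds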
updateFreq = int(self._updateIntervalSecs / 2.0) * 1000000
maxKeepAge = 3600.0 #1 hour
maxKeepSamples = 0 #Rely on maxKeepAge
self._nvSwitchWatcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(
self._dcgmHandle.handle, self._allNvSwitchesGroup.GetId(),
self._nvSwitchErrorFieldGroup, dcgm_structs.DCGM_OPERATION_MODE_AUTO,
updateFreq, maxKeepAge, maxKeepSamples, 0)
self._gpuWatcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(
self._dcgmHandle.handle, self._allGpusGroup.GetId(),
self._gpuErrorFieldGroup, dcgm_structs.DCGM_OPERATION_MODE_AUTO,
updateFreq, maxKeepAge, maxKeepSamples, 0)
def _GetLatestGpuErrorSamples(self):
numErrors = 0
nowStr = time.strftime("%m/%d/%Y %H:%M:%S")
self._gpuWatcher.GetMore()
for entityGroupId in list(self._gpuWatcher.values.keys()):
for entityId in self._gpuWatcher.values[entityGroupId]:
for fieldId in self._gpuWatcher.values[entityGroupId][entityId]:
for value in self._gpuWatcher.values[entityGroupId][entityId][fieldId].values:
if not value.isBlank and value.value > 0:
fieldMeta = dcgm_fields.DcgmFieldGetById(fieldId)
print("%s: Got error for GPU %d, field Id %s, value %d" % (nowStr, entityId, fieldMeta.tag, int(value.value)))
numErrors += 1
self._gpuWatcher.EmptyValues()
if numErrors == 0:
print("%s: No GPU errors." % nowStr)
def _GetLatestSwitchErrorSamples(self):
numErrors = 0
nowStr = time.strftime("%m/%d/%Y %H:%M:%S")
self._nvSwitchWatcher.GetMore()
for entityGroupId in list(self._nvSwitchWatcher.values.keys()):
for entityId in self._nvSwitchWatcher.values[entityGroupId]:
for fieldId in self._nvSwitchWatcher.values[entityGroupId][entityId]:
for value in self._nvSwitchWatcher.values[entityGroupId][entityId][fieldId].values:
if not value.isBlank and value.value > 0:
fieldMeta = dcgm_fields.DcgmFieldGetById(fieldId)
print("%s: Got error for NvSwitch %d, field Id %s, value %d" % (nowStr, entityId, fieldMeta.tag, int(value.value)))
numErrors += 1
self._nvSwitchWatcher.EmptyValues()
if numErrors == 0:
print("%s: No Switch errors." % nowStr)
def _MonitorOneCycle(self):
self._GetLatestGpuErrorSamples()
self._GetLatestSwitchErrorSamples()
def Monitor(self):
self._gpuWatcher.EmptyValues()
self._nvSwitchWatcher.EmptyValues()
try:
while True:
self._MonitorOneCycle()
time.sleep(self._updateIntervalSecs)
except KeyboardInterrupt:
print("Got CTRL-C. Exiting")
return
def main():
hostname = "localhost"
if len(sys.argv) > 1:
hostname = sys.argv[1]
print(("Using hostname " + hostname))
errorMonitor = NvSwitchErrorMonitor(hostname)
errorMonitor.Monitor()
if __name__ == "__main__":
main()
| DCGM-master | testing/python3/internal_scripts/nvswitch_error_monitor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
try:
import pydcgm
import dcgm_structs
except:
print("The DCGM modules were not found. Make sure to provide the 'PYTHONPATH=../' environmental variable")
sys.exit(1)
try:
import psutil
except:
print("psutil is missing. Install with 'pip3 install psutil'")
sys.exit(1)
numClients = 4 #How many clients to simulate
watchInterval = 10.0 #How often to update watches in seconds
prodWatchInterval = 30.0 #How often DCGM-exporter updates watches in seconds
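#Measured CPU percentages are divided by this to estimate the cost at the slower production watch interval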
prodDivisor = prodWatchInterval / watchInterval
gpuIds = None #Either set this to None to use all gpus or a list of gpuIds like [0,1]
fieldIds = [1001,1004,1005,1009,1010,1011,1012] #DCGM-exporter default list
print("Watch list: %s" % (str(fieldIds)))
dcgm_structs._dcgmInit('../apps/amd64')
def getNvHostEngineProcessObject():
for proc in psutil.process_iter(['name', 'pid']):
if proc.info['name'] == 'nv-hostengine':
return psutil.Process(proc.info['pid'])
return None
dcgmProcess = getNvHostEngineProcessObject()
if dcgmProcess is None:
print("nv-hostengine was not running")
sys.exit(1)
def getProcessPct(process):
'''
Get CPU usage of the passed in process object since last call. Call this
once at the start and then at the end of each test iteration
Note that this is % of a single core. so 100% of 1 core in a 12 core system is 100.0.
'''
return process.cpu_percent(None)
print("DCGM's PID is %d" % dcgmProcess.pid)
discard = getProcessPct(dcgmProcess)
time.sleep(1.0)
noClientsPct = getProcessPct(dcgmProcess)
print("DCGM used %.f%% CPU with no clients (idle)" % noClientsPct)
clientHandles = []
for i in range(numClients):
clientHandles.append(pydcgm.DcgmHandle(ipAddress="127.0.0.1"))
discard = getProcessPct(dcgmProcess)
time.sleep(1.0)
idleClientsPct = getProcessPct(dcgmProcess)
print("DCGM used %.f%% CPU with %d idle clients" % (idleClientsPct, numClients))
nameIncrement = 0
class FieldWatcher:
def __init__(self, dcgmHandle, gpuIds, fieldIds, watchIntervalSecs):
global nameIncrement
self._dcgmHandle = dcgmHandle
self._dcgmSystem = dcgmHandle.GetSystem()
gpuGroupName = "%d_%d" % (os.getpid(), nameIncrement)
nameIncrement += 1
if gpuIds is None:
self._dcgmGroup = self._dcgmSystem.GetDefaultGroup()
else:
self._dcgmGroup = self._dcgmSystem.GetGroupWithGpuIds(gpuGroupName, gpuIds)
self._watchIntervalSecs = watchIntervalSecs
fieldGroupName = "%d_%d" % (os.getpid(), nameIncrement)
nameIncrement += 1
self._dcgmFieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, fieldGroupName, fieldIds, None)
def Watch(self):
self._dcgmGroup.samples.WatchFields(self._dcgmFieldGroup, int(self._watchIntervalSecs * 1000000), 0, 1)
def Unwatch(self):
self._dcgmGroup.samples.UnwatchFields(self._dcgmFieldGroup, self._dcgmFieldGroup)
def GetLatestSample(self):
return self._dcgmGroup.samples.GetLatest(self._dcgmFieldGroup)
def __del__(self):
del self._dcgmFieldGroup
del self._dcgmGroup
watchers = []
print ("====Starting DCP overhead test ====")
for i in range(numClients):
watchers.append(FieldWatcher(clientHandles[i], gpuIds, fieldIds, watchInterval))
watchers[-1].Watch()
discard = getProcessPct(dcgmProcess) #Don't measure watch start-up overhead
sleepTime = watchInterval * 2
#print("Sleeping %.1f seconds to allow DCP to work in the background" % sleepTime)
time.sleep(sleepTime)
pct = getProcessPct(dcgmProcess)
print("DCGM used %.2f%% CPU (%.2f%% PROD cpu) with %d clients with watches but no field retrieval" % (pct, (pct / prodDivisor), len(watchers)))
time.sleep(0.5) #psutil suggests this as a minimum polling interval
print ("====Starting DCP with polling overhead test ====")
discard = getProcessPct(dcgmProcess)
for numClientsToPoll in range(numClients):
for i in range(3):
for j in range(numClientsToPoll):
sample = watchers[j].GetLatestSample()
#print(str(sample))
time.sleep(watchInterval)
pct = getProcessPct(dcgmProcess)
print("DCGM used %.2f%% CPU (%.2f%% PROD cpu) with %d clients with watches + samples" % (pct, (pct / prodDivisor), numClientsToPoll+1))
del watchers
| DCGM-master | testing/python3/internal_scripts/multiClientDcpCpuOverhead.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import pydcgm
except ImportError:
print("Unable to find pydcgm. You need to add the location of "
"pydcgm.py to your environment as PYTHONPATH=$PYTHONPATH:[path-to-pydcgm.py]")
import sys
import os
import time
import dcgm_field_helpers
import dcgm_fields
import dcgm_structs
class NvSwitchCounterMonitor:
def __init__(self, hostname):
self._pidPostfix = "_" + str(os.getpid()) #Add this to any names so we can run multiple instances
self._updateIntervalSecs = 30.0 #How often to print out new rows
self._hostname = hostname
self.NVSWITCH_NUM_LINKS = 18
self._InitFieldLists()
self._InitHandles()
def _InitFieldLists(self):
self._nvSwitchLatencyFieldIds = []
#get the low/medium/high/max latency bucket field ids, each switch port has 4 values.
#the field ids are contiguous, where first 4 ids are for port0, next 4 for port1 and so on.
for i in range(dcgm_fields.DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P00, dcgm_fields.DCGM_FI_DEV_NVSWITCH_LATENCY_MAX_P17+1, 1):
self._nvSwitchLatencyFieldIds.append(i)
#need two lists because there is gap between bandwidth0 and bandwidth1 field Ids.
#each counter has two values, TX_0 and RX_0.
#the field ids are contiguous, where first 2 ids are for port0, next 2 for port1 and so on.
self._nvSwitchBandwidth0FieldIds = []
for i in range(dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_0_P00, dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_RX_0_P17+1, 1):
self._nvSwitchBandwidth0FieldIds.append(i)
#get bandwidth counter1 field ids, ie TX_1, RX_1
self._nvSwitchBandwidth1FieldIds = []
for i in range(dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_1_P00, dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_RX_1_P17+1, 1):
self._nvSwitchBandwidth1FieldIds.append(i)
def _InitHandles(self):
self._dcgmHandle = pydcgm.DcgmHandle(ipAddress=self._hostname)
groupName = "bandwidth_mon_nvswitches" + self._pidPostfix
self._allNvSwitchesGroup = pydcgm.DcgmGroup(self._dcgmHandle, groupName=groupName, groupType=dcgm_structs.DCGM_GROUP_DEFAULT_NVSWITCHES)
print(("Found %d NVSwitches" % len(self._allNvSwitchesGroup.GetEntities())))
fgName = "latency_mon_nvswitches" + self._pidPostfix
self._nvSwitchLatencyFieldGroup = pydcgm.DcgmFieldGroup(self._dcgmHandle, name=fgName, fieldIds=self._nvSwitchLatencyFieldIds)
fgName = "bandwidth0_mon_nvswitches" + self._pidPostfix
self._nvSwitchBandwidth0FieldGroup = pydcgm.DcgmFieldGroup(self._dcgmHandle, name=fgName, fieldIds=self._nvSwitchBandwidth0FieldIds)
fgName = "bandwidth1_mon_nvswitches" + self._pidPostfix
self._nvSwitchBandwidth1FieldGroup = pydcgm.DcgmFieldGroup(self._dcgmHandle, name=fgName, fieldIds=self._nvSwitchBandwidth1FieldIds)
updateFreq = int(self._updateIntervalSecs / 2.0) * 1000000
maxKeepAge = 3600.0 #1 hour
maxKeepSamples = 0 #Rely on maxKeepAge
self._nvSwitchLatencyWatcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(
self._dcgmHandle.handle, self._allNvSwitchesGroup.GetId(),
self._nvSwitchLatencyFieldGroup, dcgm_structs.DCGM_OPERATION_MODE_AUTO,
updateFreq, maxKeepAge, maxKeepSamples, 0)
self._nvSwitchBandwidth0Watcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(
self._dcgmHandle.handle, self._allNvSwitchesGroup.GetId(),
self._nvSwitchBandwidth0FieldGroup, dcgm_structs.DCGM_OPERATION_MODE_AUTO,
updateFreq, maxKeepAge, maxKeepSamples, 0)
self._nvSwitchBandwidth1Watcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(
self._dcgmHandle.handle, self._allNvSwitchesGroup.GetId(),
self._nvSwitchBandwidth1FieldGroup, dcgm_structs.DCGM_OPERATION_MODE_AUTO,
updateFreq, maxKeepAge, maxKeepSamples, 0)
def _MonitorOneCycle(self):
numErrors = 0
nowStr = time.strftime("%m/%d/%Y %H:%M:%S")
self._nvSwitchLatencyWatcher.GetMore()
self._nvSwitchBandwidth0Watcher.GetMore()
self._nvSwitchBandwidth1Watcher.GetMore()
#3D dictionary of [entityGroupId][entityId][fieldId](DcgmFieldValueTimeSeries)
# where entityId = SwitchID
for entityGroupId in list(self._nvSwitchLatencyWatcher.values.keys()):
for entityId in self._nvSwitchLatencyWatcher.values[entityGroupId]:
latencyFieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_LATENCY_LOW_P00
for linkIdx in range(0, self.NVSWITCH_NUM_LINKS):
# if the link is not enabled, then the corresponding latencyFieldId key value will be
# empty, so skip those links.
if latencyFieldId in self._nvSwitchLatencyWatcher.values[entityGroupId][entityId]:
latencyLow = self._nvSwitchLatencyWatcher.values[entityGroupId][entityId][latencyFieldId].values[-1].value
latencyFieldId += 1
latencyMed = self._nvSwitchLatencyWatcher.values[entityGroupId][entityId][latencyFieldId].values[-1].value
latencyFieldId += 1
latencyHigh = self._nvSwitchLatencyWatcher.values[entityGroupId][entityId][latencyFieldId].values[-1].value
latencyFieldId += 1
latencyMax = self._nvSwitchLatencyWatcher.values[entityGroupId][entityId][latencyFieldId].values[-1].value
latencyFieldId += 1
print(("SwitchID %d LinkIdx %d Latency Low %d Medium %d High %d Max %d"
% (entityId, linkIdx, latencyLow, latencyMed, latencyHigh, latencyMax)))
else:
                        latencyFieldId += 4
for entityGroupId in list(self._nvSwitchBandwidth0Watcher.values.keys()):
for entityId in self._nvSwitchBandwidth0Watcher.values[entityGroupId]:
bandwidth0FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_0_P00
bandwidth1FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_1_P00
for linkIdx in range(0, self.NVSWITCH_NUM_LINKS):
# if the link is not enabled, then the corresponding bandwidth0FieldId and
# bandwidth1FieldId key values will be empty, so skip those links.
if bandwidth0FieldId in self._nvSwitchBandwidth0Watcher.values[entityGroupId][entityId]:
counter0Tx = self._nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[-1].value
counter1Tx = self._nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[-1].value
bandwidth0FieldId += 1
bandwidth1FieldId += 1
counter0Rx = self._nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[-1].value
counter1Rx = self._nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[-1].value
bandwidth0FieldId += 1
bandwidth1FieldId += 1
print(("SwitchID %d LinkIdx %d counter0Tx %d counter0Rx %d counter1Tx %d counter1Rx %d"
% (entityId, linkIdx, counter0Tx, counter0Rx, counter1Tx, counter1Rx)))
else:
bandwidth0FieldId += 2
bandwidth1FieldId += 2
self._nvSwitchLatencyWatcher.EmptyValues()
self._nvSwitchBandwidth0Watcher.EmptyValues()
self._nvSwitchBandwidth1Watcher.EmptyValues()
def Monitor(self):
self._nvSwitchLatencyWatcher.EmptyValues()
self._nvSwitchBandwidth0Watcher.EmptyValues()
self._nvSwitchBandwidth1Watcher.EmptyValues()
try:
while True:
self._MonitorOneCycle()
time.sleep(self._updateIntervalSecs)
except KeyboardInterrupt:
print ("Got CTRL-C. Exiting")
return
def main():
if len(sys.argv) > 1:
hostname = sys.argv[1]
else:
hostname = "localhost"
counterMonitor = NvSwitchCounterMonitor(hostname)
print(("Using hostname %s and update interval as %d secs " % (hostname, counterMonitor._updateIntervalSecs)))
counterMonitor.Monitor()
if __name__ == "__main__":
main()
| DCGM-master | testing/python3/internal_scripts/nvswitch_counter_monitor.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import csv
class ParseDcgmSingleMetric:
"class for parsing a single metric"
def __init__(self):
self.d_cpunum_th = collections.OrderedDict()
self.d_cpu_count = {}
self.metric_label_list = []
self.dcgm_val_gpu = []
self.gpuCount = 1
def createFieldsFromMetricLabel(self, metricLabelString, gpu_list):
print(("GPU Count", gpuCount))
for i in range(0, gpuCount):
#self.metric_label_list.append(str(gpu_list[i]) + '_' + str(metricLabelString))
self.metric_label_list.append(str(metricLabelString) + '_' + str(gpu_list[i]))
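    # Column headers take the form '<metric>_<gpuId>', one per GPU in the supplied list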
    def writeToCsv(self, sample_num):
        #Build one csv row: the sample number followed by one value column per GPU
        dict_row = {'Sample Number': sample_num}
        for i in range(0, gpuCount):
            dict_row[self.metric_label_list[i]] = self.dcgm_val_gpu[i]
        return dict_row
def parseAndWriteToCsv(self, fName, metric, gpu_list):
global gpuCount
csvFileName = 'dcgm_' + str(metric) + '.csv'
sample_num = 0
f = open(fName, 'r+')
lines = f.readlines() # read all lines at once
metric_label = lines[0].split()[2]
gpuCount = len((gpu_list).split(","))
self.createFieldsFromMetricLabel(metric_label, gpu_list.split(","))
        with open(csvFileName, 'w', newline='') as csvFile:
fieldnames = ['Sample Number']
for i in range(0, gpuCount):
fieldnames.append(self.metric_label_list[i])
writer = csv.DictWriter(csvFile, fieldnames=fieldnames)
writer.writeheader()
i = 2
while i < len(lines):
try:
line = lines[i]
row = line.split()
if row == [] or row[0] == 'Id' or row[1] == 'GPU/Sw':
#print ("Skipping non data row: " + str(row))
i = i+1
elif row[0].isdigit():
for k in range(0, gpuCount):
val = float(lines[i+k].split()[1])
#print (val, i)
self.dcgm_val_gpu.append(val)
sample_num = sample_num + 1
dict_row = self.writeToCsv(sample_num)
writer.writerow(dict_row)
i += gpuCount
self.dcgm_val_gpu[:] = []
except IndexError:
i = i+1
except StopIteration:
pass
print("Done")
def main(cmdArgs):
fName = cmdArgs.fileName
metric = cmdArgs.metric
gpu_list = cmdArgs.gpu_list
#parse and output the data
po = ParseDcgmSingleMetric()
po.parseAndWriteToCsv(fName, metric, gpu_list)
def parseCommandLine():
parser = argparse.ArgumentParser(description="Parse logs from dcgmLogs into a csv")
parser.add_argument("-f", "--fileName", required=True, help="fielName of the file to be parsed and outputted to csv")
parser.add_argument("-m", "--metric", required=True, help="metric for which the data is being analyzed")
parser.add_argument("-i", "--gpu_list", required=False, default='0,1,2,3,4,5,6,7', help="metric for which the data is being analyzed")
args = parser.parse_args()
return args
if __name__ == "__main__":
cmdArgs = parseCommandLine()
main(cmdArgs)
| DCGM-master | testing/python3/dcptestautomation/parse_dcgm_single_metric.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from subprocess import PIPE, Popen
###############################################################################################
#
# Utility function to execute bash commands from this script
# Prints the stdout to screen and returns a return code from the shell
#
###############################################################################################
def executeBashCmd(cmd, prnt):
"""
Executes a shell command as a separated process, return stdout, stderr and returncode
"""
ret_line = ''
print("executeCmd: \"%s\"" % str(cmd))
try:
result = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
ret_line = result.stdout.readline().decode('utf-8')
while True:
line = result.stdout.readline().decode('utf-8')
if prnt:
print(line.strip('\n'))
if not line:
break
sys.stdout.flush()
(stdout_buf, stderr_buf) = result.communicate()
stdout = stdout_buf.decode('utf-8')
stderr = stderr_buf.decode('utf-8')
if stdout:
print(stdout)#pass
if stderr:
print(stderr)
except Exception as msg:
print("Failed with: %s" % msg)
return result.returncode, ret_line.strip()
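# Illustrative usage (not taken from the original callers): unpack the returned tuple, e.g.
#   rc, first_line = executeBashCmd("ls", True)
#   if rc == 0:
#       print(first_line)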
###############################################################################################
#
# This function removes cleans up all the dependent libraries
# It also uninstalls any existing installation of datacenter gpu manager
# Returns success in the end.
#
###############################################################################################
def removeDependencies(prnt):
#Remove existing installation files and binaries
ret = executeBashCmd("echo {0} | sudo pip uninstall pandas".format('y'), prnt)
print(("sudo pip uninstall pandas returned: ", ret[0]))
#if module is not installed, this returns 1, so check for both values
if (ret[0] in [0, 1]):
ret = executeBashCmd("echo {0} | sudo pip uninstall wget".format('y'), prnt)
print(("sudo pip uninstall wget returned: ", ret[0]))
if (ret[0] in [0, 1]):
ret = executeBashCmd("echo {0} | sudo pip uninstall xlwt".format('y'), prnt)
print(("sudo pip uninstall xlwt returned: ", ret[0]))
if (ret[0] in [0, 1]):
ret = executeBashCmd("echo {0} | sudo pip uninstall xlrd".format('y'), prnt)
print(("sudo pip uninstall xlrd returned: ", ret[0]))
if ret[0] in [0, 1]:
print("\nRemoveDependencies returning 0")
return 0
print(("\nReturning: ", ret[0]))
return ret[0]
def installDependencies(prnt):
ret = 0
#Install all dependent libraries
ret = executeBashCmd("sudo pip install pandas", prnt)
if ret[0] == 0:
ret = executeBashCmd("sudo pip install wget", prnt)
if ret[0] == 0:
ret = executeBashCmd("sudo pip install xlwt", prnt)
if ret[0] == 0:
ret = executeBashCmd("sudo pip install xlrd", prnt)
print("InstallDependencies returning: ", ret[0])
return ret[0]
| DCGM-master | testing/python3/dcptestautomation/util.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##################################################################################################
#
# This is the main metrics validation file which validates the numbers reported by dcgm.
# It includes the following functionality:
# 1.) Parses the command line arguments
# 2.) Gives the user the option to remove and install the latest dcgm binaries
# 3.) Kills any existing instance of nv-hostengine and starts a new one.
# 4.) Starts dcgmproftester in its own instance - one for each GPU the test
#     is run on.
# 5.) Starts the dcgm thread reporting metric numbers on all GPUs under test.
# 6.) Captures the memory usage before dcgmproftester is started, while the tests are running and
#     after the tests are done.
# 7.) Parses the log files generated by the dcgm and dcgmproftester threads, compares the numbers
#     and determines pass or fail.
# 8.) Compares the memory before and after the tests for each GPU and determines a pass or fail.
# 9.) Outputs a Pass or Fail at the end of the run.
#
##################################################################################################
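#
# Example invocation (hypothetical values; see parseCommandLine() at the bottom of this
# file for the supported flags):
#   python run_validate_dcgm.py -m 1009 -i 0,1 -t 60 -d
#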
import csv
import argparse
import time as tm
import os
import sys
import subprocess
from multiprocessing import Process
import util
import wget
import pandas
'''
Profiling Fields
'''
DCGM_FI_PROF_GR_ENGINE_ACTIVE = 1001
DCGM_FI_PROF_SM_ACTIVE = 1002
DCGM_FI_PROF_SM_OCCUPANCY = 1003
DCGM_FI_PROF_PIPE_TENSOR_ACTIVE = 1004
DCGM_FI_PROF_DRAM_ACTIVE = 1005
DCGM_FI_PROF_PIPE_FP64_ACTIVE = 1006
DCGM_FI_PROF_PIPE_FP32_ACTIVE = 1007
DCGM_FI_PROF_PIPE_FP16_ACTIVE = 1008
DCGM_FI_PROF_PCIE_TX_BYTES = 1009
DCGM_FI_PROF_PCIE_RX_BYTES = 1010
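# These field IDs mirror the DCGM_FI_PROF_* field identifiers defined by DCGM's dcgm_fields
# and are passed straight to "dcgmi dmon -e <field>" and "dcgmproftester -t <field>" below.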
class RunValidateDcgm:
def __init__(self):
self.tar_dir = os.path.realpath(sys.path[0])
self.prot_thread_gpu = []
self.init_range = 0
self.upper_range = 0
self.lower_range = 0
self.gpuCount = 0
self.results = {}
self.metrics_range_list = [DCGM_FI_PROF_PIPE_TENSOR_ACTIVE, DCGM_FI_PROF_PIPE_FP64_ACTIVE, \
DCGM_FI_PROF_PIPE_FP32_ACTIVE, DCGM_FI_PROF_PIPE_FP16_ACTIVE]
self.metrics_util_list = [DCGM_FI_PROF_GR_ENGINE_ACTIVE, DCGM_FI_PROF_SM_ACTIVE, \
DCGM_FI_PROF_SM_OCCUPANCY, DCGM_FI_PROF_DRAM_ACTIVE, \
DCGM_FI_PROF_PCIE_TX_BYTES, DCGM_FI_PROF_PCIE_RX_BYTES]
self.metrics_range = {DCGM_FI_PROF_PIPE_TENSOR_ACTIVE:0.75, \
DCGM_FI_PROF_PIPE_FP64_ACTIVE:0.92, \
DCGM_FI_PROF_PIPE_FP32_ACTIVE:0.85, \
DCGM_FI_PROF_PIPE_FP16_ACTIVE:0.75}
self.metrics_label = {DCGM_FI_PROF_GR_ENGINE_ACTIVE:'GRACT', \
DCGM_FI_PROF_SM_ACTIVE:'SMACT', \
DCGM_FI_PROF_SM_OCCUPANCY:'SMOCC', \
DCGM_FI_PROF_PIPE_TENSOR_ACTIVE:'TENSOR', \
DCGM_FI_PROF_DRAM_ACTIVE:'DRAMA', \
DCGM_FI_PROF_PIPE_FP64_ACTIVE:'FP64A', \
DCGM_FI_PROF_PIPE_FP32_ACTIVE:'FP32A', \
DCGM_FI_PROF_PIPE_FP16_ACTIVE:'FP16A', \
DCGM_FI_PROF_PCIE_TX_BYTES:'PCITX', \
DCGM_FI_PROF_PCIE_RX_BYTES:'PCIRX'}
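    # Note: metrics_range maps a field ID to the minimum activity level dcgm is expected to
    # report while dcgmproftester drives that pipe, whereas metrics_util_list entries are
    # compared sample-by-sample against dcgmproftester's own output (see validateAccuracy).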
def getLatestDcgm(self):
print("Getting the URL for latest dcgm package\n")
baseurl = "http://cqa-fs01/dvsshare/dcgm/daily/r418_00/"
cmd = 'wget -q -O - http://cqa-fs01/dvsshare/dcgm/daily/r418_00/ | grep -Eo \
\\2019[0-9]{8} | tail -1'
ret, folder_name = util.executeBashCmd(cmd, True)
if "$folder_name" == "":
print("Package index not found. Maybe the server is down?")
dcgm_url = baseurl + folder_name + '/testing_dcgm/x86_64/testing_dcgm.tar.gz'
deb_url = baseurl + folder_name + '/DEBS/datacenter-gpu-manager-dcp-nda-only_1.6.4_amd64.deb'
return dcgm_url, deb_url
#############################################################################################
#
# This function removes any dcgmi and dcgmproftester binaries
# It also uninstalls any existing installation of datacenter gpu manager
# Returns success in the end.
#
#############################################################################################
def removeBinaries(self, prnt):
#Remove existing installation files and binaries
ret = util.executeBashCmd("sudo rm -rf testing_dcgm*", prnt)
ret = util.executeBashCmd("sudo rm -rf datacenter-gpu-manager*.deb", prnt)
ret = util.executeBashCmd("sudo rm -rf _out/", prnt)
ret = util.executeBashCmd("sudo rm -rf /usr/bin/dcgmproftester", prnt)
ret = util.executeBashCmd("sudo rm -rf *.txt", prnt)
ret = util.executeBashCmd("sudo rm -rf *.csv", prnt)
ret = util.executeBashCmd("sudo rm -rf *.pyc", prnt)
#Uninstall dcgmi
print("Removing existing installation of dcgmi")
uninstall_cmd = "sudo dpkg --purge datacenter-gpu-manager-dcp-nda-only"
ret = util.executeBashCmd(uninstall_cmd, prnt)
if ret[0] != 0:
print(("Error: Couldnt purge existing installation of \
datacenter-gpu-manager-dcp-nda-on, ret: ", ret))
else:
print("\nSUCCESS: No error on uninstall")
uninstall_cmd = "sudo apt-get remove --purge datacenter-gpu-manager"
ret = util.executeBashCmd(uninstall_cmd, prnt)
if ret[0] != 0:
print(("Error: Couldnt purge existing installation of datacenter-gpu-manager, ret: ", \
ret))
else:
print("\nSUCCESS: No error on uninstalling datacenter-gpu-manager")
return 0
#############################################################################################
#
# This function downloads the latest version of testing_dcgm.tar.gz and
# datacenter-gpu-manager-dcp-nda-only_1.6.4_amd64.deb It is called only if user
# specified "-d" option. Returns 0 for SUCCESS, -1 for any failure.
#
#############################################################################################
def downloadInstallers(self, dcgm_url, deb_url):
print(("&&&& INFO: Downloading latest testing_dcgm.tar.gz from", dcgm_url))
#fileName = wget.download(self.tar_file, out=None)#, bar=None) # no progress bar is shown
fileName = wget.download(dcgm_url, out=None)#, bar=None) # no progress bar is shown
if not os.path.isfile(os.path.join(self.tar_dir, fileName)):
print(("ERROR", "Unable to download specified packages: \n %s" % fileName))
return -1
else:
print("SUCCESS: nDownload Success\n")
print(("&&&& INFO: Downloading latest datacenter-gpu-manager-dcp-nda-only_1.6.4_amd64.deb \
from", deb_url))
self.deb_fileName = wget.download(deb_url, out=None) # no progress bar is shown
if not os.path.isfile(os.path.join(self.tar_dir, self.deb_fileName)):
print(("ERROR", "Unable to download specified packages:\n %s" % self.deb_fileName))
return -1
print("\nSUCCESS: Download completed successfully")
return 0
def killNvHostEngine(self):
print("\n&&&& INFO: Killing any existing nvhostengine instance")
ret = util.executeBashCmd("sudo /usr/bin/nv-hostengine -t", True)
print("\n&&&& INFO: Stopping dcgm service ")
ret = util.executeBashCmd("sudo service dcgm stop", True)
def startNvHostEngine(self):
print("\n&&&& INFO: Killing any existing nvhostengine instance")
ret = util.executeBashCmd("sudo /usr/bin/nv-hostengine -t", True)
print("\n&&&& INFO: Stopping dcgm service ")
ret = util.executeBashCmd("sudo service dcgm stop", True)
print("\n&&&& INFO: Starting nvhostengine")
ret = util.executeBashCmd("sudo /usr/bin/nv-hostengine", True)
print("\n&&&& INFO: dcgmi discovery output")
ret = util.executeBashCmd("sudo /usr/bin/dcgmi discovery -l", True)
return ret
def installDcgm(self):
print("\n&&&& INFO: Installing latest version of datacenter-gpu-manager-dcp-nda-on")
ret = util.executeBashCmd("sudo dpkg -i datacenter-gpu-manager-dcp-nda-only_1.6.4_amd64.deb", True)
if ret[0] != 0:
print(("ERROR: Couldnt install dcgmi, ret: ", ret))
else:
print("\nSUCCESS: Installed datacenter-gpu-manager-dcp-nda-on successfully")
return ret[0]
def installProfTester(self):
ret = util.executeBashCmd("tar xvf testing_dcgm.tar.gz", True)
if ret[0] == 0:
ret = util.executeBashCmd("sudo cp _out/Linux_amd64_release/testing/apps/amd64/dcgmproftester /usr/bin/", True)
else:
print(("ERROR: Something went wrong in extracting testing_dcgm.tar.gz??, \
command returned: \n", ret))
return ret[0]
def _runDcgmLoadProfilingModule(self):
print("\n&&&& INFO: Running dcgm to just to load profiling module once.")
ret = util.executeBashCmd("timeout 3s /usr/bin/dcgmi dmon -e 1001 -i 0", False)
#############################################################################################
#
# This function is called when dcgm thread is spawned to collect the metrics.
# It executes dcgmi to collect metrics on all GPU's under test for a specified amount of time.
# All information regarding how many GPU's to run the test on, time to run the tests, which
# metrics to gather is specified by user. This thread is executing in parallel, control
# immediately returns to the calling function.
#
#############################################################################################
def _runDcgm(self, metrics, gpuid_list, time):
print("\n&&&& INFO: Running dcgm to collect metrics on {0}".format(metrics))
ret = util.executeBashCmd("echo {0} | timeout {0}s /usr/bin/dcgmi dmon -e {1} -i {2} 2>&1 | tee dcgmLogs_{3}.txt".format(time, metrics, gpuid_list, metrics), False)
#############################################################################################
#
# This function is called when dcgmproftester thread is spawned to generate a workload.
# It executes dcgmproftester to generate a workload on one GPU under test for a specified
# amount of time. Information regarding time to run the tests, which metrics to gather,
# and gpu to test this is specified by user. One thread is spawned for every GPU as generating
# workload on multiple GPUs in the same instance is not supported by dcgmproftester yet.
# This thread is executing in parallel, control immediately returns to the calling function.
#
#############################################################################################
def _runProftester(self, gpuIndex, metric, time):
metrics = str(metric)
print("\n&&&& INFO: Running dcgmproftester to collect metrics on gpu {0}".format(gpuIndex))
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpuIndex)
util.executeBashCmd("echo {0} | /usr/bin/dcgmproftester -d {0} -t {1} 2>&1 | tee dcgmLogsproftester_{2}_gpu{3}.txt".format(time, metrics, metrics, gpuIndex), False)
#############################################################################################
#
# This function returns the column names in excel to read the data from, its based on metrics
# information passed.
#
#############################################################################################
def getColNames(self, metrics):
colnames = []
name = self.metrics_label[metrics]
for i in range(0, self.gpuCount):
colName = name + '_' + str(i)
colnames.append(colName)
return colnames, self.metrics_label[metrics]
#############################################################################################
#
# This function defines an error margin based on the metrics.
# For certain number of times, if dcgm reports numbers outside the error margin as compared to
# what dcgmproftester is expecting, test will fail.
#
#############################################################################################
def getMarginRange(self, metrics):
if metrics == DCGM_FI_PROF_PCIE_RX_BYTES or metrics == DCGM_FI_PROF_PCIE_TX_BYTES:
return 17.0, -10.0, 17.0
elif metrics == DCGM_FI_PROF_GR_ENGINE_ACTIVE or metrics == DCGM_FI_PROF_SM_ACTIVE \
or metrics == DCGM_FI_PROF_SM_OCCUPANCY or metrics == DCGM_FI_PROF_DRAM_ACTIVE:
return 10.0, -10.0, 10.0
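    # The tuple returned above is (upper, lower, initial-search) error margin in percent; the
    # callers below only ever pass in the PCIe and utilization field IDs handled by the two
    # branches, so the implicit None fall-through is never hit.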
#############################################################################################
#
# This function finds the first index in dcgm for which the value is within the error margin
# when compared to dcgmproftester.This is for the metrics for PCIE metrics for which bandwidth
# is reported instead of utilization. After we find this index, we continue to compare each
# value and increment both dcgm and dcgmproftester values by 1.
#
#############################################################################################
def findClosestIndexForBand(self, dcgm_list, prof_list, metrics):
self.upper_range, self.lower_range, self.init_range = self.getMarginRange(metrics)
print("\nPROFTESTER[9][1]: " + str(prof_list[9][1]))
for dcgm_delay in range(6, len(dcgm_list)):
err_mar = ((float(dcgm_list[dcgm_delay]) - float(prof_list[9][1]))*100/float(prof_list[9][1]))
print('FINDING OUT - dcgm[' + str(dcgm_list[dcgm_delay])+'] proftester[' + str(prof_list[9][1])+'] error margin[' + str(err_mar)+ '] dcgm_delay[' + str(dcgm_delay)+ ']')
if abs(err_mar) < self.init_range:
err_mar_next = ((float(dcgm_list[dcgm_delay+1]) - float(prof_list[9][1]))*100/float(prof_list[9][1]))
print('FINDING NXT--dcgm['+str(dcgm_list[dcgm_delay+1])+'] proftester[' +str(prof_list[9][1])+'] err margin['+str(err_mar_next)+'] dcgm_delay['+str(dcgm_delay)+ ']')
if abs(err_mar_next) - self.init_range <= abs(err_mar) - self.init_range:
dcgm_delay = dcgm_delay + 1
print((abs(err_mar_next), self.init_range))
break
else:
print((abs(err_mar), self.init_range))
break
else:
dcgm_delay = dcgm_delay + 1
return dcgm_delay
#############################################################################################
#
# This function finds the first index in dcgm for which the value is within the error margin
# when compared to dcgmproftester. This is for the metrics for which utilization numbers are
# reported. After we find this index, we continue to compare each value and increment both
# dcgm and dcgmproftester values by 1.
#
#############################################################################################
def getClosestIndexForUtil(self, dcgm_list, dcgm_init_val):
i = 0
#print dcgm_list
for i in range(6, len(dcgm_list)):
#print("COMPARING..." + str(dcgm_list[i]))
if float(dcgm_list[i]) > dcgm_init_val:
if abs(float(dcgm_list[i]) - dcgm_init_val) > abs(float(dcgm_list[i-1]) - dcgm_init_val):
i = i - 1
break
return i
def getClosestIndex(self, dcgm_list, prof_list, metrics, dcgm_init_val):
i = 0
if metrics == DCGM_FI_PROF_PCIE_TX_BYTES or metrics == DCGM_FI_PROF_PCIE_RX_BYTES:
i = self.findClosestIndexForBand(dcgm_list, prof_list, metrics)
elif metrics == DCGM_FI_PROF_GR_ENGINE_ACTIVE or metrics == DCGM_FI_PROF_SM_ACTIVE or metrics == DCGM_FI_PROF_SM_OCCUPANCY:
i = self. getClosestIndexForUtil(dcgm_list, dcgm_init_val)
return i
#############################################################################################
#
# This function validates that the dcgm data gathered is within the expected range which is
# pre-defined for some metrics.
#
#############################################################################################
def validateAccuracyForRanges(self, dcgmCsvFile, gpu_index, metrics):
ret = 0
colnames, metric_label = self.getColNames(metrics)
dcgm_col = metric_label + '_' + str(gpu_index)
data = pandas.read_csv(dcgmCsvFile, skiprows=[0], names=colnames)
dcgm_list = data[dcgm_col].tolist()
for i in range(4, len(dcgm_list)-5):
if dcgm_list[i] < self.metrics_range[metrics] or dcgm_list[i] > 1.0:
print("FAILED: Entry num" + str(i) + ": " + str(dcgm_list[i]))
ret = -1
#else:
# print("Entry num" + str(i) + ": " + str(dcgm_list[i]))
return ret
#############################################################################################
#
# This function validates that the dcgm data gathered is within the expected error margin with
# what is reported by dcgmproftester. If there are certain number of times that the data is out
# of error margin, the test will fail.
#
#############################################################################################
def validateAccuracyForUtilForUtil(self, dcgmCsvFile, dcgmProfTesterCsvFile, gpu_index, metrics):
i = 0
mismatches = 0
spikes = 0
dcgm_init_index = 5
self.upper_range, self.lower_range, self.init_range = self.getMarginRange(metrics)
with open(dcgmProfTesterCsvFile, 'r') as f2:
c1 = csv.reader(f2)
prof_list = list(c1)
dcgm_init_val = float(prof_list[dcgm_init_index][1])
print('\nValidating the accuracy of gpu' + str(gpu_index))
colnames, metric_label = self.getColNames(metrics)
dcgm_col = metric_label + '_' + str(gpu_index)
data = pandas.read_csv(dcgmCsvFile, skiprows=[0], names=colnames)
dcgm_list = data[dcgm_col].tolist()
i = self.getClosestIndex(dcgm_list, prof_list, metrics, dcgm_init_val)
dcgm_delay = 0
try:
# Ignoring the first few entries, starting from index 9
len_dcgm = len(dcgm_list)
tot_comp = len_dcgm-i
while i < len_dcgm - (dcgm_delay):
err_mar = ((float(dcgm_list[i+dcgm_delay]) - float(prof_list[dcgm_init_index][1]))*100/float(prof_list[dcgm_init_index][1]))
err_mar_next_line = ((float(dcgm_list[i+dcgm_delay+1]) - float(prof_list[dcgm_init_index][1]))*100/float(prof_list[dcgm_init_index][1]))
if (err_mar > self.upper_range or err_mar < self.lower_range):
mismatches = mismatches + 1
print('1st check failed - dcgm['+str(dcgm_list[i+dcgm_delay])+'] dcgmproftester[' + str(prof_list[dcgm_init_index][1]) + '] Error margin[' + str(err_mar) + ']')
if (err_mar_next_line > self.upper_range or err_mar_next_line < self.lower_range):
spikes = spikes + 1
print('Failed 2nd time - dcgm[' + str(dcgm_list[i+dcgm_delay+1]) + '] dcgmproftester[' + str(prof_list[dcgm_init_index][1])+ '] Err Mar['+ str(err_mar_next_line))
i = i + 3
dcgm_init_index = dcgm_init_index + 3
continue
else:
print('SUCCESS with next entry - dcgm[' + str(dcgm_list[i+dcgm_delay+1]) + '] [dcgmproftester[' + str(prof_list[dcgm_init_index][1])+'] Err Mar[' + str(err_mar_next_line))
dcgm_delay = dcgm_delay + 1
else:
i = i+1
dcgm_init_index = dcgm_init_index + 1
except (IndexError, ZeroDivisionError) as e:
print("\nIndex or ZeroDiv Exception occured..Ignoring: ")
i = i+1
dcgm_init_index = dcgm_init_index + 1
print('Spikes for gpu' + str(gpu_index) + ': ' + str(spikes))
print('Total comparisons: ' + str(tot_comp) + ', Mismatches: ' + str(mismatches))
failure_perc = float(mismatches * 100)/tot_comp
print('Failure % for gpu' + str(gpu_index) + ': ' + str(failure_perc))
if mismatches > 5:
return -1
return 0
def validateAccuracy(self, dcgmCsvFile, dcgmProfTesterCsvFile, gpu_index, metrics):
ret = 0
if metrics in self.metrics_util_list:
ret = self.validateAccuracyForUtilForUtil(dcgmCsvFile, dcgmProfTesterCsvFile, \
gpu_index, metrics)
elif metrics in self.metrics_range:
ret = self.validateAccuracyForRanges(dcgmCsvFile, gpu_index, metrics)
else:
print("Metrics: " + str(metrics) + "not supported\n")
return ret
##############################################################################################
#
# This function gets the output of nvidia-smi for the calling function to get the memory
# information
#
##############################################################################################
def getSmiOp(self):
out = subprocess.Popen(['nvidia-smi'], \
stdout=subprocess.PIPE, \
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
return stdout
##############################################################################################
#
# This function gets memory information out of nvidia-smi output
#
##############################################################################################
def getMemUsage(self, smi, gpu_list):
mem_list = []
smi_list = smi.split()
indices = [i for i, s in enumerate(smi_list) if 'MiB' in s]
for i in range(0, len(gpu_list)*2, 2):
mem_list.append(smi_list[indices[i]])
return mem_list
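    # A note on the parsing above: each GPU row in nvidia-smi's table carries two "MiB"
    # tokens (memory used / memory total), so stepping through the matches two at a time
    # keeps only the "used" figure for every GPU in gpu_list.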
def main(cmdArgs):
metrics = int(cmdArgs.metrics)
gpuid_list = cmdArgs.gpuid_list
time = int(cmdArgs.time)
download_bin = cmdArgs.download_bin
print(("Download_binaries: ", download_bin))
if time < int(10):
print('Modifying the time to 10s which is minimum\n')
time = 10
print(cmdArgs)
ro = RunValidateDcgm()
if download_bin:
#Remove existing installation of dcgmi and dcgmproftestor
ret = ro.removeBinaries(True)
#download latest installers
if ret == 0:
dcgm_url, deb_url = ro.getLatestDcgm()
ret = ro.downloadInstallers(dcgm_url, deb_url)
else:
print("ERROR: Some problem with removing binaries\n")
print(ret)
#Install latest dcgm
if ret == 0:
ret = ro.installDcgm()
#Install latest dcgmproftester
if ret == 0:
ret = ro.installProfTester()
else:
print("Something went wrong installing dcgmproftester\n")
#if(ret == 0):
ret = ro.startNvHostEngine()
print("\nSleeping for 2 seconds")
tm.sleep(2)
gpu_list = gpuid_list.split(",")
ro.gpuCount = len(gpu_list)
#spawn dcgmi thread to load the profiling module once.
print("Start : %s" % tm.ctime())
tm.sleep(2)
dcgm_time = int(time) + 4
dcgm_thread_load_profiling_module = Process(target=ro._runDcgmLoadProfilingModule, \
name="dcgm_worker-%d" %metrics)
dcgm_thread_load_profiling_module.start()
#wait for the thread to finish
dcgm_thread_load_profiling_module.join()
smi_in_beg = ro.getSmiOp()
mem_in_beg = ro.getMemUsage(smi_in_beg, gpu_list)
#print ("In Beginning: \n" + str(smi_in_beg))
#spawn dcgmproftester threads, one each for every GPU
for i in range(0, len(gpu_list)):
threadName = 'dcgmproftester_worker-' + str(gpu_list[i])
print("\n&&&& RUNNING GPU_" + str(gpu_list[i]) + "_metric_validation_test")
ro.prot_thread_gpu.append(Process(target=ro._runProftester, args=[gpu_list[i], metrics, \
time], name=threadName))
#print gpu_list, len(gpu_list)
ro.prot_thread_gpu[i].start()
#spawn dcgmi thread
print("Start : %s" % tm.ctime())
tm.sleep(2)
dcgm_time = int(time) + 4
dcgm_thread = Process(target=ro._runDcgm, args=[metrics, gpuid_list, dcgm_time], \
name="dcgm_worker-%s" %metrics)
dcgm_thread.start()
tm.sleep(time/2)
smi_while_running = ro.getSmiOp()
mem_in_between = ro.getMemUsage(smi_while_running, gpu_list)
#print ("In Between: \n" + str(smi_while_running))
#wait for the thread to finish
dcgm_thread.join()
for i in range(0, len(gpu_list)):
ro.prot_thread_gpu[i].join()
#Copy the dcgm data in csv file
cmd = '{executable} parse_dcgm_single_metric.py -f dcgmLogs_{0}.txt -m {1} -i {2}'.format(metrics, \
metrics, gpuid_list, executable=sys.executable)
ret = util.executeBashCmd(cmd, True)
#Copy the dcgmproftester data in csv
if metrics in ro.metrics_util_list:
for i in range(0, len(gpu_list)):
cmd = '{executable} parse_dcgmproftester_single_metric.py -f \
dcgmLogsproftester_{0}_gpu{1}.txt -m {2} -i {3}'.format(metrics, gpu_list[i], \
metrics, gpu_list[i], executable=sys.executable)
ret = util.executeBashCmd(cmd, True)
#Compare the results and determine pass and fail
for i in range(0, len(gpu_list)):
dcgm_file = 'dcgm_{0}.csv'.format(metrics)
dcgmproftester_file = 'dcgmProfTester_{0}_gpu{1}.csv'.format(metrics, gpu_list[i])
ret = ro.validateAccuracy(dcgm_file, dcgmproftester_file, int(gpu_list[i]), metrics)
if ret == 0:
print("\n&&&& PASSED GPU_" + str(gpu_list[i]) + "_metric_validation_test")
ro.results[gpu_list[i]] = 'PASS'
else:
print("\n&&&& FAILED GPU_" + str(gpu_list[i]) + "_metric_validation_test")
ro.results[gpu_list[i]] = 'FAIL'
print("\n")
#for i in range(0, len(gpu_list)):
#print('Validation for GPU ' + str(gpu_list[i]) + ': ' + ro.results[gpu_list[i]])
smi_at_end = ro.getSmiOp()
mem_in_end = ro.getMemUsage(smi_at_end, gpu_list)
print("\nMemory in Beg of test run of all GPU's under test: " + str(mem_in_beg))
print("Memory in Between of test run of all GPU's under test: " + str(mem_in_between))
print("Memory in end of test run of all GPU's under test: " + str(mem_in_end) + "\n")
for i in range(0, len(gpu_list)):
print("\n&&&& RUNNING GPU_" + str(gpu_list[i]) + "_memory_validation_test")
val = int(mem_in_end[i][0:len(mem_in_end[i])-3])
#print ("Val without string: ", val)
if ((mem_in_beg[i] != mem_in_end[i]) or val > 156):
print("\n&&&& FAILED GPU_" + str(gpu_list[i]) + "_memory_validation_test")
else:
print("\n&&&& PASSED GPU_" + str(gpu_list[i]) + "_memory_validation_test")
if download_bin:
ret = ro.removeBinaries(False)
    ret = ro.killNvHostEngine()
#Send out an email with the chart
def parseCommandLine():
parser = argparse.ArgumentParser(description="Validation of dcgm metrics")
parser.add_argument("-m", "--metrics", required=True, help="Metrics to be validated E.g. \
\"1009\", etc")
parser.add_argument("-i", "--gpuid_list", required=False, default='0', help="comma separated \
gpu id list starting from 0, eg \"0,1,2\"")
parser.add_argument("-t", "--time", required=True, help="time in seconds")
parser.add_argument("-d", "--download_bin", action='store_true', required=False, \
default=False, help="If specified, download new binaries")
args = parser.parse_args()
return args
if __name__ == "__main__":
# Parsing command line options
CMDARGS = parseCommandLine()
main(CMDARGS)
| DCGM-master | testing/python3/dcptestautomation/run_validate_dcgm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import util
import sys
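# Example invocation (hypothetical values; see parseCommandLine() below for the flags):
#   python run_dcgm_tests.py -m 1009 -i 0,1 -t 120 -d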
# Install the Python dependencies, run run_validate_dcgm.py with the requested options,
# then remove the dependencies again.
def main(cmdArgs):
metrics = cmdArgs.metrics
time = cmdArgs.time
gpuid_list = cmdArgs.gpuid_list
download_bin = cmdArgs.download_bin
ret = util.removeDependencies(True)
if ret == 0:
ret = util.installDependencies(True)
if ret == 0:
if download_bin:
cmd = '{executable} run_validate_dcgm.py -m {0} -t {1} -d -i {2}'\
.format(metrics, time, gpuid_list, executable=sys.executable)
else:
cmd = '{executable} run_validate_dcgm.py -m {0} -t {1} -i {2}'\
.format(metrics, time, gpuid_list, executable=sys.executable)
ret = util.executeBashCmd(cmd, True)
print("\nTests are done, removing dependencies")
ret = util.removeDependencies(False)
print("\n All Done")
def parseCommandLine():
parser = argparse.ArgumentParser(description="Validation of dcgm metrics")
parser.add_argument("-m", "--metrics", required=True, help="Metrics to be validated \
E.g. \"1009\", etc")
parser.add_argument("-i", "--gpuid_list", required=False, default='0', \
help="comma separated gpu id list starting from 0, eg \"0,1,2\"")
parser.add_argument("-t", "--time", required=True, help="time in seconds")
parser.add_argument("-d", "--download_bin", action='store_true', required=False, default=False,\
help="If specified, download new binaries")
args = parser.parse_args()
return args
if __name__ == "__main__":
# Parsing command line options
cmdArgs = parseCommandLine()
main(cmdArgs)
| DCGM-master | testing/python3/dcptestautomation/run_dcgm_tests.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import argparse
import re
class ParseDcgmProftesterSingleMetric:
"class for parsing a single metric"
def __init__(self):
self.data_lines_lines = ['PcieTxBytes', 'PcieRxBytes', 'GrActivity:', 'SmActivity', \
'SmActivity:', 'SmOccupancy', 'SmOccupancy:', \
'TensorEngineUtil', 'DramUtil', 'Fp64EngineUtil', \
'Fp32EngineUtil', 'Fp16EngineUtil']
self.d_cpu_count = {}
def getDataString(self, metric):
if metric == '1001':
return 'GrActivity'
elif metric == '1002':
return 'SmActivity'
elif metric == '1003':
return 'SmOccupancy'
elif metric == '1009':
return 'PcieTxBytes'
elif metric == '1010':
return 'PcieRxBytes'
else:
return 'None'
def getFirstIndex(self, metric):
if metric == '1010' or metric == '1009':
return 11
elif metric == '1001':
return 8
elif metric == '1002':
return 9
elif metric == '1003':
return 9
def parseAndWriteToCsv(self, fName, metric, gpu_index):
csvFileName = 'dcgmProfTester' + '_' + str(metric) + '_gpu'+ str(gpu_index) + '.csv'
sample_num = 0
f = open(fName, 'r+')
lines = f.readlines() # read all lines at once
metric_label = self.getDataString(metric)
        with open(csvFileName, 'w', newline='') as csvFile:
fieldnames = ['Sample Number', metric_label]
writer = csv.DictWriter(csvFile, fieldnames=fieldnames)
writer.writeheader()
i = 0
            pattern = re.compile(r'\d+(\.\d+)?')
while i < len(lines):
try:
line = lines[i]
row = line.split()
if row == [] or row[0] not in self.data_lines_lines:
print("Skipping non data row: " + str(row))
elif pattern.match(row[1]):
#print ("Row[1]", row[1] + ' i[' + str(i) +']' )
sample_num = sample_num + 1
val = float(lines[i].split()[1])
dict_row = {'Sample Number': sample_num, metric_label: val}
writer.writerow(dict_row)
i = i + 1
except IndexError:
print("Excepting non data row: " + str(row))
i = i+1
pass
except StopIteration:
pass
print("Outside loop")
def main(cmdArgs):
fName = cmdArgs.fileName
metric = cmdArgs.metric
gpu_index = cmdArgs.gpu_index
#parse and output the data
po = ParseDcgmProftesterSingleMetric()
po.parseAndWriteToCsv(fName, metric, gpu_index)
def parseCommandLine():
parser = argparse.ArgumentParser(description="Parse logs from dcgmLogs into a csv")
parser.add_argument("-f", "--fileName", required=True, help="fielName of the \
file to be parsed and outputted to csv")
parser.add_argument("-m", "--metric", required=True, help="metric for which the data is being \
analyzed")
parser.add_argument("-i", "--gpu_index", required=False, default='0', help="metric for which \
the data is being analyzed")
args = parser.parse_args()
return args
if __name__ == "__main__":
cmdArgs = parseCommandLine()
main(cmdArgs)
| DCGM-master | testing/python3/dcptestautomation/parse_dcgmproftester_single_metric.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cuda_utils
| DCGM-master | testing/python3/cuda/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import dcgm_structs
import test_utils
## Device structures
class struct_c_CUdevice(ctypes.Structure):
pass # opaque handle
c_CUdevice = ctypes.POINTER(struct_c_CUdevice)
# constants
CUDA_SUCCESS = 0
CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33
CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34
CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = 50
_cudaLib = None
def _loadCuda():
global _cudaLib
if _cudaLib is None:
_cudaLib = ctypes.CDLL("libcuda.so.1")
cuInitFn = getattr(_cudaLib, "cuInit")
assert CUDA_SUCCESS == cuInitFn(ctypes.c_uint(0))
def _unloadCuda():
global _cudaLib
_cudaLib = None
def cuDeviceGetCount():
global _cudaLib
_loadCuda()
cuDeviceGetCountFn = getattr(_cudaLib, "cuDeviceGetCount")
c_count = ctypes.c_uint(0)
assert CUDA_SUCCESS == cuDeviceGetCountFn(ctypes.byref(c_count))
_unloadCuda()
return c_count.value
def cuDeviceGet(idx):
global _cudaLib
_loadCuda()
cuDeviceGetFn = getattr(_cudaLib, "cuDeviceGet")
c_dev = c_CUdevice()
assert CUDA_SUCCESS == cuDeviceGetFn(ctypes.byref(c_dev), ctypes.c_uint(idx))
_unloadCuda()
return c_dev
def cuDeviceGetBusId(c_dev):
global _cudaLib
_loadCuda()
cuDeviceGetAttributeFn = getattr(_cudaLib, "cuDeviceGetAttribute")
c_domain = ctypes.c_uint()
c_bus = ctypes.c_uint()
c_device = ctypes.c_uint()
assert CUDA_SUCCESS == cuDeviceGetAttributeFn(ctypes.byref(c_domain),
ctypes.c_uint(CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID), c_dev)
assert CUDA_SUCCESS == cuDeviceGetAttributeFn(ctypes.byref(c_bus),
ctypes.c_uint(CU_DEVICE_ATTRIBUTE_PCI_BUS_ID), c_dev)
assert CUDA_SUCCESS == cuDeviceGetAttributeFn(ctypes.byref(c_device),
ctypes.c_uint(CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID), c_dev)
_unloadCuda()
return "%04x:%02x:%02x.0" % (c_domain.value, c_bus.value, c_device.value)
| DCGM-master | testing/python3/cuda/cuda_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test the policy manager for DCGM
import dcgm_structs
import dcgm_agent_internal
import dcgm_agent
import logger
import test_utils
import dcgm_fields
import dcgmvalue
import pydcgm
from dcgm_structs import dcgmExceptionClass
import time
import inspect
from subprocess import check_output
def helper_dcgm_group_create_grp(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
groupObj = pydcgm.DcgmGroup(handleObj, groupName="test1")
groupId = groupObj.GetId()
assert(groupId != 0)
#Force the group to be deleted
del(groupObj)
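# Most helpers in this module are exercised twice: once against an embedded host engine
# (run_with_embedded_host_engine) and once against a standalone host engine reached through
# an initialized client connection (run_with_standalone_host_engine + run_with_initialized_client).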
@test_utils.run_with_embedded_host_engine()
def test_dcgm_group_create_grp_embedded(handle):
helper_dcgm_group_create_grp(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_group_create_grp_standalone(handle):
helper_dcgm_group_create_grp(handle)
def helper_dcgm_group_update_grp(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_EMPTY, "test1")
gpuIdList = gpuIds
assert len(gpuIdList) > 0, "Failed to get devices from the node"
for gpuId in gpuIdList:
groupObj.AddGpu(gpuId)
gpuIdListAfterAdd = groupObj.GetGpuIds()
assert gpuId in gpuIdListAfterAdd, "Expected gpuId %d in %s" % (gpuId, str(gpuIdListAfterAdd))
for gpuId in gpuIdList:
groupObj.RemoveGpu(gpuId)
gpuIdListAfterAdd = groupObj.GetGpuIds()
assert gpuId not in gpuIdListAfterAdd, "Expected gpuId %d NOT in %s" % (gpuId, str(gpuIdListAfterAdd))
#Force the group to be deleted
del(groupObj)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_group_update_grp_embedded(handle, gpuIds):
helper_dcgm_group_update_grp(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_group_update_grp_standalone(handle, gpuIds):
helper_dcgm_group_update_grp(handle, gpuIds)
def helper_dcgm_group_get_grp_info(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
gpuIdList = gpuIds
assert len(gpuIdList) > 0, "Failed to get devices from the node"
for gpuId in gpuIdList:
groupObj.AddGpu(gpuId)
# We used to test fetching negative value throws Bad Param error here.
    # This was only a use case because we were mixing signed and unsigned values.
# Now we're just testing that passing an invalid group ID results in the
# expected NOT_CONFIGURED error.
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
ret = dcgm_agent.dcgmGroupGetInfo(handle, -1)
gpuIdListAfterAdd = groupObj.GetGpuIds()
assert gpuIdList == gpuIdListAfterAdd, "Expected all GPUs from %s to be added. Got %s" % (str(gpuIdList), str(gpuIdListAfterAdd))
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_group_get_grp_info_embedded(handle, gpuIds):
helper_dcgm_group_get_grp_info(handle, gpuIds)
def helper_dcgm_group_get_grp_info_entities(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
gpuIdList = gpuIds
assert len(gpuIdList) > 0, "Failed to get devices from the node"
for gpuId in gpuIdList:
groupObj.AddEntity(dcgm_fields.DCGM_FE_GPU, gpuId)
gpuIdListAfterAdd = groupObj.GetGpuIds()
assert gpuIdList == gpuIdListAfterAdd, "Expected all GPUs from %s to be added. Got %s" % (str(gpuIdList), str(gpuIdListAfterAdd))
entityListAfterAdd = groupObj.GetEntities()
gpuList2 = []
for entity in entityListAfterAdd:
assert entity.entityGroupId == dcgm_fields.DCGM_FE_GPU, str(entity.entityGroupId)
gpuList2.append(entity.entityId)
assert gpuIdList == gpuList2, "Expected all GPUs from %s to be added. Got %s" % (str(gpuIdList), str(gpuList2))
#Remove all GPUs
for gpuId in gpuIdList:
groupObj.RemoveEntity(dcgm_fields.DCGM_FE_GPU, gpuId)
entityListAfterRem = groupObj.GetEntities()
assert len(entityListAfterRem) == 0, str(entityListAfterRem)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_helper_dcgm_group_get_grp_info_entities(handle, gpuIds):
helper_dcgm_group_get_grp_info_entities(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_group_get_grp_info_standalone(handle, gpuIds):
helper_dcgm_group_get_grp_info(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_group_get_all_ids_standalone(handle):
"""
Get all the group IDS configured on the host engine
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
#Get the list of groups before we add ours so that we account for them
groupIdListBefore = dcgm_agent.dcgmGroupGetAllIds(handle)
expectedCount = len(groupIdListBefore)
groupObjs = []
for index in range(0,10):
expectedCount += 1
name = 'Test'
name += repr(index)
groupObj = systemObj.GetEmptyGroup(name)
groupObjs.append(groupObj) #keep reference so it doesn't go out of scope
pass
groupIdListAfter = dcgm_agent.dcgmGroupGetAllIds(handle)
assert len(groupIdListAfter) == expectedCount, "Num of groups less than expected. Expected: %d Returned %d" % (expectedCount, len(groupIdListAfter))
def dcgm_group_test_default_group(handle, gpuIds):
"""
Test that the default group can not be deleted, or manipulated and is returning all GPUs.
Note that we're not using groupObj for some tests because it protects against operations on the default group
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
gpuIdList = gpuIds
assert len(gpuIdList) > 0, "Failed to get devices from the node"
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, 9999)
groupGpuIdList = groupObj.GetGpuIds()
assert(gpuIdList == groupGpuIdList), "Expected gpuId list match %s != %s" % (str(gpuIdList), str(groupGpuIdList))
groupEntityList = groupObj.GetEntities()
gpuIdList2 = []
for entity in groupEntityList:
assert entity.entityGroupId == dcgm_fields.DCGM_FE_GPU, str(entity.entityGroupId)
gpuIdList2.append(entity.entityId)
assert gpuIdList == gpuIdList2, "Expected gpuId list to match entity list: %s != %s" % (str(gpuIdList), str(gpuIdList2))
for gpuId in gpuIdList:
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
ret = dcgm_agent.dcgmGroupRemoveDevice(handle, dcgm_structs.DCGM_GROUP_ALL_GPUS, gpuId)
with test_utils.assert_raises(pydcgm.DcgmException):
groupObj.RemoveGpu(gpuId)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
ret = dcgm_agent.dcgmGroupDestroy(handle, dcgm_structs.DCGM_GROUP_ALL_GPUS)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_group_test_default_group_standalone(handle, gpuIds):
dcgm_group_test_default_group(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_group_test_default_group_embedded(handle, gpuIds):
dcgm_group_test_default_group(handle, gpuIds)
def helper_dcgm_group_delete_grp(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
groupObj = pydcgm.DcgmGroup(handleObj, groupName="test1")
groupId = groupObj.GetId().value
#Delete the group
groupObj.Delete()
ids = dcgm_agent.dcgmGroupGetAllIds(handle)
assert(groupId not in ids), "groupId %d in %s" % (groupId, str(ids))
@test_utils.run_with_embedded_host_engine()
def test_dcgm_group_delete_grp_embedded(handle):
helper_dcgm_group_delete_grp(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_group_delete_grp_standalone(handle):
helper_dcgm_group_delete_grp(handle)
| DCGM-master | testing/python3/tests/test_groupmgmt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_agent_internal
import dcgm_agent
import logger
import test_utils
import dcgm_fields
import apps
import dcgmvalue
import DcgmSystem
import DcgmDiag
from dcgm_structs import dcgmExceptionClass, DCGM_ST_NOT_CONFIGURED
import dcgm_structs_internal
import dcgm_internal_helpers
import utils
import threading
import option_parser
import shutil
import string
import time
import tempfile
from ctypes import *
import sys
import os
import pprint
from sys import stdout
import json
def _run_dcgmi_command(args):
''' run a command then return (retcode, stdout_lines, stderr_lines) '''
dcgmi = apps.DcgmiApp(args)
# Some commands (diag -r 2) can take a minute or two
dcgmi.start(250)
retValue = dcgmi.wait()
dcgmi.validate()
return retValue, dcgmi.stdout_lines, dcgmi.stderr_lines
def _is_eris_diag_inforom_failure(args, stdout_lines):
INFOROM_FAILURE_STRING = 'nvmlDeviceValidateInforom for nvml device'
if not option_parser.options.eris:
# This is used to skip diag tests. We only want to do that on Eris
return False
if len(args) > 0 and args[0] == 'diag' and INFOROM_FAILURE_STRING in stdout_lines:
return True
return False
def _assert_valid_dcgmi_results(args, retValue, stdout_lines, stderr_lines):
assert (len(stdout_lines) > 0), 'No output detected for args "%s"' % ' '.join(args[1:])
if _is_eris_diag_inforom_failure(args, stdout_lines):
# If we see inforom corruption, the test should not fail
test_utils.skip_test('Detected corrupt inforom for diag test')
return
output = ''
for line in stdout_lines:
output = output + line + ' '
if test_utils.is_mig_incompatible_failure(output):
test_utils.skip_test("Skipping this test because MIG is configured incompatibly (preventing access to the whole GPU)")
if retValue != c_ubyte(dcgm_structs.DCGM_ST_OK).value:
logger.error('Valid test - Function returned error code: %s . Args used: "%s"' % (retValue, ' '.join(args[1:])))
logger.error('Stdout:')
for line in stdout_lines:
logger.error('\t'+line)
logger.error('Stderr:')
for line in stderr_lines:
logger.error('\t'+line)
assert False, "See errors above."
errLines = _lines_with_errors(stdout_lines)
assert len(errLines) == 0, "Found errors in output. Offending lines: \n%s" % '\n'.join(errLines)
def _assert_invalid_dcgmi_results(args, retValue, stdout_lines, stderr_lines):
assert retValue != c_ubyte(dcgm_structs.DCGM_ST_OK).value, \
'Invalid test - Function returned error code: %s . Args used: "%s"' \
% (retValue, ', '.join(args[0:]))
assert len(_lines_with_errors(stderr_lines + stdout_lines)) >= 1, \
'Function did not display error message for args "%s". Returned: %s\nstdout: %s\nstderr: %s' \
% (' '.join(args[1:]), retValue, '\n'.join(stdout_lines), '\n'.join(stderr_lines))
def _lines_with_errors(lines):
errorLines = []
errorStrings = [
'error',
'invalid',
'incorrect',
'unexpected'
]
exceptionStrings = [
'nvlink error',
'flit error',
'data error',
'replay error',
'recovery error',
'ecc error',
'xid error'
]
for line in lines:
lineLower = line.lower()
for errorString in errorStrings:
if not errorString in lineLower:
continue
wasExcepted = False
for exceptionString in exceptionStrings:
if exceptionString in lineLower:
wasExcepted = True
break
if wasExcepted:
continue
errorLines.append(line)
return errorLines
def _create_dcgmi_group(groupType=dcgm_structs.DCGM_GROUP_EMPTY):
''' Create an empty group and return its group ID '''
createGroupArgs = ["group", "-c", "test_group"]
if groupType == dcgm_structs.DCGM_GROUP_DEFAULT:
createGroupArgs.append('--default')
elif groupType == dcgm_structs.DCGM_GROUP_DEFAULT_NVSWITCHES:
createGroupArgs.append('--defaultnvswitches')
retValue, stdout_lines, stderr_lines = _run_dcgmi_command(createGroupArgs)
_assert_valid_dcgmi_results(createGroupArgs, retValue, stdout_lines, stderr_lines)
# dcgmi "group -c" outputs a line like 'Successfully created group "test_group" with a group ID of 2'
# so we capture the last word as the group ID (it doesn't seem like there's a better way)
# convert to int so that if it's not an int, an exception is raised
return int(stdout_lines[0].strip().split()[-1])
def _test_valid_args(argsList):
for args in argsList:
retValue, stdout_lines, stderr_lines = _run_dcgmi_command(args)
_assert_valid_dcgmi_results(args, retValue, stdout_lines, stderr_lines)
def _test_invalid_args(argsList):
for args in argsList:
retValue, stdout_lines, stderr_lines = _run_dcgmi_command(args)
_assert_invalid_dcgmi_results(args, retValue, stdout_lines, stderr_lines)
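# Typical pattern for the tests below (sketch): create a scratch group with the helper above,
# then drive the dcgmi binary through _test_valid_args()/_test_invalid_args(), e.g.
#   groupId = str(_create_dcgmi_group())
#   _test_valid_args([["group", "-g", groupId, "-i"]])
#   _test_invalid_args([["group", "-d", "0"]])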
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2) #Injecting compute instances only works with live ampere or injected GPUs
@test_utils.run_with_injection_gpu_instances(2)
@test_utils.run_with_injection_gpu_compute_instances(2)
def test_dcgmi_group(handle, gpuIds, instanceIds, ciIds):
"""
Test DCGMI group
"""
DCGM_ALL_GPUS = dcgm_structs.DCGM_GROUP_ALL_GPUS
groupId = str(_create_dcgmi_group())
## keep args in this order. Changing it may break the test
_test_valid_args([
["group", "-l", ""], # list groups
["group", "-g", groupId, "-i"], # get info on created group
["group", "-g", groupId, "-a", str(gpuIds[0])], # add gpu to group
["group", "-g", groupId, "-r", str(gpuIds[0])], # remove that gpu from the group
["group", "-g", groupId, "-a", "instance:" + str(instanceIds[0])], # add instance to group
["group", "-g", groupId, "-r", "instance:" + str(instanceIds[0])], # remove instance from group
["group", "-g", groupId, "-a", "ci:" + str(ciIds[0])], # add CI to group
["group", "-g", groupId, "-r", "ci:" + str(ciIds[0])], # remove CI from group
["group", "-g", groupId, "-a", "gpu:" + str(gpuIds[0])], # add gpu to group with gpu tag
["group", "-g", groupId, "-r", "gpu:" + str(gpuIds[0])], # remove that gpu from the group with gpu tag
["group", "-g", groupId, "-a", "instance:" + str(instanceIds[0])], # add instance to group with instance tag
["group", "-g", groupId, "-r", "instance:" + str(instanceIds[0])], # remove instance from the group with instance tag
["group", "-g", groupId, "-a", "ci:" + str(instanceIds[0])], # add CI to group with compute instance tag
["group", "-g", groupId, "-r", "ci:" + str(instanceIds[0])], # remove CI from the group with compute instace tag
# Testing Cuda/MIG formats for entity ids.
# Fake GPUs have GPU-00000000-0000-0000-0000-000000000000 UUID
["group", "-g", groupId, "-a", "00000000-0000-0000-0000-000000000000"], # add a GPU to the group
["group", "-g", groupId, "-r", "00000000-0000-0000-0000-000000000000"], # remove the GPU from the group
["group", "-g", groupId, "-a", "GPU-00000000-0000-0000-0000-000000000000"], # add a GPU to the group
["group", "-g", groupId, "-r", "GPU-00000000-0000-0000-0000-000000000000"], # remove the GPU from the group
["group", "-g", groupId, "-a", "MIG-GPU-00000000-0000-0000-0000-000000000000"], # add a GPU to the group
["group", "-g", groupId, "-r", "MIG-GPU-00000000-0000-0000-0000-000000000000"], # remove the GPU from the group
["group", "-g", groupId, "-a", "GPU-00000000-0000-0000-0000-000000000000/0"], # add a GPU Instance to the group
["group", "-g", groupId, "-r", "GPU-00000000-0000-0000-0000-000000000000/0"], # remove the GPU Instance from the group
["group", "-g", groupId, "-a", "GPU-00000000-0000-0000-0000-000000000000/%d/%d" % (0, 0)], # add a CI to the group
["group", "-g", groupId, "-r", "GPU-00000000-0000-0000-0000-000000000000/%d/%d" % (0, 0)], # remove the CI from the group
["group", "-g", groupId, "-a", "GPU-00000000-0000-0000-0000-000000000000/%d/%d" % (1, 0)], # add another CI to the group
["group", "-g", groupId, "-r", "GPU-00000000-0000-0000-0000-000000000000/%d/%d" % (1, 0)], # remove the CI from the group
["group", "-g", groupId, "-a", "MIG-GPU-00000000-0000-0000-0000-000000000000/%d/%d" % (0, 0)], # add a CI to the group
["group", "-g", groupId, "-r", "MIG-GPU-00000000-0000-0000-0000-000000000000/%d/%d" % (0, 0)], # remove the CI from the group
["group", "-g", groupId, "-a", "MIG-GPU-00000000-0000-0000-0000-000000000000/%d/%d" % (1, 0)], # add another CI to the group
["group", "-g", groupId, "-r", "MIG-GPU-00000000-0000-0000-0000-000000000000/%d/%d" % (1, 0)], # remove the CI from the group
["group", "-g", groupId, "-a", "MIG-GPU-00000000-0000-0000-0000-000000000000/0/*"], # add all CIs for InstanceId_0
["group", "-g", groupId, "-r", "MIG-GPU-00000000-0000-0000-0000-000000000000/0/0"], # remove CI_0
# This one disabled as the run_with_injection_gpu_compute_instances decorator does not inject hierarchy for now.
# ["group", "-g", groupId, "-r", "MIG-GPU-00000000-0000-0000-0000-000000000000/0/1"], # remove CI_1
["group", "-g", groupId, "-a", "MIG-GPU-00000000-0000-0000-0000-000000000000/*/0"], # add all CI_0 for Instances 0 and 1
["group", "-g", groupId, "-r", "MIG-GPU-00000000-0000-0000-0000-000000000000/0/0"], # remove CI_0 for Instance 0
["group", "-g", groupId, "-r", "MIG-GPU-00000000-0000-0000-0000-000000000000/1/0"], # remove CI_0 for Instance 1
["group", "-g", groupId, "-a", "*"], # add all GPUs
["group", "-g", groupId, "-r", "*"], # remove all GPUs
["group", "-g", groupId, "-a", "*/*"], # add all GPU instances
["group", "-g", groupId, "-r", "*/*"], # remove all GPU instances
["group", "-g", groupId, "-a", "*/*/*"], # add all CIs
["group", "-g", groupId, "-r", "*/*/*"], # remove all CIs
["group", "-g", groupId, "-a", "*,*/*/*"], # add all GPUs and CIs
["group", "-g", groupId, "-r", "*,*/*/*"], # remove all GPUs and CIs
["group", "-d", groupId, ], # delete the group
["group", "-g", "0", "-i"], # Default group can be fetched by ID as long as group IDs start at 0
])
nonExistentGroupId = str(int(groupId) + 10)
groupId = str(_create_dcgmi_group())
## keep args in this order. Changing it may break the test
_test_invalid_args([
["group", "-c", "--default"], # Can't create a group called --default
["group", "-c", "--add"], # Can't create a group called --add
["group", "-c", "-a"], # Can't create a group called -a
["group", "-g", nonExistentGroupId, "-a", str(gpuIds[0])], # Can't add to a group that doesn't exist
["group", "-g", groupId, "-a", "129"], # Can't add a GPU that doesn't exist
["group", "-g", groupId, "-r", "129"], # Can't remove a GPU that doesn't exist
["group", "-g", groupId, "-a", "instance:2000"], # Can't add an instance that doesn't exist
["group", "-g", groupId, "-r", "instance:2000"], # Can't remove an instance that doesn't exist
["group", "-g", groupId, "-a", "ci:2000"], # Can't add a CI that doesn't exist
["group", "-g", groupId, "-r", "ci:2000"], # Can't remove a CI that doesn't exist
["group", "-g", nonExistentGroupId, "-r", str(gpuIds[0])], # Can't remove from a group that does't exist
["group", "-g", "0", "-r", "0"], # Can't remove from the default group (ID 0)
["group", "-g", str(DCGM_ALL_GPUS), "-r", str(gpuIds[0])], # Can't remove from the default group w/ handle
["group", "-d", "0"], # Can't delete the default group (ID 0)
["group", "-d", str(DCGM_ALL_GPUS)], # Can't delete the default group w/ handle
["group", "-d", nonExistentGroupId], # Can't delete a group that doesnt exist
["group", "-g", groupId, "-a", "11111111-1111-1111-1111-111111111111"], # add a GPU to the group
["group", "-g", groupId, "-r", "11111111-1111-1111-1111-111111111111"], # remove the GPU from the group
["group", "-g", groupId, "-a", "GPU-11111111-1111-1111-1111-111111111111"], # add a GPU to the group
["group", "-g", groupId, "-r", "GPU-11111111-1111-1111-1111-111111111111"], # remove the GPU from the group
["group", "-g", groupId, "-a", "MIG-GPU-11111111-1111-1111-1111-111111111111"], # add a GPU to the group
["group", "-g", groupId, "-r", "MIG-GPU-11111111-1111-1111-1111-111111111111"], # remove the GPU from the group
["group", "-g", groupId, "-a", "%d/%d" % (129, instanceIds[0])], # Can't add an instance that doesn't exits
["group", "-g", groupId, "-r", "%d/%d" % (129, instanceIds[0])], # Can't remove an instance that doesn't exist
["group", "-g", groupId, "-a", "%d/%d/%d" % (129, instanceIds[0], ciIds[0])], # Can't add a CI that doesn't exist
["group", "-g", groupId, "-r", "%d/%d/%d" % (129, instanceIds[0], ciIds[0])], # Can't remove a CI that doesn't exist
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_nvswitches(2)
def test_dcgmi_group_nvswitch(handle, switchIds):
groupId = str(_create_dcgmi_group(groupType=dcgm_structs.DCGM_GROUP_DEFAULT_NVSWITCHES))
## keep args in this order. Changing it may break the test
_test_valid_args([
["group", "-g", groupId, "-i"], # get info on created group
["group", "-g", groupId, "-r", "nvswitch:%s" % str(switchIds[0])], # remove a switch from the group
["group", "-g", groupId, "-a", "nvswitch:%s" % str(switchIds[0])], # add a switch to group
["group", "-g", groupId, "-r", "nvswitch:%s" % str(switchIds[1])], # remove a 2nd switch from the group
["group", "-g", groupId, "-a", "nvswitch:%s" % str(switchIds[1])], # add a 2nd switch to group
["group", "-g", groupId, "-r", "nvswitch:%s,nvswitch:%s" % (str(switchIds[0]), str(switchIds[1]))], # remove both switches at once
["group", "-g", groupId, "-a", "nvswitch:%s,nvswitch:%s" % (str(switchIds[0]), str(switchIds[1]))], # add both switches at once
])
nonExistantGroupId = str(int(groupId) + 10)
## keep args in this order. Changing it may break the test
_test_invalid_args([
["group", "-g", groupId, "-r", "taco:%s" % str(switchIds[0])], # remove a switch from an invalid entityGroupId
["group", "-g", groupId, "-a", "taco:%s" % str(switchIds[0])], # add a switch to an invalid entityGroupId
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgmi_config(handle, gpuIds):
"""
Test DCGMI config
"""
assert len(gpuIds) > 0, "Failed to get devices from the node"
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
# Getting GPU power limits
for gpuId in gpuIds:
gpuAttrib = dcgmSystem.discovery.GetGpuAttributes(gpuId)
dft_pwr = str(gpuAttrib.powerLimits.defaultPowerLimit)
max_pwr = str(gpuAttrib.powerLimits.maxPowerLimit)
groupId = str(_create_dcgmi_group())
## Keep args in order in all dcgmi command sequence arrays below.
## This is the list of dcgmi command arguments that are valid as root
## and non-root, and effect setup for additional tests. We use a list in
## case we want to add more than one.
setupArgsTestList = [
["group", "-g", groupId, "-a", str(gpuIds[0])], # add gpu to group
]
_test_valid_args(setupArgsTestList)
## This is the list of dcgmi command arguments that are valid as root,
## and invalid as non-root.
validArgsTestList = [
["config", "--get", "-g", groupId], # get default group configuration
["config", "--get", "-g", "0"], # get default group configuration by ID. This will work as long as group IDs start at 0
["config", "-g", groupId, "--set", "-P", dft_pwr], # set default power limit
["config", "-g", groupId, "--set", "-P", max_pwr], # set max power limit
["config", "--get", "-g", groupId, "--verbose"], # get verbose default group configuration
["config", "--enforce", "-g", groupId], # enforce default group configuration
["config", "--enforce", "-g", "0" ] # enforce group configuration on default group by ID
]
# Setting the compute mode is only supported when MIG mode is not enabled.
if not test_utils.is_mig_mode_enabled():
# set group configuration on default group by ID
validArgsTestList.append(["config", "--set", "-c", "0", "-g", "0" ])
#Config management only works when the host engine is running as root
if utils.is_root():
_test_valid_args(validArgsTestList)
else:
_test_invalid_args(validArgsTestList)
## This is the list of invalid dcgmi command arguments.
_test_invalid_args([
["config", "--get", "-g", "9999"], # Can't get config of group that doesn't exist
["config", "--get", "-g", "9999", "--verbose"], # Can't get config of group that doesn't exist
["config", "--set", ""], # Can't set group configuration to nothing
["config", "--set", "-c", "5"], # Can't set an invalid compute mode
["config", "--enforce", "-g", "9999"] # Can't enforce a configuration of group that doesn't exist
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus() #Use injected GPUs for policy so this doesn't fail on GeForce and Quadro
def test_dcgmi_policy(handle, gpuIds):
"""
Test DCGMI policy
"""
dcgmHandle = pydcgm.DcgmHandle(handle)
dcgmSystem = dcgmHandle.GetSystem()
groupObj = dcgmSystem.GetGroupWithGpuIds("testgroup", gpuIds)
groupIdStr = str(groupObj.GetId().value)
DCGM_ALL_GPUS = dcgm_structs.DCGM_GROUP_ALL_GPUS
## keep args in this order. Changing it may break the test
_test_valid_args([
["policy", "--get", "", "-g", groupIdStr], # get default group policy
["policy", "--get", "-g", "0"], # get default group policy by ID. this will fail if groupIds ever start from > 0
["policy", "--get", "--verbose", "-g", groupIdStr], # get verbose default group policy
["policy", "--set", "0,0", "-p", "-e", "-g", groupIdStr], # set default group policy
["policy", "--set", "1,2", "-p", "-e", "-g", groupIdStr], # set default group policy
["policy", "--set", "1,0", "-x", "-g", groupIdStr], # set monitoring of xid errors
["policy", "--set", "1,0", "-x", "-n", "-g", groupIdStr], # set monitoring of xid errors and nvlink errors
#["policy", "--reg", ""] # register default group policy (causes timeout)
])
## keep args in this order. Changing it may break the test
_test_invalid_args([
["policy", "--get", "-g", "1000"], # Can't get policy of group that doesn't exist
["policy", "--get", "-g", "1000", "--verbose"], # Can't get policy of group that doesn't exist
["policy", "--set", "-p"], # Can't set group policy w/ no action/validation
["policy", "--set", "0,0"], # Can't set group policy w/ no watches
["policy", "--set", "0,0", "-p", "-g", "1000" ], # Can't set group policy on group that doesn't exist
["policy", "--reg", "-g", "1000"] # Can't register a policy of group that doesn't exist
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgmi_health(handle, gpuIds):
"""
Test DCGMI Health
"""
## keep args in this order. Changing it may break the test
_test_valid_args([
["health", "--fetch", ""], # get default group health
["health", "--set", "pmit"], # set group health
["health", "--clear", ""] # clear group health watches
])
#Create group for testing
groupId = str(_create_dcgmi_group())
nonExistantGroupId = str(int(groupId) + 10)
## keep args in this order. Changing it may break the test
_test_invalid_args([
["health", "--fetch", "-g", nonExistantGroupId], # Can't get health of group that doesn't exist
["health", "--set", "a", "-g", nonExistantGroupId], # Can't set health of group that doesn't exist
["health", "--set", "pp"], # Can't set health of group with multiple of same tag
["health", "--get", "ap"], # Can't set health to all plus another tag
["health", "--set", ""], # Can't set group health w/ no arguments
["health", "--check", "-g", nonExistantGroupId], # Can't check health of group that doesn't exist
["health", "--check", "-g", groupId] # Can't check health of group that has no watches enabled
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgmi_discovery(handle, gpuIds):
"""
Test DCGMI discovery
"""
## keep args in this order. Changing it may break the test
_test_valid_args([
["discovery", "--list", ""], # list gpus
["discovery", "--info", "aptc"], # check group info
["discovery", "--info", "aptc", "--verbose"] # check group info verbose
])
## keep args in this order. Changing it may break the test
_test_invalid_args([
["discovery", "--info", "a", "-g", "2"], # Can't check info on a group that doesn't exist
["discovery", "--info", "a", "--gpuid", "123"] # Can't check info on a GPU that doesn't exist
])
@test_utils.run_with_persistence_mode_on()
@test_utils.run_with_standalone_host_engine(320)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgmi_diag(handle, gpuIds):
"""
Test DCGMI diagnostics
"""
allGpusCsv = ",".join(map(str,gpuIds))
## keep args in this order. Changing it may break the test
pciTestParameters = "pcie.h2d_d2h_single_unpinned.min_pci_width=1"
pciTestParameters += ";pcie.h2d_d2h_single_pinned.min_pci_width=1"
#Need to skip checks for down NvLinks or QA will file bugs
if test_utils.are_any_nvlinks_down(handle): # helper call (assumed to take the DCGM handle)
pciTestParameters += ";pcie.test_nvlink_status=false"
pciTestCmdLineArgs = ["diag", "--run", "pcie", "-p", pciTestParameters, "-i", str(gpuIds[0])]
_test_valid_args([
["diag", "--run", "1", "-i", allGpusCsv], # run a short (level 1) diagnostic; longer settings currently run for too long
["diag", "--run", "1", "-i", str(gpuIds[0]), "--debugLogFile aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.txt"], # Test that we can pass a long debugLogFile
["diag", "--run", "1", "-i", allGpusCsv, "--parameters", "diagnostic.test_duration=30", "--fail-early"], # verifies --fail-early option
["diag", "--run", "1", "-i", allGpusCsv, "--parameters", "diagnostic.test_duration=30", "--fail-early", "--check-interval", "3"], # verifies --check-interval option
["diag", "--run", "1", "-i", allGpusCsv, "--throttle-mask", "HW_SLOWDOWN"], # verifies --throttle-mask ignores the HW_SLOWDOWN reason
["diag", "--run", "1", "-i", allGpusCsv, "--throttle-mask", "SW_THERMAL"], # verifies --throttle-mask ignores the SW_THERMAL reason
["diag", "--run", "1", "-i", allGpusCsv, "--throttle-mask", "HW_THERMAL"], # verifies --throttle-mask ignores the HW_THERMAL reason
["diag", "--run", "1", "-i", allGpusCsv, "--throttle-mask", "HW_POWER_BRAKE"], # verifies --throttle-mask ignores the HW_POWER_BRAKE reason
["diag", "--run", "1", "-i", allGpusCsv, "--throttle-mask", "HW_SLOWDOWN,SW_THERMAL,HW_POWER_BRAKE"], # verifies --throttle-mask ignores multiple named reasons
["diag", "--run", "1", "-i", allGpusCsv, "--throttle-mask", "SW_THERMAL,HW_THERMAL,HW_SLOWDOWN"], # verifies --throttle-mask ignores multiple named reasons
["diag", "--run", "1", "-i", allGpusCsv, "--throttle-mask", "40"], # verifies --throttle-mask ignores HW_SLOWDOWN (8) and SW_THERMAL (32) given as a numeric mask
["diag", "--run", "1", "-i", allGpusCsv, "--throttle-mask", "96"], # verifies --throttle-mask ignores SW_THERMAL (32) and HW_THERMAL (64) given as a numeric mask
["diag", "--run", "1", "-i", allGpusCsv, "--throttle-mask", "232"], # verifies --throttle-mask ignores all reasons given as a numeric mask
["diag", "--run", "1", "--gpuList", ",".join(str(x) for x in gpuIds)], # verifies --gpuList option accepts and validates list of GPUs passed in
pciTestCmdLineArgs,
])
## keep args in this order. Changing it may break the test
_test_invalid_args([
["diag", "--run", "-g", "2"], # Can't run on group that doesn't exist
["diag", "--run", "5"], # Can't run with a test number that doesn't exist
["diag", "--run", "\"roshar stress\""], # Can't run a non-existent test name
["diag", "--run", "3", "--parameters", "dianarstic.test_duration=40"],
["diag", "--run", "3", "--parameters", "diagnostic.test_durration=40"],
["diag", "--run", "3", "--parameters", "pcie.h2d_d2h_singgle_pinned.iterations=4000"],
["diag", "--run", "3", "--parameters", "pcie.h2d_d2h_single_pinned.itterations=4000"],
["diag", "--run", "3", "--parameters", "bob.tom=maybe"],
["diag", "--run", "3", "--parameters", "truck=slow"],
["diag", "--run", "3", "--parameters", "now this is a story all about how"],
["diag", "--run", "3", "--parameters", "my=life=got=flipped=turned=upside=down"],
["diag", "--run", "3", "--parameters", "and.i'd.like.to.take.a=minute=just.sit=right=there"],
["diag", "--run", "3", "--parameters", "i'll tell you=how.I.became the=prince of .a town called"],
["diag", "--run", "3", "--parameters", "Bel-Air"],
["diag", "--train"], # ensure that training is no longer supported
["diag", "--run", "1", "-i", allGpusCsv, "--parameters", "diagnostic.test_duration=30", "--fail-early 10"], # verifies --fail-early does not accept parameters
["diag", "--run", "1", "--parameters", "diagnostic.test_duration=30", "--fail-early", "--check-interval -1"], # no negative numbers allowed
["diag", "--run", "1", "--parameters", "diagnostic.test_duration=30", "--fail-early", "--check-interval 350"], # no numbers > 300 allowed
["diag", "--run", "1", "--parameters", "diagnostic.test_duration=30", "--check-interval 10"], # requires --fail-early parameter
# The tests below are disabled until bug http://nvbugs/2672193 is fixed
["diag", "--run", "1", "--throttle-mask", "HW_ZSLOWDOWN"], # verifies that --throttle-mask incorrect reason does not work
["diag", "--run", "1", "--throttle-mask", "SW_TRHERMAL"], # verifies that --throttle-mask incorrect reason does not work
["diag", "--run", "1", "--throttle-mask", "HWT_THERMAL"], # verifies that --throttle-mask incorrect reason does not work
["diag", "--run", "1", "--throttle-mask", "HW_POWER_OUTBRAKE"], # verifies that --throttle-mask incorrect reason does not work
["diag", "--run", "1", "--throttle-mask -10"], # verifies that --throttle-mask does not accept incorrect values for any reasons to be ignored
["diag", "--run", "1", "--plugin-path", "/usr/share/nvidia-validation-suite/unplugins"], # verifies --plugin-path fails if the plugins path is not specified correctly
# ["diag", "--run", "1", "--gpuList", "0-1-2-3-4-5"], # verifies --gpuList option accepts and validates list of GPUs passed in (disabled until http://nvbugs/2733071 is fixed)
["diag", "--run", "1", "--gpuList", "-1,-2,-3,-4,-5"], # verifies --gpuList option accepts and validates list of GPUs passed in
["diag", "--run", "1", "--gpuList", "a,b,c,d,e"], # verifies --gpuList option accepts and validates list of GPUs passed in
["diag", "--run", "1", "-i", "0-1-2-3-4"], # Make sure -i is a comma-separated list of integers
["diag", "--run", "1", "-i", "roshar"], # Make sure -i is a comma-separated list of integers
["diag", "--run", "1", "-i", "a,b,c,d,e,f"], # Make sure -i is a comma-separated list of integers
["diag", "--run", "1", "-i", allGpusCsv, "--plugin-path", "./apps/nvvs/plugins"], # verifies --plugin-path no longer works (removed)
["diag", "--run", "1", "--iterations", "0"], # We cannot run 0 iterations
["diag", "--run", "1", "--iterations", "\-1"], # We cannot run negative iterations
])
@test_utils.run_with_persistence_mode_on()
@test_utils.run_with_standalone_host_engine(320)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgmi_diag_multiple_iterations(handle, gpuIds):
allGpusCsv = ",".join(map(str,gpuIds))
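# Request a short level-1 diag in JSON mode (-j) on all GPUs, repeated for 3 iterations so the per-iteration results can be parsed below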
args = ["diag", "-r", "1", "-j", "-i", allGpusCsv, "--iterations", "3"]
retValue, stdout_lines, stderr_lines = _run_dcgmi_command(args)
assert retValue == 0, "Expected successful execution, but got %d" % retValue
rawtext = ""
for line in stdout_lines:
rawtext = rawtext + line + "\n"
try:
jsondict = json.loads(rawtext)
overallResult = jsondict["Overall Result"]
assert overallResult != None, "Didn't find a populated value for the overall result!"
iterationArray = jsondict["iterations"]
for i in range(0,2):
assert iterationArray[i] is not None, "Didn't find a populated result for run %d" % (i + 1)
except ValueError as e:
assert False, ("Couldn't parse json from '%s'" % rawtext)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgmi_stats(handle, gpuIds):
"""
Test DCGMI Stats
"""
## keep args in this order. Changing it may break the test
_test_valid_args([
["stats", "--enable"], # Enable watches
#["stats", "--pid", "100"], # check pid
#["stats", "--pid", "100", "--verbose"], # check pid verbose (run test process and enable these if wanted)
["stats", "--jstart", "1"], #start a job with Job ID 1
["stats", "--jstop", "1"], #Stop the job
["stats", "--job", "1"], #Print stats for the job
["stats", "--jremove", "1"], #Remove the job
["stats", "--jremoveall"], #Remove all jobs
["stats", "--disable"], #disable watches
["stats", "--jstart", "1"], #start another job with Job ID 1. This should work due to jremove above. Also, setup the jstart failure below
])
#Create group for testing
groupId = str(_create_dcgmi_group())
nonExistantGroupId = str(int(groupId) + 10)
## keep args in this order. Changing it may break the test
_test_invalid_args([
["stats", "--pid", "100", "-g", groupId], # Can't view stats without watches enabled
["stats", "enable", "-g", nonExistantGroupId], # Can't enable watches on group that doesn't exist
["stats", "disable", "-g", nonExistantGroupId], # Can't disable watches on group that doesn't exist
["stats", "--jstart", "1"], # Can't start a job with a job ID that is already in use
["stats", "--jstop", "3"], # Stop an invalid job id
["stats", "--jremove", "3"], # Remove an invalid job id
["stats", "--job", "3"] # Get stats for an invalid job id
])
@test_utils.run_with_standalone_host_engine(20, ["--port", "5545"])
@test_utils.run_with_initialized_client("127.0.0.1:5545")
def test_dcgmi_port(handle):
"""
Test DCGMI port - does dcgmi group testing using port 5545
"""
## keep args in this order. Changing it may break the test
_test_valid_args([
["group", "--host", "localhost:5545", "-l", ""], # list groups
])
## keep args in this order. Changing it may break the test
_test_invalid_args([
["group", "--host", "localhost:5545", "-c", "--default"], # Can't create a group called --default
])
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
def test_dcgmi_field_groups(handle):
"""
Test DCGMI field groups - test the dcgmi commands under "fieldgroup"
"""
_test_valid_args([
["fieldgroup", "-l"],
["fieldgroup", "-i", "-g", "1"], # show internal field group
["fieldgroup", "-c", "my_group", "-f", "1,2,3"], # Add a field group
])
_test_invalid_args([
["fieldgroup", "-c", "my_group", "-f", "1,2,3"], # Add a duplicate group
["fieldgroup", "-c", "bad_fieldids", "-f", "999999"], # Pass bad fieldIds
["introspect", "-d", "-g", "1"], # Delete internal group. Bad
["introspect", "-i", "-g", "100000"], # Info for invalid group
])
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
def test_dcgmi_introspect(handle):
"""
Test DCGMI introspection - test the dcgmi commands under "introspection"
"""
_test_valid_args([
["introspect", "--show", "--hostengine"], # show hostengine
["introspect", "-s", "-H"], # short form
])
_test_invalid_args([
["introspect", "--show"], # "show" without "--hostengine" should fail
])
@test_utils.run_with_standalone_host_engine(320)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgmi_nvlink(handle, gpuIds):
"""
Test dcgmi to display nvlink error counts
"""
## keep args in this order. Changing it may break the test
_test_valid_args([
["nvlink", "-e", "-g", str(gpuIds[0])], # run the working nvlink command for gpuId[0]
["nvlink", "-s"] # Link status should work without parameters
])
_test_invalid_args([
["nvlink","-e"], # -e option requires -g option
["nvlink", "-e", "-s"] # -e and -s are mutually exclusive
])
def helper_make_switch_string(switchId):
return "nvswitch:" + str(switchId)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2) #Injecting compute instances only works with live ampere or injected GPUs
@test_utils.run_with_injection_nvswitches(2)
@test_utils.run_with_injection_gpu_instances(2)
@test_utils.run_with_injection_gpu_compute_instances(2)
def test_dcgmi_dmon(handle, gpuIds, switchIds, instanceIds, ciIds):
"""
Test dcgmi to display dmon values
"""
gpuGroupId = str(_create_dcgmi_group(dcgm_structs.DCGM_GROUP_DEFAULT))
switchGroupId = str(_create_dcgmi_group(dcgm_structs.DCGM_GROUP_DEFAULT_NVSWITCHES))
logger.info("Injected switch IDs:" + str(switchIds))
# Creates a comma separated list of gpus
allGpusCsv = ",".join(map(str,gpuIds))
allInstancesCsv = ",".join([("instance:" + str(x)) for x in instanceIds])
# All compute instances
allCisCsv = ",".join([("ci:" + str(x)) for x in ciIds])
#Same for switches but predicate each one with nvswitch
allSwitchesCsv = ",".join(map(helper_make_switch_string,switchIds))
switchFieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_TEMPERATURE_CURRENT
#Inject a value for a field for each switch so we can retrieve it
field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
field.fieldId = switchFieldId
field.status = 0
field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
field.ts = int((time.time()-5) * 1000000.0) #5 seconds ago
field.value.i64 = 0
for switchId in switchIds:
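# Pack a link entity ID: entity group in bits 0-7, link index in bits 8-15, and the switch ID in bits 16 and up, matching the shifts below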
linkId = (dcgm_fields.DCGM_FE_SWITCH << 0) | (switchId << 16) | (1 << 8)
ret = dcgm_agent_internal.dcgmInjectEntityFieldValue(handle, dcgm_fields.DCGM_FE_LINK, linkId, field)
_test_valid_args([
["dmon", "-e", "150,155","-c","1"], # run the dmon for default gpu group.
["dmon", "-e", "150,155","-c","1","-g",gpuGroupId], # run the dmon for a specified gpu group
["dmon", "-e", "150,155","-c","1","-g",'all_gpus'], # run the dmon for a specified group
["dmon", "-e", str(switchFieldId),"-c","1","-g",'all_nvswitches'], # run the dmon for a specified group - Reenable after DCGM-413 is fixed
["dmon", "-e", str(switchFieldId),"-c","1","-g",switchGroupId], # run the dmon for a specified group
["dmon", "-e", "150,155","-c","1","-d","2000"], # run the dmon for delay mentioned and default gpu group.
["dmon", "-e", "150,155","-c","1","-d","2000","-i",allGpusCsv], # run the dmon for devices mentioned and mentioned delay.
["dmon", "-e", "150,155","-c","1","-d","2000","-i",allInstancesCsv],
["dmon", "-e", "150,155","-c","1","-d","2000","-i",allCisCsv],
["dmon", "-e", "150,155","-c","1","-d","2000","-i",allGpusCsv + "," + allInstancesCsv + "," + allCisCsv],
["dmon", "-e", "150,155","-c","1","-d","2000","-i","*"], # run the dmon for all GPUs via wildcard
["dmon", "-e", "150,155","-c","1","-d","2000","-i","*/*"], # run the dmon for all GPU Instances via wildcards
["dmon", "-e", "150,155","-c","1","-d","2000","-i","*/*/*"], # run the dmon for all Compute Instances via wildcards
["dmon", "-e", "150,155","-c","1","-d","2000","-i","*,*/*,*/*/*"], # run the dmon for all entities via wildcards
["dmon", "-e", str(switchFieldId),"-c","1","-d","2000","-i",allSwitchesCsv] # run the dmon for devices mentioned and mentioned delay.
])
#Run tests that take a gpuId as an argument
for gpu in gpuIds:
_test_valid_args([
["dmon", "-e", "150","-c","1","-i",str(gpu)], # run the dmon for one gpu.
["dmon", "-e", "150","-c","1","-i",'gpu:'+str(gpu)], # run the dmon for one gpu, tagged as gpu:.
["dmon", "-e", "150","-c","1","-i",str(gpu)], # run the dmon for mentioned devices and count value.
["dmon", "-e", "150,155","-c","1","-i",str(gpu)] # run the dmon for devices mentioned, default delay and field values that are provided.
])
#Run tests that take a nvSwitch as an argument
for switchId in switchIds:
_test_valid_args([
["dmon", "-e", str(switchFieldId),"-c","1","-i",'nvswitch:'+str(switchId)], # run the dmon for one nvswitch, tagged as nvswitch:.
])
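# Build a CSV of twice DCGM_MAX_NUM_DEVICES GPU IDs so the dmon call below exceeds the supported device count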
hugeGpuCsv = ",".join(map(str,list(range(0, dcgm_structs.DCGM_MAX_NUM_DEVICES*2, 1))))
_test_invalid_args([
["dmon","-c","1"], # run without required fields.
["dmon", "-e", "-150","-c","1","-i","1"], # run with invalid field id.
["dmon", "-e", "150","-c","1","-i","-2"], # run with invalid gpu id.
["dmon", "-e", "150","-c","1","-i","gpu:999"], # run with invalid gpu id.
["dmon", "-e", "150","-c","1","-g","999"], # run with invalid group id.
["dmon", "-i", hugeGpuCsv, "-e", "150", "-c", "1"], # run with invalid number of devices.
["dmon", "-i", "instance:2000", "-e", "150", "-c", "1"], # run with invalid gpu_i
["dmon", "-i", "ci:2000", "-e", "150", "-c", "1"], # run with invalid gpu_ci
["dmon", "-e", "150","f","0","-c","1","-i","0,1,765"], # run with invalid device id (non existing id).
["dmon", "-e", "150","-c","-1","-i","1"], # run with invalid count value.
["dmon", "-e", "150","-c","1","-i","1","-d","-1"], # run with invalid delay (negative value).
["dmon", "-f", "-9","-c","1","-i","1","-d","10000"], # run with invalid field Id.
["dmon", "-f", "150","-c", "1", "-i","0", "-d", "99" ] # run with invalid delay value.
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_with_injection_nvswitches(2)
def test_dcgmi_nvlink_nvswitches(handle, gpuIds, switchIds):
"""
Test dcgmi to display NvLink status and dmon values for injected NvSwitches
"""
gpuGroupId = str(_create_dcgmi_group(dcgm_structs.DCGM_GROUP_DEFAULT))
switchGroupId = str(_create_dcgmi_group(dcgm_structs.DCGM_GROUP_DEFAULT_NVSWITCHES))
logger.info("Injected switch IDs:" + str(switchIds))
_test_valid_args([
["nvlink", "-s"] # Link status should work without parameters
])
# Creates a comma separated list of gpus
allGpusCsv = ",".join(map(str,gpuIds))
#Same for switches but predicate each one with nvswitch
allSwitchesCsv = ",".join(map(helper_make_switch_string,switchIds))
switchFieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_LINK_THROUGHPUT_RX
#Inject a value for a field for each switch so we can retrieve it
field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
field.fieldId = switchFieldId
field.status = 0
field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
field.ts = int((time.time()-5) * 1000000.0) #5 seconds ago
field.value.i64 = 0
for switchId in switchIds:
ret = dcgm_agent_internal.dcgmInjectEntityFieldValue(handle, dcgm_fields.DCGM_FE_SWITCH,
switchId, field)
_test_valid_args([
["dmon", "-e", "150,155","-c","1"], # run the dmon for default gpu group.
["dmon", "-e", "150,155","-c","1","-g",gpuGroupId], # run the dmon for a specified gpu group
["dmon", "-e", "150,155","-c","1","-g",'all_gpus'], # run the dmon for a specified group
["dmon", "-e", str(switchFieldId),"-c","1","-g",'all_nvswitches'], # run the dmon for a specified group - Reenable after DCGM-413 is fixed
["dmon", "-e", str(switchFieldId),"-c","1","-g",switchGroupId], # run the dmon for a specified group
["dmon", "-e", "150,155","-c","1","-d","2000"], # run the dmon for delay mentioned and default gpu group.
["dmon", "-e", "150,155","-c","1","-d","2000","-i",allGpusCsv], # run the dmon for devices mentioned and mentioned delay.
["dmon", "-e", str(switchFieldId),"-c","1","-d","2000","-i",allSwitchesCsv] # run the dmon for devices mentioned and mentioned delay.
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgmi_modules(handle, gpuIds):
"""
Test DCGMI modules
"""
## keep args in this order. Changing it may break the test
_test_valid_args([
["modules", "--list"],
["modules", "--denylist", "5"],
["modules", "--denylist", "policy"],
])
## keep args in this order. Changing it may break the test
_test_invalid_args([
["modules", "--list", "4"],
["modules", "--denylist", "20"],
["modules", "--denylist", "notamodule"],
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgmi_profile(handle, gpuIds):
"""
Test DCGMI "profile" subcommand
"""
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
# Creates a comma separated list of gpus
allGpusCsv = ",".join(map(str,gpuIds))
#See if these GPUs even support profiling. This will bail out for non-Tesla or Pascal or older SKUs
try:
supportedMetrics = dcgmGroup.profiling.GetSupportedMetricGroups()
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_PROFILING_NOT_SUPPORTED) as e:
test_utils.skip_test("Profiling is not supported for gpuIds %s" % str(gpuIds))
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_MODULE_NOT_LOADED) as e:
test_utils.skip_test("The profiling module could not be loaded")
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED) as e:
test_utils.skip_test("The profiling module is not supported")
## keep args in this order. Changing it may break the test
_test_valid_args([
["profile", "--list", "-i", allGpusCsv],
["profile", "--list", "-g", str(dcgmGroup.GetId().value)],
])
## keep args in this order. Changing it may break the test
_test_invalid_args([
["profile", "--list", "--pause", "--resume"], #mutually exclusive flags
["profile", "--pause", "--resume"], #mutually exclusive flags
["profile", "--list", "-i", "999"], #Invalid gpuID
["profile", "--list", "-i", allGpusCsv + ",taco"], #Invalid gpu at end
["profile", "--list", "-g", "999"], #Invalid group
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgmi_profile_affected_by_gpm(handle, gpuIds):
"""
Test DCGMI "profile" subcommands that are affected by if GPM works or not
"""
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
#See if these GPUs even support profiling. This will bail out for non-Tesla or Pascal or older SKUs
try:
supportedMetrics = dcgmGroup.profiling.GetSupportedMetricGroups()
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_PROFILING_NOT_SUPPORTED) as e:
test_utils.skip_test("Profiling is not supported for gpuIds %s" % str(gpuIds))
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_MODULE_NOT_LOADED) as e:
test_utils.skip_test("The profiling module could not be loaded")
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED) as e:
test_utils.skip_test("The profiling module is not supported")
## keep args in this order. Changing it may break the test
pauseResumeArgs = [
["profile", "--pause"], #Pause followed by resume
["profile", "--resume"],
["profile", "--pause"], #Double pause and double resume should be fine
["profile", "--pause"],
["profile", "--resume"],
["profile", "--resume"],
]
#GPM GPUs don't support pause/resume since monitoring and profiling aren't mutually exclusive anymore
if test_utils.gpu_supports_gpm(handle, gpuIds[0]):
_test_invalid_args(pauseResumeArgs)
else:
_test_valid_args(pauseResumeArgs)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgmi_test_introspect(handle, gpuIds):
"""
Test "dcgmi test --introspect"
"""
oneGpuIdStr = str(gpuIds[0])
gpuGroupId = str(_create_dcgmi_group(dcgm_structs.DCGM_GROUP_DEFAULT))
gpuGroupIdStr = str(gpuGroupId)
fieldIdStr = str(dcgm_fields.DCGM_FI_DEV_ECC_CURRENT) #Use this field because it's watched by default in the host engine
## keep args in this order. Changing it may break the test
_test_valid_args([
["test", "--introspect", "--gpuid", oneGpuIdStr, "--field", fieldIdStr],
["test", "--introspect", "-g", gpuGroupIdStr, "--field", fieldIdStr],
["test", "--introspect", "-g", gpuGroupIdStr, "--field", fieldIdStr],
])
## keep args in this order. Changing it may break the test
_test_invalid_args([
["test", "--introspect", "--inject"], #mutually exclusive flags
["test", "--introspect", "--gpuid", oneGpuIdStr], #Missing --field
["test", "--introspect", "-g", gpuGroupIdStr, "--gpuid", oneGpuIdStr],
["test", "--introspect", "--gpuid", "11001001"],
["test", "--introspect", "-g", "11001001"],
["test", "--introspect", "--group", "11001001"],
["test", "--introspect", "-g", gpuGroupIdStr, "--field", "10000000"], #Bad fieldId
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgmi_test_inject(handle, gpuIds):
"""
Test "dcgmi test --inject"
"""
oneGpuIdStr = str(gpuIds[0])
gpuGroupId = str(_create_dcgmi_group(dcgm_structs.DCGM_GROUP_DEFAULT))
gpuGroupIdStr = str(gpuGroupId)
fieldIdStr = str(dcgm_fields.DCGM_FI_DEV_GPU_TEMP)
## keep args in this order. Changing it may break the test
_test_valid_args([
["test", "--inject", "--gpuid", oneGpuIdStr, "--field", fieldIdStr, "-v", '45'],
["test", "--inject", "--gpuid", oneGpuIdStr, "--field", fieldIdStr, "--value", '45'],
])
## keep args in this order. Changing it may break the test
_test_invalid_args([
["test", "--inject", "--introspect"], #mutually exclusive flags
["test", "--inject", "-g", gpuGroupIdStr], #group ID is not supported
["test", "--inject", "--gpuid", oneGpuIdStr], #Missing --field
["test", "--inject", "--gpuid", oneGpuIdStr, "--field", fieldIdStr], #Missing --value
["test", "--inject", "-g", gpuGroupIdStr, "--gpuid", oneGpuIdStr],
["test", "--inject", "--gpuid", "11001001", "--field", fieldIdStr, "--value", '45'], #Bad gpuId
["test", "--inject", "-g", "11001001"],
["test", "--inject", "--group", "11001001"],
["test", "--inject", "--gpuid", oneGpuIdStr, "--field", "10000000", "--value", '45'], #Bad fieldId
])
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgmi_dmon_pause_resume(handle):
_test_valid_args([
['test', '--pause'],
['test', '--resume'],
])
| DCGM-master | testing/python3/tests/test_dcgmi.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import closing
import pydcgm
import dcgm_structs
import logger
import test_utils
import apps
import time
@test_utils.run_only_on_linux()
@test_utils.run_only_on_bare_metal()
@test_utils.run_with_logging_on()
def test_logging_env_var():
"""
Verifies that we log to the supplied env var
"""
if test_utils.loggingLevel != 'DEBUG':
test_utils.skip_test("Detected logLevel != DEBUG. This test requires DEBUG. Likely cause: --eris option")
passed = False
# Env var is automatically set in NvHostEngineApp
nvhost_engine = apps.NvHostEngineApp()
nvhost_engine.start(timeout=10)
contents = None
# Try for 5 seconds
for i in range(25):
time.sleep(0.2)
with closing(open(nvhost_engine.dcgm_trace_fname, encoding='utf-8')) as f:
# pylint: disable=no-member
contents = f.read()
logger.debug("Read %d bytes from %s" % (len(contents), nvhost_engine.dcgm_trace_fname))
# This is checking two things:
# - that we are logging to the file specified in ENV
# - that we are setting severity according to ENV (DEBUG)
if 'DEBUG' in contents:
passed = True
break
# Cleaning up
nvhost_engine.terminate()
nvhost_engine.validate()
errorString = ""
if (not passed):
if contents is not None:
errorString = "Unable to find 'DEBUG' in log file"
else:
errorString = "log file %s was never read" % nvhost_engine.dcgm_trace_fname
assert passed, errorString
@test_utils.run_with_logging_on()
def test_logging_modules():
"""
Verifies that module logging is functional
"""
if test_utils.loggingLevel != 'DEBUG':
test_utils.skip_test("Detected logLevel != DEBUG. This test requires DEBUG. Likely cause: --eris option")
PASSED = True
FAILED = False
result = FAILED
nvhost_engine = apps.NvHostEngineApp()
nvhost_engine.start(timeout=10)
contents = None
# Try for 5 seconds
for i in range(25):
time.sleep(0.2)
with closing(open(nvhost_engine.dcgm_trace_fname, encoding="utf-8")) as f:
# pylint: disable=no-member
contents = f.read()
logger.debug("Read %d bytes from %s" % (len(contents), nvhost_engine.dcgm_trace_fname))
# NVSwitch module is loaded on startup. So we check for records from that module
test_string = "Initialized logging for module 1"
# Note that if --eris is passed, we only log at WARNING level
# If logging is not at DEBUG level, then skip the test
if test_string in contents:
result = PASSED
break
# Cleaning up
nvhost_engine.terminate()
nvhost_engine.validate()
errorString = ""
if (result != PASSED):
if contents is not None:
errorString = "Unable to find '%s' in log file" % test_string
else:
errorString = "log file %s was never read" % nvhost_engine.dcgm_trace_fname
assert result == PASSED, errorString
| DCGM-master | testing/python3/tests/test_logging.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_agent_internal
import dcgm_agent
import logger
import test_utils
import dcgm_fields
import dcgm_internal_helpers
import dcgm_field_injection_helpers
import option_parser
import DcgmDiag
import DcgmHandle
import denylist_recommendations
import threading
import time
import sys
import os
import signal
import utils
import json
from apps.app_runner import AppRunner
def helper_test_denylist_briefly():
# Run a basic test of the denylist script to make sure we don't break compatibility
denylistApp = dcgm_internal_helpers.createDenylistApp(instantaneous=True)
try:
denylistApp.run()
except Exception as e:
assert False, "Exception thrown when running the denylist app: '%s'" % str(e)
try:
output = ""
for line in denylistApp.stdout_lines:
output += "%s\n" % line
jo = json.loads(output)
except Exception as e:
assert False, "Couldn't parse the json output by the denylist. Got exception: %s\noutput\n:%s" % (str(e), output)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_basic_denylisting_script(handle, gpuIds):
helper_test_denylist_briefly()
def helper_test_denylist_checks(handle, gpuIds):
handleObj = DcgmHandle.DcgmHandle(handle=handle)
settings = {}
settings['instant'] = True
settings['entity_get_flags'] = 0
settings['testNames'] = '3'
settings['hostname'] = 'localhost'
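# Watch the memory and PCIe health systems for the initial checks; a PCIe-only mask is set later to verify the injected memory error is then ignored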
settings['watches'] = dcgm_structs.DCGM_HEALTH_WATCH_MEM | dcgm_structs.DCGM_HEALTH_WATCH_PCIE
error_list = []
ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuIds[0],
dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, 0, -50)
denylist_recommendations.check_health(handleObj, settings, error_list)
# Make sure the GPUs pass a basic health test before running this test
for gpuObj in denylist_recommendations.g_gpus:
if gpuObj.IsHealthy() == False:
test_utils.skip_test("Skipping because GPU %d is not healthy. " % gpuObj.GetEntityId())
# Inject a memory error and verify that we fail
denylist_recommendations.g_gpus = [] # Reset g_gpus
ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuIds[0],
dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, 1000, 10)
assert (ret == dcgm_structs.DCGM_ST_OK)
denylist_recommendations.check_health(handleObj, settings, error_list)
for gpuObj in denylist_recommendations.g_gpus:
if gpuObj.GetEntityId() == gpuIds[0]:
assert gpuObj.IsHealthy() == False, "Injected error didn't trigger a failure on GPU %d" % gpuIds[0]
else:
assert gpuObj.IsHealthy(), "GPU %d reported unhealthy despite not having an inserted error: '%s'" % (gpuIds[0], gpuObj.WhyUnhealthy())
# Remove the memory monitor and make sure we pass our checks
denylist_recommendations.g_gpus = [] # Reset g_gpus
settings['watches'] = dcgm_structs.DCGM_HEALTH_WATCH_PCIE
denylist_recommendations.check_health(handleObj, settings, error_list)
for gpuObj in denylist_recommendations.g_gpus:
if gpuObj.GetEntityId() == gpuIds[0]:
assert gpuObj.IsHealthy(), "Injected error wasn't ignored for GPU %d: %s" % (gpuIds[0], gpuObj.WhyUnhealthy())
else:
assert gpuObj.IsHealthy(), "GPU %d reported unhealthy despite not having an inserted error: '%s'" % (gpuIds[0], gpuObj.WhyUnhealthy())
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_denylist_checks(handle, gpuIds):
helper_test_denylist_checks(handle, gpuIds)
| DCGM-master | testing/python3/tests/test_denylisting.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_agent
from dcgm_structs import DCGM_ST_NOT_SUPPORTED, dcgmExceptionClass
import test_utils
import logger
import os
import option_parser
import time
import dcgm_fields
import dcgm_structs_internal
import dcgm_agent_internal
import DcgmReader
import random
import dcgm_field_helpers
import apps
g_profNotSupportedErrorStr = "Continuous mode profiling is not supported for this GPU group. Either libnvperf_dcgm_host.so isn't in your LD_LIBRARY_PATH or it is not the NDA version that supports DC profiling"
g_moduleNotLoadedErrorStr = "Continuous mode profiling is not supported for this system because the profiling module could not be loaded. This is likely due to libnvperf_dcgm_host.so not being in LD_LIBRARY_PATH"
DLG_MAX_METRIC_GROUPS = 5 #This is taken from modules/profiling/DcgmLopConfig.h. These values need to be in sync for multipass tests to pass
def helper_check_profiling_environment(dcgmGroup):
try:
dcgmGroup.profiling.GetSupportedMetricGroups()
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_PROFILING_NOT_SUPPORTED) as e:
test_utils.skip_test(g_profNotSupportedErrorStr)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_MODULE_NOT_LOADED) as e:
test_utils.skip_test(g_moduleNotLoadedErrorStr)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED) as e:
test_utils.skip_test(g_profNotSupportedErrorStr)
def helper_get_supported_field_ids(dcgmGroup):
'''
Get a list of the supported fieldIds for the provided DcgmGroup object.
It's important to query this dynamically, as field IDs can vary from chip to chip
and driver version to driver version
'''
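# Flatten the field IDs of every supported metric group into a single list; duplicates across groups, if any, are kept as-is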
fieldIds = []
metricGroups = dcgmGroup.profiling.GetSupportedMetricGroups()
for i in range(metricGroups.numMetricGroups):
for j in range(metricGroups.metricGroups[i].numFieldIds):
fieldIds.append(metricGroups.metricGroups[i].fieldIds[j])
return fieldIds
def helper_get_multipass_field_ids(dcgmGroup):
'''
Get a list of the supported fieldIds for the provided DcgmGroup object that
require multiple passes in the hardware
Returns None if no such combination exists. Otherwise returns a list of lists,
where the first dimension groups fields that are mutually exclusive with each
other and the second dimension contains the fieldIds within an exclusive group.
'''
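# Illustrative (hypothetical) return value: [[1001, 1002], [1003, 1004]] would mean fields 1001/1002
# can be collected in one pass and fields 1003/1004 in another, but the two passes are mutually exclusive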
exclusiveFields = {} #Key by major ID
#First, look for two metric groups that have the same major version but different minor version
#That is the sign of being multi-pass
metricGroups = dcgmGroup.profiling.GetSupportedMetricGroups()
for i in range(metricGroups.numMetricGroups):
majorId = metricGroups.metricGroups[i].majorId
if majorId not in exclusiveFields:
exclusiveFields[majorId] = []
fieldIds = metricGroups.metricGroups[i].fieldIds[0:metricGroups.metricGroups[i].numFieldIds]
exclusiveFields[majorId].append(fieldIds)
#See if any groups have > 1 element. Volta and turing only have one multi-pass group, so we
#can just return one if we find it
for group in list(exclusiveFields.values()):
if len(group) > 1:
return group
return None
def helper_get_single_pass_field_ids(dcgmGroup):
'''
Get a list of the supported fieldIds for the provided DcgmGroup object that can
be watched at the same time
Returns None if no field IDs are supported
'''
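# Strategy: pick the metric group with the most field IDs, since fields within a single metric group can be watched together in one pass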
fieldIds = []
#Try to return the largest single-pass group
largestMetricGroupIndex = None
largestMetricGroupCount = 0
metricGroups = dcgmGroup.profiling.GetSupportedMetricGroups()
for i in range(metricGroups.numMetricGroups):
if metricGroups.metricGroups[i].numFieldIds > largestMetricGroupCount:
largestMetricGroupIndex = i
largestMetricGroupCount = metricGroups.metricGroups[i].numFieldIds
if largestMetricGroupIndex is None:
return None
for i in range(metricGroups.metricGroups[largestMetricGroupIndex].numFieldIds):
fieldIds.append(metricGroups.metricGroups[largestMetricGroupIndex].fieldIds[i])
return fieldIds
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
def test_dcgm_prof_get_supported_metric_groups_sanity(handle, gpuIds):
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_watch_fields_sanity(handle, gpuIds):
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
fieldIds = helper_get_single_pass_field_ids(dcgmGroup)
assert fieldIds is not None
logger.info("Single pass field IDs: " + str(fieldIds))
fieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "my_field_group", fieldIds)
dcgmGroup.samples.WatchFields(fieldGroup, 1000000, 3600.0, 0)
#Throws an exception on error
dcgmGroup.samples.WatchFields(fieldGroup, 1000000, 3600.0, 0)
#Cleanup
dcgmGroup.samples.UnwatchFields(fieldGroup)
dcgmGroup.Delete()
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.run_only_as_root()
@test_utils.run_with_injection_gpus(gpuCount=2) # Injecting fake GPUs to simulate not supported SKUs
@test_utils.run_for_each_gpu_individually()
def test_dcgm_prof_all_supported_fields_watchable(handle, gpuId):
'''
Verify that all fields that are reported as supported are watchable and
that values can be returned for them
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', [gpuId])
helper_check_profiling_environment(dcgmGroup)
fieldIds = helper_get_supported_field_ids(dcgmGroup)
assert fieldIds is not None
watchFreq = 1000 #1 ms
maxKeepAge = 60.0
maxKeepSamples = 0
maxAgeUsec = int(maxKeepAge) * watchFreq
entityPairList = [dcgm_structs.c_dcgmGroupEntityPair_t(dcgm_fields.DCGM_FE_GPU, gpuId)]
for fieldId in fieldIds:
fieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "my_field_group_%d" % fieldId, [fieldId, ])
# If there are only one unsupported SKUs in the group, WatchFields should return an error.
# If at least one GPU in the group is supported, WatchFields will be successful.
# The described logic is used to skip unsupported or fake SKUs.
try:
dcgmGroup.samples.WatchFields(fieldGroup, watchFreq, maxKeepAge, maxKeepSamples)
except:
fieldGroup.Delete()
test_utils.skip_test_supported("DCP")
# Sending a request to the profiling manager guarantees that an update cycle has happened since
# the last request
dcgmGroup.profiling.GetSupportedMetricGroups()
# validate watch freq, quota, and watched flags
cmfi = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
assert (cmfi.flags & dcgm_structs_internal.DCGM_CMI_F_WATCHED) != 0, "gpuId %u, fieldId %u not watched" % (gpuId, fieldId)
assert cmfi.numSamples > 0
assert cmfi.numWatchers == 1, "numWatchers %d" % cmfi.numWatchers
assert cmfi.monitorIntervalUsec == watchFreq, "monitorIntervalUsec %u != watchFreq %u" % (cmfi.monitorIntervalUsec, watchFreq)
assert cmfi.lastStatus == dcgm_structs.DCGM_ST_OK, "lastStatus %u != DCGM_ST_OK" % (cmfi.lastStatus)
fieldValues = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entityPairList, [fieldId, ], 0)
for i, fieldValue in enumerate(fieldValues):
logger.debug(str(fieldValue))
assert(fieldValue.status == dcgm_structs.DCGM_ST_OK), "idx %d status was %d" % (i, fieldValue.status)
assert(fieldValue.ts != 0), "idx %d timestamp was 0" % (i)
dcgmGroup.samples.UnwatchFields(fieldGroup)
fieldGroup.Delete()
#Validate watch flags after unwatch
cmfi = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
assert (cmfi.flags & dcgm_structs_internal.DCGM_CMI_F_WATCHED) == 0, "gpuId %u, fieldId %u still watched. flags x%X" % (gpuId, fieldId, cmfi.flags)
assert cmfi.numWatchers == 0, "numWatchers %d" % cmfi.numWatchers
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_watch_multipass(handle, gpuIds):
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
mpFieldIds = helper_get_multipass_field_ids(dcgmGroup)
if mpFieldIds is None:
test_utils.skip_test("No multipass profiling fields exist for the gpu group")
logger.info("Multipass fieldIds: " + str(mpFieldIds))
#Make sure that multipass watching up to DLG_MAX_METRIC_GROUPS groups works
for i in range(min(len(mpFieldIds), DLG_MAX_METRIC_GROUPS)):
fieldIds = []
for j in range(i+1):
fieldIds.extend(mpFieldIds[j])
logger.info("Positive testing multipass fieldIds %s" % str(fieldIds))
fieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "my_field_group_%d" % i, fieldIds)
dcgmGroup.samples.WatchFields(fieldGroup, 1000000, 3600.0, 0)
dcgmGroup.samples.UnwatchFields(fieldGroup)
fieldGroup.Delete()
if len(mpFieldIds) <= DLG_MAX_METRIC_GROUPS:
test_utils.skip_test("Skipping multipass failure test since there are %d <= %d multipass groups." %
(len(mpFieldIds), DLG_MAX_METRIC_GROUPS))
for i in range(DLG_MAX_METRIC_GROUPS+1, len(mpFieldIds)+1):
fieldIds = []
for j in range(i):
fieldIds.extend(mpFieldIds[j])
logger.info("Negative testing multipass fieldIds %s" % str(fieldIds))
fieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "my_field_group_%d" % i, fieldIds)
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_PROFILING_MULTI_PASS)):
dcgmGroup.samples.WatchFields(fieldGroup, 1000000, 3600.0, 0)
dcgmGroup.samples.UnwatchFields(fieldGroup)
fieldGroup.Delete()
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_watch_fields_multi_user(handle, gpuIds):
dcgmHandle = pydcgm.DcgmHandle(ipAddress="127.0.0.1")
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
dcgmHandle2 = pydcgm.DcgmHandle(ipAddress="127.0.0.1")
dcgmSystem2 = dcgmHandle2.GetSystem()
dcgmGroup2 = dcgmSystem2.GetGroupWithGpuIds('mygroup2', gpuIds)
helper_check_profiling_environment(dcgmGroup)
fieldIds = helper_get_single_pass_field_ids(dcgmGroup)
assert fieldIds is not None
fieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "my_field_group_0", fieldIds)
fieldGroup2 = pydcgm.DcgmFieldGroup(dcgmHandle, "my_field_group_2", fieldIds)
# Take ownership of the profiling watches
dcgmGroup.samples.WatchFields(fieldGroup, 1000000, 3600.0, 0)
dcgmGroup2.samples.WatchFields(fieldGroup2, 1000000, 3600.0, 0)
# Release the watches
dcgmGroup2.samples.UnwatchFields(fieldGroup2)
dcgmGroup.samples.UnwatchFields(fieldGroup)
# Now dcgmHandle2 owns the watches
dcgmGroup2.samples.WatchFields(fieldGroup2, 1000000, 3600.0, 0)
# connection 1 should not fail to acquire the watches
dcgmGroup.samples.WatchFields(fieldGroup, 1000000, 3600.0, 0)
dcgmGroup2.samples.UnwatchFields(fieldGroup2)
dcgmGroup.samples.UnwatchFields(fieldGroup)
fieldGroup.Delete()
fieldGroup2.Delete()
dcgmHandle.Shutdown()
dcgmHandle2.Shutdown()
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_with_dcgmreader(handle, gpuIds):
"""
Verifies that we can access profiling data with DcgmReader, which is the
base class for dcgm exporters
"""
dcgmHandle = pydcgm.DcgmHandle(handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
fieldIds = helper_get_single_pass_field_ids(dcgmGroup)
updateFrequencyUsec = 200000 # 200ms
sleepTime = updateFrequencyUsec / 1000000 * 2 # Convert to seconds and sleep twice as long; ensures fresh sample
dr = DcgmReader.DcgmReader(fieldIds=fieldIds, updateFrequency=updateFrequencyUsec, maxKeepAge=30.0, gpuIds=gpuIds)
dr.SetHandle(handle)
for i in range(5):
time.sleep(sleepTime)
latest = dr.GetLatestGpuValuesAsFieldIdDict()
logger.info(str(latest))
for gpuId in gpuIds:
if len(latest[gpuId]) != len(fieldIds):
missingFieldIds = []
extraFieldIds = []
for fieldId in fieldIds:
if fieldId not in latest[gpuId]:
missingFieldIds.append(fieldId)
for fieldId in latest[gpuId]:
if fieldId not in fieldIds:
extraFieldIds.append(fieldId)
errmsg = "i=%d, gpuId %d, len %d != %d" % (i, gpuId, len(latest[gpuId]), len(fieldIds))
if len(missingFieldIds) > 0:
errmsg = errmsg + " GPU is missing entries for fields %s" % str(missingFieldIds)
if len(extraFieldIds) > 0:
errmsg = errmsg + " GPU has extra entries for fields %s" % str(extraFieldIds)
assert len(latest[gpuId]) == len(fieldIds), errmsg
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_initial_valid_record(handle, gpuIds):
'''
Test that we can retrieve a valid FV for a profiling field immediately after watching
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
fieldIds = helper_get_single_pass_field_ids(dcgmGroup)
assert fieldIds is not None
fieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "my_field_group_0", fieldIds)
#Set watches using a large interval so we don't get a record for 10 seconds in the bug case
dcgmGroup.samples.WatchFields(fieldGroup, 10000000, 3600.0, 0)
gpuId = gpuIds[0]
fieldValues = dcgm_agent.dcgmEntityGetLatestValues(handle, dcgm_fields.DCGM_FE_GPU, gpuId, fieldIds)
assert len(fieldValues) == len(fieldIds), "%d != %d" % (len(fieldValues), len(fieldIds))
for i, fieldValue in enumerate(fieldValues):
logger.info(str(fieldValue))
assert(fieldValue.version != 0), "idx %d Version was 0" % i
assert(fieldValue.fieldId == fieldIds[i]), "idx %d fieldValue.fieldId %d != fieldIds[i] %d" % (i, fieldValue.fieldId, fieldIds[i])
assert(fieldValue.status == dcgm_structs.DCGM_ST_OK), "idx %d status was %d" % (i, fieldValue.status)
#The following line catches the bug in Jira DCGM-1357. Previously, a record would be returned with a
#0 timestamp
assert(fieldValue.ts != 0), "idx %d timestamp was 0" % i
#Cleanup
dcgmGroup.samples.UnwatchFields(fieldGroup)
fieldGroup.Delete()
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
def test_dcgm_prof_multi_pause_resume(handle, gpuIds):
'''
Test that we can pause and resume profiling over and over without error
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
#GPM-enabled GPUs don't support pause/resume
if test_utils.gpu_supports_gpm(handle, gpuIds[0]):
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED)):
dcgmSystem.profiling.Pause()
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED)):
dcgmSystem.profiling.Resume()
return
helper_check_profiling_environment(dcgmGroup)
#We should never get an error back from pause or resume. Pause and Resume throw exceptions on error
numPauses = 0
numResumes = 0
for i in range(100):
#Flip a coin and pause if we get 0. unpause otherwise (1)
coin = random.randint(0,1)
if coin == 0:
dcgmSystem.profiling.Pause()
numPauses += 1
else:
dcgmSystem.profiling.Resume()
numResumes += 1
logger.info("Got %d pauses and %d resumes" % (numPauses, numResumes))
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_pause_resume_values(handle, gpuIds):
'''
Test that we get valid values when profiling is resumed and BLANK values when profiling is paused
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
#GPM-enabled GPUs don't support pause/resume
if test_utils.gpu_supports_gpm(handle, gpuIds[0]):
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED)):
dcgmSystem.profiling.Pause()
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED)):
dcgmSystem.profiling.Resume()
return
helper_check_profiling_environment(dcgmGroup)
fieldIds = helper_get_single_pass_field_ids(dcgmGroup)
assert fieldIds is not None
#10 ms watches so we can test quickly
watchIntervalUsec = 10000
sleepIntervalSec = 0.1 * len(gpuIds) #100 ms per GPU
#Start paused. All the other tests start unpaused
dcgmSystem.profiling.Pause()
fieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "my_field_group_0", fieldIds)
dcgmGroup.samples.WatchFields(fieldGroup, watchIntervalUsec, 60.0, 0)
gpuId = gpuIds[0]
fieldValues = dcgm_agent.dcgmEntityGetLatestValues(handle, dcgm_fields.DCGM_FE_GPU, gpuId, fieldIds)
assert len(fieldValues) == len(fieldIds), "%d != %d" % (len(fieldValues), len(fieldIds))
#All should be blank
for i, fieldValue in enumerate(fieldValues):
fv = dcgm_field_helpers.DcgmFieldValue(fieldValue)
assert fv.isBlank, "Got nonblank fv index %d" % i
#Resume. All should be valid
dcgmSystem.profiling.Resume()
time.sleep(sleepIntervalSec)
fieldValues = dcgm_agent.dcgmEntityGetLatestValues(handle, dcgm_fields.DCGM_FE_GPU, gpuId, fieldIds)
assert len(fieldValues) == len(fieldIds), "%d != %d" % (len(fieldValues), len(fieldIds))
#All should be non-blank
for i, fieldValue in enumerate(fieldValues):
fv = dcgm_field_helpers.DcgmFieldValue(fieldValue)
assert not fv.isBlank, "Got blank fv index %d" % i
#Pause again. All should be blank
dcgmSystem.profiling.Pause()
time.sleep(sleepIntervalSec)
fieldValues = dcgm_agent.dcgmEntityGetLatestValues(handle, dcgm_fields.DCGM_FE_GPU, gpuId, fieldIds)
assert len(fieldValues) == len(fieldIds), "%d != %d" % (len(fieldValues), len(fieldIds))
#All should be blank
for i, fieldValue in enumerate(fieldValues):
fv = dcgm_field_helpers.DcgmFieldValue(fieldValue)
assert fv.isBlank, "Got nonblank fv index %d" % i
#This shouldn't fail
dcgmSystem.profiling.Resume()
dcgmGroup.samples.UnwatchFields(fieldGroup)
fieldGroup.Delete()
def helper_test_dpt_field_ids(handle, gpuIds, fieldIdsStr, extraArgs = None):
'''
Test that we can retrieve a valid FV for a profiling field or fields
immediately after watching
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
cudaDriverVersion = test_utils.get_cuda_driver_version(handle, gpuIds[0])
supportedFieldIds = helper_get_supported_field_ids(dcgmGroup)
# Just test the first GPU of our SKU. Other tests will cover multiple SKUs
useGpuIds = [gpuIds[0], ]
args = ["--target-max-value", "--no-dcgm-validation", "--dvs", "--reset", "--mode", "validate", "-d", "5.0", "-r", "0.25", "--sync-count", "5", "-w", "5", "-t", fieldIdsStr]
if extraArgs is not None:
args.extend(extraArgs)
app = apps.DcgmProfTesterApp(cudaDriverMajorVersion=cudaDriverVersion[0], gpuIds=useGpuIds, args=args)
app.start(timeout=120.0 * len(gpuIds)) #Account for slow systems but still add an upper bound
app.wait()
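# Note on the args above (descriptive only; flag semantics are assumed from how they are built here,
# not taken from dcgmproftester documentation): "-t" receives the comma-separated profiling field ID
# string, and DcgmProfTesterApp is given the CUDA driver major version so it can pick a matching
# dcgmproftester binary. The remaining numeric flags are tuning values passed straight through.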
def helper_test_dpt_field_id(handle, gpuIds, fieldId, extraArgs = None):
'''
Test that we can retrieve a valid FV for a profiling field immediately after watching
'''
helper_test_dpt_field_ids(handle, gpuIds, str(fieldId), extraArgs)
def helper_test_dpt_field_fast_id(handle, gpuIds, fieldId, extraArgs = None):
'''
Test that we can retrieve a valid FV for a profiling field immediately after watching
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
cudaDriverVersion = test_utils.get_cuda_driver_version(handle, gpuIds[0])
supportedFieldIds = helper_get_supported_field_ids(dcgmGroup)
# Just test the first GPU of our SKU. Other tests will cover multiple SKUs
useGpuIds = [gpuIds[0], ]
args = ["--target-max-value", "--no-dcgm-validation", "--dvs", "--reset", "--mode", "validate,fast", "-d", "15.0", "-r", "1.0", "--sync-count", "5", "-w", "5", "-t", str(fieldId)]
if extraArgs is not None:
args.extend(extraArgs)
app = apps.DcgmProfTesterApp(cudaDriverMajorVersion=cudaDriverVersion[0], gpuIds=useGpuIds, args=args)
app.start(timeout=120.0 * len(gpuIds)) #Account for slow systems but still add an upper bound
app.wait()
def helper_test_dpt_h(handle, gpuIds):
'''
Test that -h command line argument works.
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
cudaDriverVersion = test_utils.get_cuda_driver_version(handle, gpuIds[0])
supportedFieldIds = helper_get_supported_field_ids(dcgmGroup)
#Just test the first GPU of our SKU. Other tests will cover multiple SKUs
useGpuIds = [gpuIds[0], ]
args = ["-h"]
app = apps.DcgmProfTesterApp(cudaDriverMajorVersion=cudaDriverVersion[0], gpuIds=useGpuIds, args=args)
app.start(timeout=120.0 * len(gpuIds)) #Account for slow systems but still add an upper bound
app.wait()
def helper_test_dpt_help(handle, gpuIds):
'''
Test that command line --help argument works.
'''
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
cudaDriverVersion = test_utils.get_cuda_driver_version(handle, gpuIds[0])
supportedFieldIds = helper_get_supported_field_ids(dcgmGroup)
#Just test the first GPU of our SKU. Other tests will cover multiple SKUs
useGpuIds = [gpuIds[0], ]
args = ["--help"]
app = apps.DcgmProfTesterApp(cudaDriverMajorVersion=cudaDriverVersion[0], gpuIds=useGpuIds, args=args)
app.start(timeout=120.0 * len(gpuIds)) #Account for slow systems but still add an upper bound
app.wait()
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_gr_active(handle, gpuIds):
helper_test_dpt_field_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_GR_ENGINE_ACTIVE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_h(handle, gpuIds):
helper_test_dpt_h(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_help(handle, gpuIds):
helper_test_dpt_help(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_sm_active(handle, gpuIds):
helper_test_dpt_field_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_SM_ACTIVE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_sm_occupancy(handle, gpuIds):
helper_test_dpt_field_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_SM_OCCUPANCY)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.exclude_non_compute_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_tensor_active(handle, gpuIds):
helper_test_dpt_field_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_PIPE_TENSOR_ACTIVE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.filter_sku("2322 2324 20F5 20F3")
@test_utils.run_only_if_gpus_available()
@test_utils.run_only_as_root()
def test_dcgmproftester_fp64_active(handle, gpuIds):
helper_test_dpt_field_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_PIPE_FP64_ACTIVE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_fp32_active(handle, gpuIds):
helper_test_dpt_field_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_PIPE_FP32_ACTIVE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_fp16_active(handle, gpuIds):
helper_test_dpt_field_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_PIPE_FP16_ACTIVE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_pcie_rx(handle, gpuIds):
helper_test_dpt_field_fast_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_PCIE_RX_BYTES, ["--percent-tolerance", "20.0"])
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_pcie_tx(handle, gpuIds):
helper_test_dpt_field_fast_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_PCIE_TX_BYTES)
def dont_test_slower_gpus(handle, gpuIds):
# These GPU ids don't need to be tested
lower_bandwidth_ids = [ 0x20f5, 0x20f6 ]
for gpuId in gpuIds:
deviceId = test_utils.get_device_id(handle, gpuId)
if deviceId in lower_bandwidth_ids:
test_utils.skip_test("Skipping the nvlink bandwidth tests for device id: '%s'" % deviceId)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgmproftester_nvlink_rx(handle, gpuIds):
dont_test_slower_gpus(handle, gpuIds)
helper_test_dpt_field_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_NVLINK_RX_BYTES)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgmproftester_nvlink_tx(handle, gpuIds):
dont_test_slower_gpus(handle, gpuIds)
helper_test_dpt_field_id(handle, gpuIds, dcgm_fields.DCGM_FI_PROF_NVLINK_TX_BYTES)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgmproftester_nvlink_and_other(handle, gpuIds):
'''
This added to verify the fix for
https://nvbugswb.nvidia.com/NvBugs5/SWBug.aspx?bugid=3903747
'''
dont_test_slower_gpus(handle, gpuIds)
helper_test_dpt_field_ids(handle, gpuIds, str(dcgm_fields.DCGM_FI_PROF_SM_ACTIVE) + "," + str(dcgm_fields.DCGM_FI_PROF_NVLINK_TX_BYTES))
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgmproftester_parallel_gpus(handle, gpuIds):
'''
Test that we can successfully read dcgmproftester metrics on multiple GPUs concurrently
This tests a few things:
1. That metrics work for more than GPU 0
2. That metrics work for multiple GPUs at a time
'''
if len(gpuIds) < 2:
test_utils.skip_test("Skipping multi-GPU test since there's only one of this SKU")
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
helper_check_profiling_environment(dcgmGroup)
cudaDriverVersion = test_utils.get_cuda_driver_version(handle, gpuIds[0])
#Graphics activity works for every GPU that supports DCP. It also works reliably even under heavy concurrency
fieldIds = "1001"
args = ["--mode", "validate", "-d", "15.0", "-r", "1.0", "--sync-count", "5", "-w", "10", "-t", fieldIds]
app = apps.DcgmProfTesterApp(cudaDriverMajorVersion=cudaDriverVersion[0], gpuIds=gpuIds, args=args)
app.start(timeout=120.0 * len(gpuIds)) #Account for slow systems but still add an upper bound
app.wait()
app.validate() #Validate here so that errors are printed when they occur instead of at the end of the test
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.exclude_confidential_compute_gpus()
@test_utils.run_only_if_gpus_available()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_prof_global_pause_resume_values(handle, gpuIds):
"""
Test that we get valid values when DCGM is resumed and BLANK values when DCGM is paused
"""
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
# GPM-enabled GPUs would get DCP metrics from the NVML instead of the paused profiling module and will return
# valid values when DCGM is paused until we implement full driver detach/reattach on pause/resume.
if test_utils.gpu_supports_gpm(handle, gpuIds[0]):
test_utils.skip_test("Skipping test for GPM-enabled GPUs")
helper_check_profiling_environment(dcgmGroup)
fieldIds = helper_get_single_pass_field_ids(dcgmGroup)
assert fieldIds is not None
# 10 ms watches so we can test quickly
watchIntervalUsec = 10000
sleepIntervalSec = 0.1 * len(gpuIds) # 100 ms per GPU
# Start paused. All the other tests start unpaused
dcgmSystem.PauseTelemetryForDiag()
fieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "my_field_group_0", fieldIds)
dcgmGroup.samples.WatchFields(fieldGroup, watchIntervalUsec, 60.0, 0)
gpuId = gpuIds[0]
fieldValues = dcgm_agent.dcgmEntityGetLatestValues(handle, dcgm_fields.DCGM_FE_GPU, gpuId, fieldIds)
assert len(fieldValues) == len(fieldIds), "%d != %d" % (len(fieldValues), len(fieldIds))
# All should be blank
for i, fieldValue in enumerate(fieldValues):
fv = dcgm_field_helpers.DcgmFieldValue(fieldValue)
assert fv.isBlank, "Got nonblank fv index %d" % i
# Resume. All should be valid
dcgmSystem.ResumeTelemetryForDiag()
time.sleep(sleepIntervalSec)
fieldValues = dcgm_agent.dcgmEntityGetLatestValues(handle, dcgm_fields.DCGM_FE_GPU, gpuId, fieldIds)
assert len(fieldValues) == len(fieldIds), "%d != %d" % (len(fieldValues), len(fieldIds))
# All should be non-blank
for i, fieldValue in enumerate(fieldValues):
fv = dcgm_field_helpers.DcgmFieldValue(fieldValue)
assert not fv.isBlank, "Got blank fv index %d" % i
# Pause again. All should be blank
dcgmSystem.PauseTelemetryForDiag()
time.sleep(sleepIntervalSec)
fieldValues = dcgm_agent.dcgmEntityGetLatestValues(handle, dcgm_fields.DCGM_FE_GPU, gpuId, fieldIds)
assert len(fieldValues) == len(fieldIds), "%d != %d" % (len(fieldValues), len(fieldIds))
# All should be blank
for i, fieldValue in enumerate(fieldValues):
fv = dcgm_field_helpers.DcgmFieldValue(fieldValue)
assert fv.isBlank, "Got nonblank fv index %d" % i
# This shouldn't fail
dcgmSystem.ResumeTelemetryForDiag()
dcgmGroup.samples.UnwatchFields(fieldGroup)
fieldGroup.Delete()
| DCGM-master | testing/python3/tests/test_prof.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_agent
from dcgm_structs import dcgmExceptionClass
import test_utils
import logger
import os
import option_parser
import DcgmDiag
import json
import os.path
def load_json_stats_file(filename, logContentsOnError=True):
with open(filename) as json_file:
data = json.load(json_file)
if data is None and logContentsOnError:
logger.error("Unable to load json file %s. File contents: %s" % (filename, json_file.read()))
return data
raise "Couldn't open stats file %s" % filename
def helper_basic_stats_file_check(statsFile, gpuIds, statName):
try:
json_data = load_json_stats_file(statsFile)
finally:
os.remove(statsFile)
assert json_data != None, "Could not load json data from the stats file"
foundGpuIds = []
foundStatName = False
if not statName:
foundStatName = True
for stat_names in json_data["GPUS"]:
for stat in stat_names:
# Make sure the requested stat name is found
if foundStatName == False:
foundStatName = statName == stat
prev_timestamps = []
gpuId = stat_names["gpuId"]
if stat == "gpuId":
foundGpuIds.append(int(gpuId))
continue
for value in stat_names[stat]:
# Make sure no timestamps are repeated
if len(prev_timestamps) > 0:
assert value['timestamp'] > prev_timestamps[-1], \
"GPU %s, field %s has out of order timestamps: %s then %s" % \
(gpuId, stat, prev_timestamps[-1], value['timestamp'])
prev_timestamps.append(value['timestamp'])
assert foundStatName == True, "Expected to find stat '%s', but it was not present in the stats file" % statName
# Make sure we found each expected GPU id
for gpuId in gpuIds:
assert gpuId in foundGpuIds, "Couldn't find GPU %d in the stats file (found %s)" % (gpuId, str(foundGpuIds))
def helper_test_stats_file_basics(handle, gpuIds, statsAsString, pluginName, pluginIndex, paramStr, statName=None):
#Run on a single GPU since we're just testing the stats file output
gpuIds = [gpuIds[0], ]
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr=pluginName, paramsStr=paramStr) # was 20
dd.SetStatsPath('/tmp/')
# Make sure a stats file was created
statsfile = '/tmp/stats_%s.json' % (pluginName.replace(' ', '_'))
if statsAsString == True:
dd.SetConfigFileContents("%YAML 1.2\n\nglobals:\n logfile_type: text\n")
response = test_utils.diag_execute_wrapper(dd, handle)
skippedAll = True
try:
if len(response.systemError.msg) == 0:
passedCount = 0
errors = ""
for gpuIndex in range(response.gpuCount):
resultType = response.perGpuResponses[gpuIndex].results[pluginIndex].result
if resultType != dcgm_structs.DCGM_DIAG_RESULT_SKIP \
and resultType != dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN:
skippedAll = False
if resultType == dcgm_structs.DCGM_DIAG_RESULT_PASS:
passedCount = passedCount + 1
else:
warning = response.perGpuResponses[gpuIndex].results[pluginIndex].error.msg
if len(warning):
errors = "%s GPU %d failed: %s" % (errors, gpuIndex, warning)
if skippedAll == False and passedCount > 0:
detailedMsg = "passed on %d of %d GPUs" % (passedCount, response.gpuCount)
if len(errors):
detailedMsg = "%s and had these errors: %s" % (detailedMsg, errors)
logger.info("%s when running the %s plugin" % (detailedMsg, pluginName))
assert os.path.isfile(statsfile), "Statsfile '%s' was not created as expected and %s" % (statsfile, detailedMsg)
if not statsAsString:
helper_basic_stats_file_check(statsfile, gpuIds, statName)
elif passedCount == 0:
test_utils.skip_test("Unable to pass any of these short runs for plugin %s." % pluginName)
else:
test_utils.skip_test("The %s plugin was skipped, so we cannot run this test." % pluginName)
else:
test_utils.skip_test("The %s plugin had a problem when executing, so we cannot run this test." % pluginName)
finally:
if os.path.exists(statsfile):
os.remove(statsfile)
@test_utils.run_only_as_root()
@test_utils.with_service_account('dcgm-tests-service-account')
@test_utils.run_with_standalone_host_engine(20, heArgs=['--service-account', 'dcgm-tests-service-account'])
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_file_present_standalone_with_service_account(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'diagnostic', dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "diagnostic.test_duration=10", statName='perf_gflops')
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_file_present_standalone(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'diagnostic', dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "diagnostic.test_duration=10", statName='perf_gflops')
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_file_present_embedded(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'diagnostic', dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "diagnostic.test_duration=10", statName='perf_gflops')
@test_utils.run_only_as_root()
@test_utils.with_service_account('dcgm-tests-service-account')
@test_utils.run_with_standalone_host_engine(20, heArgs=['--service-account', 'dcgm-tests-service-account'])
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_string_stats_file_present_standalone_with_service_account(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, True, 'diagnostic', dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "diagnostic.test_duration=10")
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_string_stats_file_present_standalone(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, True, 'diagnostic', dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "diagnostic.test_duration=10")
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_string_stats_file_present_embedded(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, True, 'diagnostic', dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "diagnostic.test_duration=10")
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_basics_targeted_power_embedded(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'targeted power', dcgm_structs.DCGM_TARGETED_POWER_INDEX, "targeted power.test_duration=10")
@test_utils.run_only_as_root()
@test_utils.with_service_account('dcgm-tests-service-account')
@test_utils.run_with_standalone_host_engine(20, heArgs=['--service-account', 'dcgm-tests-service-account'])
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_basics_targeted_power_standalone_with_service_account(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'targeted power', dcgm_structs.DCGM_TARGETED_POWER_INDEX, "targeted power.test_duration=10")
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_basics_targeted_power_standalone(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'targeted power', dcgm_structs.DCGM_TARGETED_POWER_INDEX, "targeted power.test_duration=10")
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_basics_targeted_stress_embedded(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'targeted stress', dcgm_structs.DCGM_TARGETED_STRESS_INDEX, "targeted stress.test_duration=10", statName='flops_per_op')
@test_utils.run_only_as_root()
@test_utils.with_service_account('dcgm-tests-service-account')
@test_utils.run_with_standalone_host_engine(20, heArgs=['--service-account', 'dcgm-tests-service-account'])
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_basics_targeted_stress_standalone_with_service_account(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'targeted stress', dcgm_structs.DCGM_TARGETED_STRESS_INDEX, "targeted stress.test_duration=10", statName='flops_per_op')
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_basics_targeted_stress_standalone(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'targeted stress', dcgm_structs.DCGM_TARGETED_STRESS_INDEX, "targeted stress.test_duration=10", statName='flops_per_op')
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_basics_pcie_embedded(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'pcie', dcgm_structs.DCGM_PCI_INDEX, "pcie.test_duration=10;pcie.test_with_gemm=true", statName='perf_gflops')
@test_utils.run_only_as_root()
@test_utils.with_service_account('dcgm-tests-service-account')
@test_utils.run_with_standalone_host_engine(20, heArgs=['--service-account', 'dcgm-tests-service-account'])
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_basics_pcie_standalone_with_service_account(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'pcie', dcgm_structs.DCGM_PCI_INDEX, "pcie.test_duration=10;pcie.test_with_gemm=true", statName='perf_gflops')
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_stats_basics_pcie_standalone(handle, gpuIds):
helper_test_stats_file_basics(handle, gpuIds, False, 'pcie', dcgm_structs.DCGM_PCI_INDEX, "pcie.test_duration=10;pcie.test_with_gemm=true", statName='perf_gflops')
def helper_test_bad_statspath(handle, gpuIds):
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=20')
dd.SetStatsPath('/fake/superfake/notreal/')
failed = False
try:
response = test_utils.diag_execute_wrapper(dd, handle)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NVVS_ERROR) as e:
failed = True
assert str(e).find('cannot access statspath') != -1, "Should have received a statspath error but got %s" % str(e)
assert failed, "We must fail when attempting to access a fake dir"
filename = '/tmp/not_a_file'
if not os.path.isfile(filename):
# create the file
with open(filename, 'w') as f:
f.write('lorem ipsum')
failed = False
dd.SetStatsPath(filename)
try:
response = test_utils.diag_execute_wrapper(dd, handle)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NVVS_ERROR) as e:
failed = True
assert str(e).find('is not a directory') != -1, "Should have received a statspath error but got %s" % str(e)
assert failed, "We must fail when attempting to set statspath to a file"
# Remove the file to clean up after ourselves
os.remove(filename)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_diag_stats_bad_statspath_embedded(handle, gpuIds):
helper_test_bad_statspath(handle, gpuIds)
@test_utils.run_only_as_root()
@test_utils.with_service_account('dcgm-tests-service-account')
@test_utils.run_with_standalone_host_engine(20, heArgs=['--service-account', 'dcgm-tests-service-account'])
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_diag_stats_bad_statspath_standalone_with_service_account(handle, gpuIds):
helper_test_bad_statspath(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_diag_stats_bad_statspath_standalone(handle, gpuIds):
helper_test_bad_statspath(handle, gpuIds)
| DCGM-master | testing/python3/tests/test_diag_stats.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from socket import AF_INET, SOCK_DGRAM
from common.Struct import Struct
from dcgm_telegraf import DcgmTelegraf
def test_send_to_telegraf():
# Rather than rebinding an enclosing-scope variable from the nested function, we use an object
# whose attributes act as mutable closure state
namespace = Struct(message=None, dest=None)
def mysendto(_message, _dest):
namespace.message = _message
namespace.dest = _dest
mysock = Struct(sendto=mysendto)
dr = DcgmTelegraf('FAKE_HOST', 101010)
# Assert that we are sending over UDP
assert dr.m_sock.family == AF_INET
assert dr.m_sock.type == SOCK_DGRAM
dr.m_sock = mysock
dr.SendToTelegraf('message')
assert(namespace.message == 'message')
assert(namespace.dest == ('FAKE_HOST', 101010))
def test_telegraph_custom_json_handler():
namespace = Struct(arg=None)
def MySendToTelegraf(json):
namespace.arg = json # pylint: disable=no-member
dr = DcgmTelegraf('FAKE_HOST', 101010)
dr.SendToTelegraf = MySendToTelegraf
dr.CustomJsonHandler('value')
assert namespace.arg == 'value' # pylint: disable=no-member
| DCGM-master | testing/python3/tests/test_dcgm_telegraf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_agent_internal
import dcgm_agent
import logger
import test_utils
import dcgm_fields
import dcgm_internal_helpers
import option_parser
import DcgmDiag
import dcgm_errors
import threading
import time
import sys
import os
import signal
import utils
import json
import tempfile
import shutil
from ctypes import *
from apps.app_runner import AppRunner
from apps.dcgmi_app import DcgmiApp
from dcgm_field_injection_helpers import inject_value, inject_nvml_value
import dcgm_field_injection_helpers
# Most injection tests use the memtest plugin, which also sleeps for 3 seconds, so injected values
# are offset by the same amount. The same offset is used on all architectures.
injection_offset = 3
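# Sketch of the inject_value call pattern used throughout this file (inferred from the call sites
# below, not the authoritative signature): handle, gpuId, fieldId, the value to insert, an offset in
# seconds (injection_offset above), an optional boolean flag, and repeatCount/repeatOffset keywords, e.g.
#   inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True, repeatCount=5)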
def check_diag_result_fail(response, gpuIndex, testIndex):
return response.perGpuResponses[gpuIndex].results[testIndex].result == dcgm_structs.DCGM_DIAG_RESULT_FAIL
def check_diag_result_pass(response, gpuIndex, testIndex):
return response.perGpuResponses[gpuIndex].results[testIndex].result == dcgm_structs.DCGM_DIAG_RESULT_PASS
def diag_result_assert_fail(response, gpuIndex, testIndex, msg, errorCode):
# Instead of checking that it failed, just make sure it didn't pass because we want to ignore skipped
# tests or tests that did not run.
assert response.perGpuResponses[gpuIndex].results[testIndex].result != dcgm_structs.DCGM_DIAG_RESULT_PASS, msg
if response.version == dcgm_structs.dcgmDiagResponse_version8:
codeMsg = "Failing test expected error code %d, but found %d" % \
(errorCode, response.perGpuResponses[gpuIndex].results[testIndex].error.code)
assert response.perGpuResponses[gpuIndex].results[testIndex].error.code == errorCode, codeMsg
def diag_result_assert_pass(response, gpuIndex, testIndex, msg):
# Instead of checking that it passed, just make sure it didn't fail because we want to ignore skipped
# tests or tests that did not run.
assert response.perGpuResponses[gpuIndex].results[testIndex].result != dcgm_structs.DCGM_DIAG_RESULT_FAIL, msg
if response.version == dcgm_structs.dcgmDiagResponse_version8:
codeMsg = "Passing test somehow has a non-zero error code!"
assert response.perGpuResponses[gpuIndex].results[testIndex].error.code == 0, codeMsg
def helper_check_diag_empty_group(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
runDiagInfo = dcgm_structs.c_dcgmRunDiag_t()
runDiagInfo.version = dcgm_structs.dcgmRunDiag_version
runDiagInfo.groupId = groupObj.GetId()
runDiagInfo.validate = 1
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_GROUP_IS_EMPTY)):
response = test_utils.action_validate_wrapper(runDiagInfo, handle)
# Now make sure everything works well with a group
groupObj.AddGpu(gpuIds[0])
response = test_utils.action_validate_wrapper(runDiagInfo, handle)
assert response, "Should have received a response now that we have a non-empty group"
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_helper_embedded_check_diag_empty_group(handle, gpuIds):
helper_check_diag_empty_group(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_helper_standalone_check_diag_empty_group(handle, gpuIds):
helper_check_diag_empty_group(handle, gpuIds)
def diag_assert_error_found(response, gpuId, testIndex, errorStr):
if response.perGpuResponses[gpuId].results[testIndex].result != dcgm_structs.DCGM_DIAG_RESULT_SKIP and \
response.perGpuResponses[gpuId].results[testIndex].result != dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN:
warningFound = response.perGpuResponses[gpuId].results[testIndex].error.msg
assert warningFound.find(errorStr) != -1, "Expected to find '%s' as a warning, but found '%s'" % (errorStr, warningFound)
def diag_assert_error_not_found(response, gpuId, testIndex, errorStr):
if response.perGpuResponses[gpuId].results[testIndex].result != dcgm_structs.DCGM_DIAG_RESULT_SKIP and \
response.perGpuResponses[gpuId].results[testIndex].result != dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN:
warningFound = response.perGpuResponses[gpuId].results[testIndex].error.msg
assert warningFound.find(errorStr) == -1, "Expected not to find '%s' as a warning, but found it: '%s'" % (errorStr, warningFound)
def helper_check_diag_thermal_violation(handle, gpuIds):
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=10')
# kick off a thread to inject the failing value while I run the diag
inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION, 9223372036854775792, 0, repeatCount=3, repeatOffset=5)
response = test_utils.diag_execute_wrapper(dd, handle)
assert response.gpuCount == len(gpuIds), "Expected %d gpus, but found %d reported" % (len(gpuIds), response.gpuCount)
for gpuIndex in range(response.gpuCount):
diag_assert_error_not_found(response, gpuIndex, dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "Thermal violations")
"""
@test_utils.run_with_injection_nvml()
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_if_mig_is_disabled()
def TODO: add the injection nvml test here
"""
def helper_check_diag_high_temp_fail(handle, gpuIds):
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=10')
# kick off a thread to inject the failing value while I run the diag
inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 120, 0, repeatCount=3, repeatOffset=5)
response = test_utils.diag_execute_wrapper(dd, handle)
assert response.gpuCount == len(gpuIds), "Expected %d gpus, but found %d reported" % (len(gpuIds), response.gpuCount)
diag_result_assert_fail(response, gpuIds[0], dcgm_structs.DCGM_DIAGNOSTIC_INDEX, "Expected a failure due to 120 degree inserted temp.", dcgm_errors.DCGM_FR_TEMP_VIOLATION)
def helper_check_dcgm_run_diag_backwards_compatibility(handle, gpuId):
"""
Verifies that the dcgmActionValidate_v2 API supports older versions of the dcgmRunDiag struct
by using the old structs to run a short validation test.
"""
def test_dcgm_run_diag(drd, version):
drd.validate = 1 # run a short test
drd.gpuList = str(gpuId)
# This will throw an exception on error
response = test_utils.action_validate_wrapper(drd, handle, version)
# Test version 7
drd = dcgm_structs.c_dcgmRunDiag_v7()
test_dcgm_run_diag(drd, dcgm_structs.dcgmRunDiag_version7)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_run_diag_backwards_compatibility_embedded(handle, gpuIds):
helper_check_dcgm_run_diag_backwards_compatibility(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_run_diag_backwards_compatibility_standalone(handle, gpuIds):
helper_check_dcgm_run_diag_backwards_compatibility(handle, gpuIds[0])
checked_gpus = {} # Used to track that a GPU has been verified as passing
# Makes sure a very basic diagnostic passes and returns a DcgmDiag object
def helper_verify_diag_passing(handle, gpuIds, testNames="memtest", testIndex=dcgm_structs.DCGM_MEMTEST_INDEX, params="memtest.test_duration=15", version=dcgm_structs.dcgmRunDiag_version, useFakeGpus=False):
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr=testNames, paramsStr=params, version=version)
dd.SetThrottleMask(0) # We explicitly want to fail for throttle reasons since this test inserts throttling errors
# for verification
if useFakeGpus:
dd.UseFakeGpus()
# If we've already checked this GPU, then use the previous result
runDiag = False
for gpuId in gpuIds:
if gpuId in checked_gpus:
if checked_gpus[gpuId] == False:
test_utils.skip_test("Skipping because GPU %s does not pass memtest Perf test. "
"Please verify whether the GPU is supported and healthy." % gpuId)
else:
runDiag = True
if runDiag == False:
return dd
response = test_utils.diag_execute_wrapper(dd, handle)
for gpuId in gpuIds:
if not check_diag_result_pass(response, gpuId, testIndex):
checked_gpus[gpuId] = False
test_utils.skip_test("Skipping because GPU %s does not pass SM Perf test. "
"Please verify whether the GPU is supported and healthy." % gpuId)
else:
checked_gpus[gpuId] = True
return dd
def find_throttle_failure(response, gpuId, pluginIndex):
if response.perGpuResponses[gpuId].results[pluginIndex].result != dcgm_structs.DCGM_DIAG_RESULT_PASS:
error = response.perGpuResponses[gpuId].results[pluginIndex].error.msg
if error.find('clock throttling') != -1:
return True, "%s (%s)" % (error, response.perGpuResponses[gpuId].results[pluginIndex].error.msg)
else:
return False, error
return False, ""
def helper_test_thermal_violations_in_seconds(handle):
gpuIds = test_utils.create_injection_nvml_gpus(handle, 2)
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr='diagnostic', paramsStr='diagnostic.test_duration=10')
dd.UseFakeGpus()
fieldId = dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION
injected_value = 2344122048
inject_nvml_value(handle, gpuIds[0], fieldId, injected_value, 10)
# Verify that the inserted values are visible in DCGM before starting the diag
assert dcgm_internal_helpers.verify_field_value(gpuIds[0], fieldId, injected_value, maxWait=5, numMatches=1), \
"Expected inserted values to be visible in DCGM"
# Start the diag
response = dd.Execute(handle)
testIndex = dcgm_structs.DCGM_DIAGNOSTIC_INDEX
errmsg = response.perGpuResponses[gpuIds[0]].results[testIndex].error.msg
# Check for 'hermal' instead of thermal because sometimes it's capitalized
if errmsg.find("hermal violations") != -1:
foundError = True
import re
match = re.search(r"totaling.*?seconds", errmsg)
assert match, "Expected to find 'totaling <seconds> seconds' in error message but found %s" % errmsg
else:
# Didn't find an error
assert False, "Thermal violations were injected but not found in error message: '%s'." % errmsg
@test_utils.run_with_injection_nvml()
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
def test_thermal_violations_in_seconds_standalone(handle):
helper_test_thermal_violations_in_seconds(handle)
#####
# Helper method for inserting errors and performing the diag
def perform_diag_with_throttle_mask_and_verify(dd, handle, gpuId, inserted_error, throttle_mask, shouldPass, failureMsg):
fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
interval = 0.1
if throttle_mask is not None:
dd.SetThrottleMask(throttle_mask)
inject_value(handle, gpuId, fieldId, inserted_error, injection_offset, True, repeatCount=5)
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True, repeatCount=5)
# Verify that the inserted values are visible in DCGM before starting the diag
assert dcgm_internal_helpers.verify_field_value(gpuId, fieldId, inserted_error, checkInterval=interval, maxWait=15, numMatches=1), \
"Expected inserted values to be visible in DCGM"
# Start the diag
response = test_utils.diag_execute_wrapper(dd, handle)
# Check for pass or failure as per the shouldPass parameter
throttled, errMsg = find_throttle_failure(response, gpuId, dcgm_structs.DCGM_MEMTEST_INDEX)
if shouldPass:
assert throttled == False, "Expected to not have a throttling error but found %s" % errMsg
else:
assert throttled == True, "Expected to find a throttling error but did not (%s)" % errMsg
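# Accepted throttle_mask forms, as exercised by the helpers below (illustrative summary, not an
# exhaustive list): an integer bitmask such as dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
# a named-reason string such as "HW_SLOWDOWN", 0 or "" to ignore nothing, or None to leave the
# currently configured mask untouched.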
def helper_test_throttle_mask_fail_hw_slowdown(handle, gpuId):
"""
Verifies that the throttle ignore mask ignores the masked throttling reasons.
"""
dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
#####
# Insert a throttling error and verify that the test fails
perform_diag_with_throttle_mask_and_verify(
dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
throttle_mask=0, shouldPass=False, failureMsg="Expected test to fail because of throttling"
)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_hw_slowdown(handle, gpuIds):
helper_test_throttle_mask_fail_hw_slowdown(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_run_injection(handle, gpuIds):
helper_test_throttle_mask_fail_hw_slowdown(handle, gpuIds[0])
def helper_test_throttle_mask_ignore_hw_slowdown(handle, gpuId):
dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
# Insert throttling error and set throttle mask to ignore it (as integer value)
perform_diag_with_throttle_mask_and_verify(
dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
throttle_mask=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN, shouldPass=True,
failureMsg="Expected test to pass because throttle mask (interger bitmask) ignores the throttle reason"
)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_ignore_hw_slowdown(handle, gpuIds):
helper_test_throttle_mask_ignore_hw_slowdown(handle, gpuIds[0])
def helper_test_throttle_mask_ignore_hw_slowdown_string(handle, gpuId):
dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
# Insert throttling error and set throttle mask to ignore it (as string name)
perform_diag_with_throttle_mask_and_verify(
dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
throttle_mask="HW_SLOWDOWN", shouldPass=True,
failureMsg="Expected test to pass because throttle mask (named reason) ignores the throttle reason"
)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_ignore_hw_slowdown_string(handle, gpuIds):
helper_test_throttle_mask_ignore_hw_slowdown_string(handle, gpuIds[0])
def helper_test_throttle_mask_fail_double_inject_ignore_one(handle, gpuId):
dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
# Insert two throttling errors and set throttle mask to ignore only one (as integer)
perform_diag_with_throttle_mask_and_verify(
dd, handle, gpuId,
inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN | dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL,
throttle_mask=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN, shouldPass=False,
failureMsg="Expected test to fail because throttle mask (interger bitmask) ignores one of the throttle reasons"
)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_double_inject_ignore_one(handle, gpuIds):
helper_test_throttle_mask_fail_double_inject_ignore_one(handle, gpuIds[0])
def helper_test_throttle_mask_fail_double_inject_ignore_one_string(handle, gpuId):
dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
# Insert two throttling errors and set throttle mask to ignore only one (as string name)
perform_diag_with_throttle_mask_and_verify(
dd, handle, gpuId,
inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN | dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_SW_THERMAL,
throttle_mask="HW_SLOWDOWN", shouldPass=False,
failureMsg="Expected test to fail because throttle mask (named reason) ignores one of the throttle reasons"
)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgm_diag_context_create(handle, gpuIds):
helper_verify_diag_passing(handle, gpuIds, "context_create", dcgm_structs.DCGM_CONTEXT_CREATE_INDEX, params="")
@test_utils.run_with_standalone_host_engine(120, heEnv={'__DCGM_PCIE_AER_COUNT' : '100'})
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgm_diag_pcie_failure(handle, gpuIds):
if test_utils.get_build_type() != "Debug":
test_utils.skip_test("Debug test only")
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuIds[0]], testNamesStr="pcie", paramsStr="pcie.test_duration=60;pcie.test_with_gemm=true",
version=dcgm_structs.dcgmRunDiag_version7)
response = test_utils.diag_execute_wrapper(dd, handle)
assert check_diag_result_fail(response, gpuIds[0], dcgm_structs.DCGM_PCI_INDEX), "No failure detected in diagnostic"
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_double_inject_ignore_one_string(handle, gpuIds):
helper_test_throttle_mask_fail_double_inject_ignore_one_string(handle, gpuIds[0])
def helper_test_throttle_mask_fail_ignore_different_throttle(handle, gpuId):
dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
# Insert throttling error and set throttle mask to ignore a different reason (as integer value)
perform_diag_with_throttle_mask_and_verify(
dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
throttle_mask=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_POWER_BRAKE, shouldPass=False,
failureMsg="Expected test to fail because throttle mask (interger bitmask) ignores different throttle reason"
)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_ignore_different_throttle(handle, gpuIds):
helper_test_throttle_mask_fail_ignore_different_throttle(handle, gpuIds[0])
def helper_test_throttle_mask_fail_ignore_different_throttle_string(handle, gpuId):
dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
# Insert throttling error and set throttle mask to ignore a different reason (as string name)
perform_diag_with_throttle_mask_and_verify(
dd, handle, gpuId, inserted_error=dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN,
throttle_mask="HW_POWER_BRAKE", shouldPass=False,
failureMsg="Expected test to fail because throttle mask (named reason) ignores different throttle reason"
)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_fail_ignore_different_throttle_string(handle, gpuIds):
helper_test_throttle_mask_fail_ignore_different_throttle_string(handle, gpuIds[0])
def helper_test_throttle_mask_pass_no_throttle(handle, gpuId):
dd = helper_verify_diag_passing(handle, [gpuId], useFakeGpus=True)
# Clear throttling reasons and mask to verify test passes
dd.SetThrottleMask("")
perform_diag_with_throttle_mask_and_verify(
dd, handle, gpuId, inserted_error=0, throttle_mask=None, shouldPass=True,
failureMsg="Expected test to pass because there is no throttling"
)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttle_mask_pass_no_throttle(handle, gpuIds):
helper_test_throttle_mask_pass_no_throttle(handle, gpuIds[0])
def helper_check_diag_stop_on_interrupt_signals(handle, gpuId):
"""
Verifies that a launched diag is stopped when the dcgmi executable receives a SIGINT, SIGHUP, SIGQUIT, or SIGTERM
signal.
"""
# First check whether the GPU is healthy/supported
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="memtest", paramsStr="memtest.test_duration=2",
version=dcgm_structs.dcgmRunDiag_version7)
response = test_utils.diag_execute_wrapper(dd, handle)
if not check_diag_result_pass(response, gpuId, dcgm_structs.DCGM_MEMTEST_INDEX):
test_utils.skip_test("Skipping because GPU %s does not pass memtest. "
"Please verify whether the GPU is supported and healthy." % gpuId)
# paths to dcgmi executable
paths = {
"Linux_32bit": "./apps/x86/dcgmi",
"Linux_64bit": "./apps/amd64/dcgmi",
"Linux_ppc64le": "./apps/ppc64le/dcgmi",
"Linux_aarch64": "./apps/aarch64/dcgmi"
}
# Verify test is running on a supported platform
if utils.platform_identifier not in paths:
test_utils.skip_test("Dcgmi is not supported on the current platform.")
dcgmi_path = paths[utils.platform_identifier]
def verify_exit_code_on_signal(signum):
# Ensure that host engine is ready to launch a new diagnostic
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr='1')
success = False
start = time.time()
while not success and (time.time() - start) <= 3:
try:
response = test_utils.diag_execute_wrapper(dd, handle)
success = True
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_DIAG_ALREADY_RUNNING):
# Only acceptable error due to small race condition between the nvvs process exiting and
# hostengine actually processing the exit. We try for a maximum of 3 seconds since this
# should be rare and last only for a short amount of time
time.sleep(1.5)
diagApp = AppRunner(dcgmi_path, args=["diag", "-r", "memtest", "-i", "%s" % gpuId,
"-d", "INFO", "--debugLogFile", "/tmp/nvvs.log"])
# Start the diag
diagApp.start(timeout=40)
logger.info("Launched dcgmi process with pid: %s" % diagApp.getpid())
# Ensure diag is running before sending interrupt signal
running, debug_output = dcgm_internal_helpers.check_nvvs_process(want_running=True, attempts=50)
assert running, "The nvvs process did not start within 25 seconds: %s" % (debug_output)
# There is a small race condition here - it is possible that the hostengine sends a SIGTERM before the
# nvvs process has setup a signal handler, and so the nvvs process does not stop when SIGTERM is sent.
# We sleep for 1 second to reduce the possibility of this scenario
time.sleep(1)
diagApp.signal(signum)
retCode = diagApp.wait()
# Check the return code and stdout/stderr output before asserting for better debugging info
if retCode == 0:
logger.error("Got retcode '%s' from launched diag." % retCode)
if diagApp.stderr_lines or diagApp.stdout_lines:
logger.info("dcgmi output:")
for line in diagApp.stdout_lines:
logger.info(line)
for line in diagApp.stderr_lines:
logger.error(line)
assert retCode != 0, "Expected a non-zero exit code, but got 0"
# Since the app returns a non zero exit code, we call the validate method to prevent false
# failures from the test framework
diagApp.validate()
# Give the launched nvvs process up to 25 seconds to terminate.
not_running, debug_output = dcgm_internal_helpers.check_nvvs_process(want_running=False, attempts=50)
assert not_running, "The launched nvvs process did not terminate within 25 seconds. pgrep output:\n%s" \
% debug_output
# Verify return code on SIGINT
# We simply verify the return code because explicitly checking whether the nvvs process has terminated is
# clunky and error-prone
logger.info("Testing stop on SIGINT")
verify_exit_code_on_signal(signal.SIGINT)
# Verify return code on SIGHUP
logger.info("Testing stop on SIGHUP")
verify_exit_code_on_signal(signal.SIGHUP)
# Verify return code on SIGQUIT
logger.info("Testing stop on SIGQUIT")
verify_exit_code_on_signal(signal.SIGQUIT)
# Verify return code on SIGTERM
logger.info("Testing stop on SIGTERM")
verify_exit_code_on_signal(signal.SIGTERM)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_diag_stop_on_signal_embedded(handle, gpuIds):
if not option_parser.options.developer_mode:
# This test can run into a race condition when using embedded host engine, which can cause nvvs to
# take >60 seconds to terminate after receiving a SIGTERM.
test_utils.skip_test("Skip test for more debugging")
helper_check_diag_stop_on_interrupt_signals(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_diag_stop_on_signal_standalone(handle, gpuIds):
helper_check_diag_stop_on_interrupt_signals(handle, gpuIds[0])
def helper_verify_log_file_creation(handle, gpuIds):
dd = helper_verify_diag_passing(handle, gpuIds, testNames="memtest", testIndex=dcgm_structs.DCGM_MEMTEST_INDEX, params="memtest.test_duration=10", useFakeGpus=True)
logname = '/tmp/tmp_test_debug_log'
dd.SetDebugLogFile(logname)
dd.SetDebugLevel(5)
response = test_utils.diag_execute_wrapper(dd, handle)
if len(response.systemError.msg) == 0:
skippedAll = True
passedCount = 0
errors = ""
for gpuId in gpuIds:
resultType = response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_MEMTEST_INDEX].result
if resultType not in [dcgm_structs.DCGM_DIAG_RESULT_SKIP, dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN]:
skippedAll = False
if resultType == dcgm_structs.DCGM_DIAG_RESULT_PASS:
passedCount = passedCount + 1
else:
warning = response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_MEMTEST_INDEX].error.msg
if len(warning):
errors = "%s, GPU %d failed: %s" % (errors, gpuId, warning)
if skippedAll == False:
detailedMsg = "passed on %d of %d GPUs" % (passedCount, response.gpuCount)
if len(errors):
detailedMsg = "%s and had these errors: %s" % (detailedMsg, errors)
logger.info(detailedMsg)
assert os.path.isfile(logname), "Logfile '%s' was not created and %s" % (logname, detailedMsg)
else:
logger.info("The diagnostic was skipped, so we cannot run this test.")
else:
logger.info("The diagnostic had a problem when executing, so we cannot run this test.")
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_verify_log_file_creation_standalone(handle, gpuIds):
helper_verify_log_file_creation(handle, gpuIds)
def helper_throttling_masking_failures(handle, gpuId):
#####
# First check whether the GPU is healthy
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="memtest", paramsStr="memtest.test_duration=2",
version=dcgm_structs.dcgmRunDiag_version)
dd.SetThrottleMask(0) # We explicitly want to fail for throttle reasons since this test inserts throttling errors
# for verification
dd.UseFakeGpus()
response = test_utils.diag_execute_wrapper(dd, handle)
if not check_diag_result_pass(response, gpuId, dcgm_structs.DCGM_MEMTEST_INDEX):
test_utils.skip_test("Skipping because GPU %s does not pass memtest. "
"Please verify whether the GPU is supported and healthy." % gpuId)
#####
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="memtest", paramsStr="memtest.test_duration=15",
version=dcgm_structs.dcgmRunDiag_version)
dd.SetThrottleMask(0)
dd.UseFakeGpus()
fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
insertedError = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN
interval = 0.1
logger.info("Injecting benign errors")
inject_value(handle, gpuId, fieldId, 3, 1, True)
# Verify that the inserted values are visible in DCGM before starting the diag
assert dcgm_internal_helpers.verify_field_value(gpuId, fieldId, 3, checkInterval=interval, maxWait=5, numMatches=1), \
"Expected inserted values to be visible in DCGM"
logger.info("Injecting actual errors")
inject_value(handle, gpuId, fieldId, insertedError, injection_offset, True, repeatCount=5)
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True, repeatCount=5)
logger.info("Started diag")
response = test_utils.diag_execute_wrapper(dd, handle)
# Verify that the inserted values are visible in DCGM
    # Max wait of 8 is because of the 5 second injection offset + 2 seconds for the match + 1 second buffer.
assert dcgm_internal_helpers.verify_field_value(gpuId, fieldId, insertedError, checkInterval=0.1, numMatches=1, maxWait=8), \
"Expected inserted errors to be visible in DCGM"
throttled, errMsg = find_throttle_failure(response, gpuId, dcgm_structs.DCGM_MEMTEST_INDEX)
assert throttled, "Expected to find throttling failure, but did not: (%s)" % errMsg
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_throttling_masking_failures_standalone(handle, gpuIds):
helper_throttling_masking_failures(handle, gpuIds[0])
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_handle_concurrency_standalone(handle, gpuIds):
'''
Test that we can use a DCGM handle concurrently with a diagnostic running
'''
diagDuration = 10
gpuId = gpuIds[0]
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr="memtest", paramsStr="memtest.test_duration=%d" % diagDuration,
version=dcgm_structs.dcgmRunDiag_version)
dd.UseFakeGpus()
response = [None]
def run(dd, response):
        response[0] = test_utils.diag_execute_wrapper(dd, handle)
diagStartTime = time.time()
threadObj = threading.Thread(target=run, args=[dd, response])
threadObj.start()
#Give threadObj a head start on its 10 second run
time.sleep(1.0)
firstReturnedRequestLatency = None
numConcurrentCompleted = 0
sleepDuration = 1.0
while threadObj.is_alive():
#Make another request on the handle concurrently
moduleStatuses = dcgm_agent.dcgmModuleGetStatuses(handle)
secondRequestLatency = time.time() - diagStartTime
numConcurrentCompleted += 1
if firstReturnedRequestLatency is None:
firstReturnedRequestLatency = secondRequestLatency
time.sleep(sleepDuration)
diagThreadEndTime = time.time()
diagDuration = diagThreadEndTime - diagStartTime
if firstReturnedRequestLatency is None:
test_utils.skip_test("Diag returned instantly. It is probably not supported for gpuId %u" % gpuId)
logger.info("Completed %d concurrent requests. Diag ran for %.1f seconds" % (numConcurrentCompleted, diagDuration))
#We should have been able to complete a request every 2 seconds if we slept for 1 (conservatively)
numShouldHaveCompleted = int((diagDuration / sleepDuration) / 2.0)
assert numConcurrentCompleted >= numShouldHaveCompleted, "Expected at least %d concurrent tests completed. Got %d" % (numShouldHaveCompleted, numConcurrentCompleted)
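# Worked example of the bound above (illustrative numbers only): with a ~10 second
# diag and sleepDuration of 1.0, roughly ten concurrent dcgmModuleGetStatuses()
# calls are possible, and numShouldHaveCompleted = int((10 / 1.0) / 2.0) = 5, so the
# assertion only demands half of the theoretical maximum to tolerate scheduling jitter.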
def helper_per_gpu_responses_api(handle, gpuIds, testDir):
"""
Verify that pass/fail status for diagnostic tests are reported on a per GPU basis via dcgmActionValidate API call
"""
failGpuId = gpuIds[0]
dd = helper_verify_diag_passing(handle, gpuIds, useFakeGpus=True)
dd = DcgmDiag.DcgmDiag(gpuIds=[failGpuId], testNamesStr="memtest", paramsStr="memtest.test_duration=15", version=dcgm_structs.dcgmRunDiag_version)
dd.SetThrottleMask(0) # We explicitly want to fail for throttle reasons since this test inserts throttling errors
# for verification
dd.UseFakeGpus()
dd.SetStatsPath(testDir)
dd.SetStatsOnFail(1)
# Setup injection app
fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
insertedError = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN
interval = 0.1
# Use an offset to make these errors start after the benign values
inject_value(handle, failGpuId, fieldId, insertedError, injection_offset, True, repeatCount=5)
inject_value(handle, failGpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True, repeatCount=5)
# Verify that the inserted values are visible in DCGM before starting the diag
assert dcgm_internal_helpers.verify_field_value(failGpuId, fieldId, insertedError, checkInterval=interval, maxWait=5, numMatches=1), \
"Expected inserted values to be visible in DCGM"
response = test_utils.diag_execute_wrapper(dd, handle)
logger.info("Started diag")
# Verify that responses are reported on a per gpu basis. Ensure the first GPU failed, and all others passed
for gpuId in gpuIds:
throttled, errMsg = find_throttle_failure(response, gpuId, dcgm_structs.DCGM_MEMTEST_INDEX)
if gpuId == failGpuId:
assert throttled, "Expected throttling error but found none (%s)" % errMsg
else:
assert not throttled, "Expected not to find a throttling error but found '%s'" % errMsg
def helper_per_gpu_responses_dcgmi(handle, gpuIds, testName, testParams):
"""
Verify that pass/fail status for diagnostic tests are reported on a per GPU basis via dcgmi (for both normal stdout
and JSON output).
"""
def get_stdout(app):
output = ''
for line in app.stdout_lines:
output = output + line + " "
return output
def print_output(app):
logger.info(get_stdout(app))
for line in app.stderr_lines:
logger.error(line)
def verify_successful_dcgmi_run(app):
app.start(timeout=40)
logger.info("Started dcgmi diag with pid %s" % app.getpid())
retcode = app.wait()
if test_utils.is_mig_incompatible_failure(get_stdout(app)):
app.validate()
test_utils.skip_test("Skipping this test because MIG is configured incompatibly (preventing access to the whole GPU)")
        # dcgmi returns DCGM_ST_NVVS_ISOLATE_ERROR on this diag failure (which is expected here).
expected_retcode = c_uint8(dcgm_structs.DCGM_ST_NVVS_ISOLATE_ERROR).value
if retcode != expected_retcode:
if app.stderr_lines or app.stdout_lines:
logger.info("dcgmi output:")
print_output(app)
assert retcode == expected_retcode, \
"Expected dcgmi diag to have retcode %s. Got return code %s" % (expected_retcode, retcode)
app.validate() # non-zero exit code must be validated
#helper_verify_diag_passing(handle, gpuIds, useFakeGpus=True)
# Setup injection app
interval = 0.1
fieldId = dcgm_fields.DCGM_FI_DEV_CLOCK_THROTTLE_REASONS
insertedError = dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN
# Use an offset to make these errors start after the benign values
inject_value(handle, gpuIds[0], fieldId, insertedError, injection_offset, True, repeatCount=5)
inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True, repeatCount=5)
# Verify that the inserted values are visible in DCGM before starting the diag
assert dcgm_internal_helpers.verify_field_value(gpuIds[0], fieldId, insertedError, checkInterval=interval, maxWait=5, numMatches=1), \
"Expected inserted values to be visible in DCGM"
# Verify dcgmi output
gpuIdStrings = list(map(str, gpuIds))
gpuList = ",".join(gpuIdStrings)
args = ["diag", "-r", testName, "-p", testParams, "-f", gpuList, "--throttle-mask", "0"]
dcgmiApp = DcgmiApp(args=args)
logger.info("Verifying stdout output")
verify_successful_dcgmi_run(dcgmiApp)
# Verify dcgmi output shows per gpu results (crude approximation of verifying correct console output)
memtest_header_found = False
fail_gpu_found = False
fail_gpu_text = "Fail - GPU: %s" % gpuIds[0]
check_for_warning = False
warning_found = False
for line in dcgmiApp.stdout_lines:
if not memtest_header_found:
if "Memtest" not in line:
continue
memtest_header_found = True
continue
if not fail_gpu_found:
if fail_gpu_text not in line:
continue
fail_gpu_found = True
check_for_warning = True
continue
if check_for_warning:
if "Warning" in line:
warning_found = True
break
if not (memtest_header_found and fail_gpu_found and warning_found):
logger.info("dcgmi output:")
print_output(dcgmiApp)
assert memtest_header_found, "Expected to see 'Memtest' header in output"
assert fail_gpu_found, "Expected to see %s in output" % fail_gpu_text
assert warning_found, "Expected to see 'Warning' in output after GPU failure text"
inject_value(handle, gpuIds[0], fieldId, insertedError, injection_offset, True, repeatCount=5)
inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 1000, injection_offset, True, repeatCount=5)
# Verify that the inserted values are visible in DCGM before starting the diag
assert dcgm_internal_helpers.verify_field_value(gpuIds[0], fieldId, insertedError, checkInterval=interval, maxWait=5, numMatches=1), \
"Expected inserted values to be visible in DCGM"
# Verify JSON output
logger.info("Verifying JSON output")
args.append("-j")
dcgmiApp = DcgmiApp(args=args)
verify_successful_dcgmi_run(dcgmiApp)
# Stop error insertion
logger.info("Stopped error injection")
# Verify per GPU results
json_output = "\n".join(dcgmiApp.stdout_lines)
output = json.loads(json_output)
verified = False
if (len(output.get("DCGM GPU Diagnostic", {}).get("test_categories", [])) == 2
and output["DCGM GPU Diagnostic"]["test_categories"][1].get("category", None) == "Stress"
and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["name"] == testName
and len(output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"]) >= 2
and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][0]["gpu_ids"] == str(gpuIds[0])
and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][0]["status"] == "Fail"
and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][0]["error_id"] == dcgm_errors.DCGM_FR_TEMP_VIOLATION
and output["DCGM GPU Diagnostic"]["test_categories"][1]["tests"][0]["results"][1]["status"] == "Pass"):
verified = True
if not verified:
print_output(dcgmiApp)
assert verified, "dcgmi JSON output did not pass verification"
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_per_gpu_responses_standalone_api(handle, gpuIds):
if len(gpuIds) < 2:
test_utils.skip_test("Skipping because this test requires 2 or more GPUs with same SKU")
if test_utils.is_throttling_masked_by_nvvs(handle, gpuIds[0], dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN):
test_utils.skip_test("Skipping because this SKU ignores the throttling we inject for this test")
logger.info("Starting test for per gpu responses (API call)")
outputFile = "stats_memtest.json"
try:
testDirectory = tempfile.mkdtemp()
except OSError:
test_utils.skip_test("Unable to create the test directory")
else:
try:
helper_per_gpu_responses_api(handle, gpuIds, testDirectory)
assert os.path.isfile(os.path.join(testDirectory, outputFile)), "Expected stats file {} was not created".format(os.path.join(testDirectory, outputFile))
finally:
shutil.rmtree(testDirectory, ignore_errors=True)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_per_gpu_responses_standalone_dcgmi(handle, gpuIds):
if len(gpuIds) < 2:
test_utils.skip_test("Skipping because this test requires 2 or more GPUs with same SKU")
if test_utils.is_throttling_masked_by_nvvs(handle, gpuIds[0], dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN):
test_utils.skip_test("Skipping because this SKU ignores the throttling we inject for this test")
logger.info("Starting test for per gpu responses (dcgmi output)")
helper_per_gpu_responses_dcgmi(handle, gpuIds, "Memtest", "memtest.test_duration=5,pcie.max_pcie_replays=1")
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_diag_memtest_fails_standalone_dcgmi(handle, gpuIds):
if len(gpuIds) < 2:
test_utils.skip_test("Skipping because this test requires 2 or more GPUs with same SKU")
if test_utils.is_throttling_masked_by_nvvs(handle, gpuIds[0], dcgm_fields.DCGM_CLOCKS_THROTTLE_REASON_HW_SLOWDOWN):
test_utils.skip_test("Skipping because this SKU ignores the throttling we inject for this test")
logger.info("Starting test for per gpu responses (dcgmi output)")
helper_per_gpu_responses_dcgmi(handle, gpuIds, "Memtest", "memtest.test_duration=15")
def helper_test_diagnostic_config_usage(handle, gpuIds):
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr="diagnostic", paramsStr="diagnostic.test_duration=10")
dd.SetConfigFileContents("%YAML 1.2\n\ncustom:\n- custom:\n diagnostic:\n max_sbe_errors: 1")
inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL, 1000, injection_offset, True, repeatCount=5)
response = test_utils.diag_execute_wrapper(dd, handle)
assert response.perGpuResponses[gpuIds[0]].results[dcgm_structs.DCGM_DIAGNOSTIC_INDEX].result != dcgm_structs.DCGM_DIAG_RESULT_PASS, \
"Should have a failure due to injected SBEs, but got passing result"
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_diagnostic_config_usage_standalone(handle, gpuIds):
helper_test_diagnostic_config_usage(handle, gpuIds)
def helper_test_dcgm_short_diagnostic_run(handle, gpuIds):
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr="diagnostic", paramsStr="diagnostic.test_duration=15")
response = test_utils.diag_execute_wrapper(dd, handle)
for gpuId in gpuIds:
if response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_DIAGNOSTIC_INDEX].result == dcgm_structs.DCGM_DIAG_RESULT_SKIP:
logger.info("Got status DCGM_DIAG_RESULT_SKIP for gpuId %d. This is expected if this GPU does not support the Diagnostic test." % gpuId)
continue
assert response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_DIAGNOSTIC_INDEX].result == dcgm_structs.DCGM_DIAG_RESULT_PASS, \
"Should have passed the 15 second diagnostic for all GPUs"
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_memtest_failures_standalone(handle, gpuIds):
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr="memtest", paramsStr="memtest.test_duration=10")
inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL, 1000, injection_offset, True, repeatCount=5)
response = test_utils.diag_execute_wrapper(dd, handle)
assert response.perGpuResponses[gpuIds[0]].results[dcgm_structs.DCGM_MEMTEST_INDEX].result != dcgm_structs.DCGM_DIAG_RESULT_PASS, \
"Should have a failure due to injected DBEs, but got passing result"
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_short_memtest_run(handle, gpuIds):
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr="memtest", paramsStr="memtest.test_duration=10;memtest.test10=false")
response = test_utils.diag_execute_wrapper(dd, handle)
for gpuId in gpuIds:
if response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_MEMTEST_INDEX].result == dcgm_structs.DCGM_DIAG_RESULT_SKIP:
logger.info("Got status DCGM_DIAG_RESULT_SKIP for gpuId %d. This is expected if this GPU does not support the Diagnostic test." % gpuId)
continue
assert response.perGpuResponses[gpuId].results[dcgm_structs.DCGM_MEMTEST_INDEX].result == dcgm_structs.DCGM_DIAG_RESULT_PASS, \
"Should have passed the 15 second diagnostic for all GPUs"
@test_utils.run_with_diag_small_fb_mode() #Needs to be before host engine start
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_diag_output(handle, gpuIds):
if len(gpuIds) <= 1:
test_utils.skip_test("Skipping because test requires >1 live gpus")
try:
os.environ['__DCGM_DIAG_MEMTEST_FAIL_GPU'] = "1"
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr="memtest", paramsStr="memtest.test_duration=10;memtest.test10=false")
response = test_utils.diag_execute_wrapper(dd, handle)
assert response.perGpuResponses[0].results[dcgm_structs.DCGM_MEMTEST_INDEX].result == dcgm_structs.DCGM_DIAG_RESULT_PASS, \
"GPU 0 should have passed the 15 second diagnostic"
assert response.perGpuResponses[1].results[dcgm_structs.DCGM_MEMTEST_INDEX].result == dcgm_structs.DCGM_DIAG_RESULT_FAIL, \
"GPU 1 should NOT have passed the 15 second diagnostic"
finally:
del os.environ['__DCGM_DIAG_MEMTEST_FAIL_GPU']
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_short_diagnostic_run(handle, gpuIds):
helper_test_dcgm_short_diagnostic_run(handle, gpuIds)
def helper_test_dcgm_diag_paused(handle, gpuIds):
"""
Test that DCGM_ST_PAUSED is returned when the host engine is paused
"""
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr="1")
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
try:
dcgmSystem.PauseTelemetryForDiag()
response = test_utils.diag_execute_wrapper(dd, handle)
dcgmSystem.ResumeTelemetryForDiag()
except dcgm_structs.DCGMError as e:
assert e.value == dcgm_structs.DCGM_ST_PAUSED, "Expected DCGM_ST_PAUSED error"
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(1)
def test_dcgm_diag_paused_embedded(handle, gpuIds):
helper_test_dcgm_diag_paused(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(1)
def test_dcgm_diag_paused_standalone(handle, gpuIds):
helper_test_dcgm_diag_paused(handle, gpuIds)
| DCGM-master | testing/python3/tests/test_dcgm_diag.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test the performance of DCGM
import time
import datetime
import json
import os
import sys
import pkgutil
import operator
import math
from collections import defaultdict
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
import dcgm_structs
import dcgm_agent_internal
import dcgm_fields
import dcgm_fields_internal
import pydcgm
import logger
import test_utils
import utils
import stats
import option_parser
REQ_MATPLOTLIB_VER = '1.5.1'
def isReqMatplotlibVersion():
return 'matplotlib' in sys.modules and \
LooseVersion(matplotlib.__version__) >= LooseVersion(REQ_MATPLOTLIB_VER)
try:
import matplotlib
except ImportError:
logger.info('Graphs for performance tests will be missing since "matplotlib" is not installed')
else:
if not isReqMatplotlibVersion():
logger.info('Graphs for performance tests will be missing since "matplotlib" version '
+ '%s is less than the required version %s' % (matplotlib.__version__, REQ_MATPLOTLIB_VER))
else:
# must do this before importing matplotlib.pyplot
# to use backend that does not require X11 display server running
matplotlib.use('AGG')
from matplotlib import pyplot as plt
plt.style.use('ggplot') # pylint: disable=no-member
# duration to gather data for when we limit the record count for DCGM to store
# This time needs to be long enough for memory usage to level off.
BOUNDED_TEST_DURATION = 40
class MetadataTimeseries(object):
def __init__(self):
self.timestamps = []
self.fieldVals = defaultdict(list)
self.fieldGroupVals = defaultdict(list)
self.allFieldsVals = []
self.processVals = []
class CpuTimeseries(object):
def __init__(self):
self.timestamps = []
self.cpuInfo = []
def _plotFinalValueOrderedBarChart(points, title, ylabel, filenameBase, topValCount=20):
    '''points are (x, y) pairs where x is the xlabel and y is the height of the bar'''
if not isReqMatplotlibVersion():
logger.info('not generating ordered bar chart since "matplotlib" is not the required version')
return
if logger.log_dir is None:
logger.info('not generating ordered bar chart since logging is disabled')
return
width = 4
topSortedPts = sorted(points, key=operator.itemgetter(1), reverse=True)[:topValCount]
xTickLabels = [point[0] for point in topSortedPts]
x = list(range(0, len(topSortedPts)*width, width))
y = [point[1] for point in topSortedPts]
ax = plt.subplot(1, 1, 1)
ax.set_title(title)
plt.xlabel('ID')
plt.ylabel(ylabel)
ax.set_xticklabels(xTickLabels)
ax.set_xticks([tick + width/2. for tick in x])
ax.bar(left=x, height=y, width=4)
plt.tight_layout(pad=2.0, h_pad=2.0, w_pad=2.0)
plt.gcf().set_size_inches(9, 6)
filename = 'OrderedBar-%s-%s.png' % ('-'.join(title.split()), filenameBase)
figName = os.path.join(logger.log_dir, filename)
plt.savefig(figName)
plt.close()
logger.info('ordered bar chart for %s saved in %s' % (title, utils.shorten_path(figName)))
def _plot_metadata(x, yLists, title, ylabel, plotNum):
ax = plt.subplot(2, 2, plotNum)
ax.set_title(title)
for y in yLists:
ax.plot(x, y)
plt.xlabel('seconds since start')
plt.ylabel(ylabel)
def _generate_metadata_line_charts(metadataTSeries, ylabel, title):
if not isReqMatplotlibVersion():
logger.info('Not generating memory usage plots since "matplotlib" is not the required version')
return
if logger.log_dir is None:
logger.info('Not generating memory usage plots since logging is disabled')
return
if metadataTSeries.allFieldsVals:
_plot_metadata(x=metadataTSeries.timestamps,
yLists=[metadataTSeries.allFieldsVals],
title='%s for all fields' % title,
ylabel=ylabel,
plotNum=1)
if metadataTSeries.fieldVals:
_plot_metadata(x=metadataTSeries.timestamps,
yLists=list(metadataTSeries.fieldVals.values()),
title='%s for fields' % title,
ylabel=ylabel,
plotNum=2)
if metadataTSeries.processVals:
_plot_metadata(x=metadataTSeries.timestamps,
yLists=[metadataTSeries.processVals],
title='%s for process' % title,
ylabel=ylabel,
plotNum=3)
if metadataTSeries.fieldGroupVals:
_plot_metadata(x=metadataTSeries.timestamps,
yLists=list(metadataTSeries.fieldGroupVals.values()),
title='%s for field groups' % title,
ylabel=ylabel,
plotNum=4)
plt.tight_layout(pad=1.0, h_pad=1.0, w_pad=1.0)
plt.gcf().set_size_inches(11, 7)
filename = '%s-%s.png' % ('-'.join(title.split()), test_dcgm_standalone_perf_bounded.__name__)
figName = os.path.join(logger.log_dir, filename)
plt.savefig(figName)
plt.close()
logger.info('%s figure saved in %s' % (title, utils.shorten_path(figName)))
def _gather_perf_timeseries(handle, watchedFieldIds):
'''
    Gathers metadata over time and returns a tuple of three MetadataTimeseries
    (mem usage, exec time, avg exec time) and a CpuTimeseries (cpu utilization)
'''
system = pydcgm.DcgmSystem(handle)
memUsageTS = MetadataTimeseries()
    execTimeTS = MetadataTimeseries()
execTimeAvgTS = MetadataTimeseries()
cpuUtilTS = CpuTimeseries()
numFields = min(len(watchedFieldIds), 50)
fieldGroups = []
for i in range(1,6):
fieldGroups.append(pydcgm.DcgmFieldGroup(handle, "my_field_group_%d" % i, list(watchedFieldIds)[0:numFields]))
startTime = datetime.datetime.now()
while (datetime.datetime.now() - startTime).total_seconds() < BOUNDED_TEST_DURATION:
# poll memory usage
memUsageTS.timestamps.append((datetime.datetime.now() - startTime).total_seconds())
memUsageTS.processVals.append(system.introspect.memory.GetForHostengine().bytesUsed)
memUsageTS.allFieldsVals.append(system.introspect.memory.GetForAllFields().aggregateInfo.bytesUsed)
for id in watchedFieldIds:
memUsageTS.fieldVals[id].append(
dcgm_agent_internal.dcgmIntrospectGetFieldMemoryUsage(handle.handle, id).aggregateInfo.bytesUsed)
for fieldGroup in fieldGroups:
memUsageTS.fieldGroupVals[int(fieldGroup.fieldGroupId.value)].append(system.introspect.memory.GetForFieldGroup(fieldGroup).aggregateInfo.bytesUsed)
# poll execution time
execTimeTS.timestamps.append((datetime.datetime.now() - startTime).total_seconds())
execTimeTS.allFieldsVals.append(system.introspect.execTime.GetForAllFields().aggregateInfo.totalEverUpdateUsec)
for id in watchedFieldIds:
execTimeTS.fieldVals[id].append(
dcgm_agent_internal.dcgmIntrospectGetFieldExecTime(handle.handle, id).aggregateInfo.totalEverUpdateUsec)
#logger.info("fieldId %d: %s" % (id, str(execTimeTS.fieldVals[id][-1])))
for fieldGroup in fieldGroups:
execTimeTS.fieldGroupVals[int(fieldGroup.fieldGroupId.value)].append(system.introspect.execTime.GetForFieldGroup(fieldGroup).aggregateInfo.totalEverUpdateUsec)
# poll average execution time
execTimeAvgTS.timestamps.append((datetime.datetime.now() - startTime).total_seconds())
execTimeAvgTS.allFieldsVals.append(system.introspect.execTime.GetForAllFields().aggregateInfo.recentUpdateUsec)
for id in watchedFieldIds:
execTimeAvgTS.fieldVals[id].append(
dcgm_agent_internal.dcgmIntrospectGetFieldExecTime(handle.handle, id).aggregateInfo.recentUpdateUsec)
for fieldGroup in fieldGroups:
execTimeAvgTS.fieldGroupVals[int(fieldGroup.fieldGroupId.value)].append(system.introspect.execTime.GetForFieldGroup(fieldGroup).aggregateInfo.recentUpdateUsec)
# poll cpu utilization
cpuUtilTS.timestamps.append((datetime.datetime.now() - startTime).total_seconds())
cpuUtilTS.cpuInfo.append(system.introspect.cpuUtil.GetForHostengine())
time.sleep(0.050)
return memUsageTS, execTimeTS, execTimeAvgTS, cpuUtilTS
# generating graphs may cause hostengine to timeout so make timeout an extra 20 sec
@test_utils.run_with_standalone_host_engine(timeout=BOUNDED_TEST_DURATION + 20)
@test_utils.run_with_initialized_client()
def test_dcgm_standalone_perf_bounded(handle):
'''
Test that runs some subtests. When we bound the number of samples to keep for each field:
- DCGM memory usage eventually flatlines on a field, field group, all fields, and process level.
- DCGM memory usage is at a value that we expect (golden value). If what we
    expect changes over time then we must update what these values are (the tests will fail if we don't).
Plots of the memory usage and execution time generated during this test are saved and the
filename of the figure is output on the terminal.
Multiple tests are included in this test in order to save time by only gathering data once.
'''
if not option_parser.options.developer_mode:
test_utils.skip_test("Skipping developer test.")
handle = pydcgm.DcgmHandle(handle)
group = pydcgm.DcgmGroup(handle, groupName="metadata-test", groupType=dcgm_structs.DCGM_GROUP_DEFAULT)
updateFreq = 1000000 # 1 second. Needs to be long enough for all fields on all GPUs to update, or the record density will vary based on CPU consumption
watchedFieldIds = test_utils.watch_all_fields(handle.handle,
group.GetGpuIds(),
updateFreq,
maxKeepAge=0.0, #Use maxKeepEntries only to enforce the quota
maxKeepEntries=10)
memUsageTS, execTimeTS, execTimeAvgTS, cpuUtilTS = _gather_perf_timeseries(handle, watchedFieldIds)
activeGpuCount = test_utils.get_live_gpu_count(handle.handle)
# run the actual tests on the gathered data
# test that memory usage flatlines
test_utils.run_subtest(_test_mem_bounded_flatlines_fields, memUsageTS)
test_utils.run_subtest(_test_mem_bounded_flatlines_fieldgroups, memUsageTS)
test_utils.run_subtest(_test_mem_bounded_flatlines_allfields, memUsageTS)
test_utils.run_subtest(_test_mem_bounded_flatlines_process, memUsageTS)
# test that memory usage is at an expected level (golden value)
    # the tail should start very close to the end of the series since we compare the mean
# of the tail to the golden value
tailStart = int(0.8 * len(memUsageTS.timestamps))
test_utils.run_subtest(_test_mem_bounded_golden_values_fields, activeGpuCount, memUsageTS, tailStart)
test_utils.run_subtest(_test_mem_bounded_golden_values_allfields, activeGpuCount, memUsageTS, tailStart, len(watchedFieldIds))
test_utils.run_subtest(_test_mem_bounded_golden_values_process, memUsageTS, tailStart, len(watchedFieldIds))
# tests for CPU utilization (see functions for descriptions)
test_utils.run_subtest(_test_cpuutil_bounded_flatlines_hostengine, cpuUtilTS)
# test that execution time grows at a linear rate
#test_utils.run_subtest(_test_exectime_bounded_linear_growth, execTimeTS)
# make some pretty graphs to look at for insight or to help debug failures
_generate_metadata_line_charts(memUsageTS, ylabel='bytes', title='Bytes Used')
_generate_metadata_line_charts(execTimeTS, ylabel='usec', title='Execution Time')
_generate_metadata_line_charts(execTimeAvgTS, ylabel='usec', title='Recent Exec Time')
_generate_cpu_line_charts(cpuUtilTS)
barPlotPoints = [(id, execTimeAvgTS.fieldVals[id][-1]) for id in execTimeAvgTS.fieldVals]
_plotFinalValueOrderedBarChart(barPlotPoints,
title='Top 20 Field Recent Exec Time',
ylabel='usec',
filenameBase='test-perf')
def _generate_cpu_line_charts(cpuUtilTS):
if not isReqMatplotlibVersion():
logger.info('Not generating CPU utilization graphs since "matplotlib" is not the required version')
return
if logger.log_dir is None:
logger.info('Not generating CPU utilization graphs since logging is disabled')
return
x = cpuUtilTS.timestamps
totalCpuUtil = [100*data.total for data in cpuUtilTS.cpuInfo]
kernelCpuUtil = [100*data.kernel for data in cpuUtilTS.cpuInfo]
userCpuUtil = [100*data.user for data in cpuUtilTS.cpuInfo]
fig, ax = plt.subplots()
ax.set_title('CPU Utilization')
# hacky way of generating legend colors to match graph colors
polys = ax.stackplot(x, kernelCpuUtil, userCpuUtil)
legendProxies = []
for poly in polys:
legendProxies.append(plt.Rectangle((0,0), 1, 1, fc=poly.get_facecolor()[0]))
ax.legend(legendProxies, ['kernel', 'user'], loc='upper right')
plt.xlabel('seconds since start')
plt.ylabel('% of device CPU resources')
plt.tight_layout(pad=1.0, h_pad=1.0, w_pad=1.0)
plt.gcf().set_size_inches(11, 7)
filename = '%s-%s.png' % ('CPU-Util', test_dcgm_standalone_perf_bounded.__name__)
figName = os.path.join(logger.log_dir, filename)
plt.savefig(figName)
plt.close()
logger.info('%s figure saved in %s' % ('CPU-Util', utils.shorten_path(figName)))
def _test_exectime_bounded_linear_growth(execTimeTS):
'''
Test that when the number of samples that DCGM collects is limited there is linear growth
    in the total amount of time used to retrieve each field.
'''
tolerance = 0.60
for fieldId, series in execTimeTS.fieldVals.items():
tailStart = int(0.4*len(series))
tailLen = len(series) - tailStart
# take a linear regression of the execution timeseries
# if its corr. coeff. is not high (1.0 is highest)
# OR
# if its slope is much different from the actual start -> end slope
# THEN something is wrong.
# calc the lin. regr. slope
# taken from https://en.wikipedia.org/wiki/Simple_linear_regression#Fitting_the_regression_line
x = execTimeTS.timestamps[tailStart:]
y = series[tailStart:]
if y[-1] == 0:
logger.info("Skipping fieldId %d with exec times of 0" % fieldId)
continue
#logger.info("x %s, y %s" % (str(x), str(y)))
rxy = stats.correlation_coefficient(x, y)
sx = stats.standard_deviation(x)
sy = stats.standard_deviation(y)
assert(rxy >= 0.90), (
'execution time for field %s did not have a strong linear correlation. ' % fieldId +
'Its correlation coefficient was %.4f' % rxy)
logger.debug('corr. coeff. for field %s: %s' % (fieldId, rxy))
linRegSlope = rxy * (sy / sx)
slope = (y[-1] - y[0]) / float(x[-1] - x[0])
minSlope = (1-tolerance)*linRegSlope
maxSlope = (1+tolerance)*linRegSlope
assert(minSlope <= slope <= maxSlope), (
'execution time growth for field %s was not linear. ' % fieldId +
'It had an overall slope of %s but the linear regression slope was %s. '
% (slope, linRegSlope) +
'Tolerated min: %s, tolerated max: %s' % (minSlope, maxSlope))
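# Minimal sketch (never invoked by any test) of the slope identity the check above
# relies on: for a least-squares fit, slope = r_xy * (s_y / s_x). On a perfectly
# linear toy series the fitted slope and the endpoint slope agree exactly.
def _example_regression_slope_identity():
    x = [0.0, 1.0, 2.0, 3.0]
    y = [0.0, 3.0, 6.0, 9.0] # y = 3x, so r_xy is 1.0 and both slopes are 3.0
    rxy = stats.correlation_coefficient(x, y)
    linRegSlope = rxy * (stats.standard_deviation(y) / stats.standard_deviation(x))
    endpointSlope = (y[-1] - y[0]) / (x[-1] - x[0])
    return abs(linRegSlope - endpointSlope) < 1e-9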
def _assert_flatlines(seriesType, seriesId, series):
if sum(series) == 0:
return
tailStart = int(0.4 * len(series))
seriesTail = series[tailStart:]
    # assert that each point on the tail is no more than 5% away from the mean
# this indicates that the series leveled-off
    flatlineVal = stats.mean(seriesTail)
for point in seriesTail:
dFlatlinePercent = (point - flatlineVal) / flatlineVal
assert (abs(dFlatlinePercent) < 0.05), ('memory usage did not flatline. '
+ 'A point of type "%s" with ID "%s" was %.4f%% away from indicating a flat line. \nTail Points: %s\nPoints: %s'
% (seriesType, seriesId, 100*dFlatlinePercent, str(seriesTail), str(series))
            + 'See the memory usage plot ".png" file output on the terminal above for further details')
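# Worked example for _assert_flatlines above: a tail of [100, 101, 99, 100] has a
# mean of 100 and every point is within 1% of it, so it counts as flat; a tail of
# [100, 100, 120, 100] would trip the 5% check on the 120 sample.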
def _test_mem_bounded_flatlines_allfields(memUsageTS):
_assert_flatlines('all-fields', '', memUsageTS.allFieldsVals)
def _test_mem_bounded_flatlines_process(memUsageTS):
_assert_flatlines('process', '', memUsageTS.processVals)
def _test_mem_bounded_flatlines_fields(memUsageTS):
for id, series in memUsageTS.fieldVals.items():
_assert_flatlines('field', id, series)
def _test_mem_bounded_flatlines_fieldgroups(memUsageTS):
for id, series in memUsageTS.fieldGroupVals.items():
_assert_flatlines('field-group', id, series)
def helper_field_has_variable_size(fieldId):
'''
Returns True if a field has a variable memory size per record. False if it doesn't.
'''
if fieldId == dcgm_fields_internal.DCGM_FI_DEV_GPU_UTIL_SAMPLES or \
fieldId == dcgm_fields_internal.DCGM_FI_DEV_MEM_COPY_UTIL_SAMPLES or \
fieldId == dcgm_fields_internal.DCGM_FI_DEV_GRAPHICS_PIDS or \
fieldId == dcgm_fields_internal.DCGM_FI_DEV_COMPUTE_PIDS:
return True
fieldMeta = dcgm_fields.DcgmFieldGetById(fieldId)
if fieldMeta.fieldType == dcgm_fields.DCGM_FT_BINARY:
return True
else:
return False
def _test_mem_bounded_golden_values_fields(activeGpuCount, memUsageTS, tailStart):
goldenVal = 1148 # 1 KB plus some swag per field instance (Global, GPU). This is based off of the keyed vector block size and default number of blocks
tolerance = 0.10 # low tolerance, amount of records stored is bounded
for fieldId, series in memUsageTS.fieldVals.items():
seriesTail = series[tailStart:]
# skip fields that are not implemented
if sum(seriesTail) == 0:
continue
#Don't check the size of binary fields since it's arbitrary per fieldId
if helper_field_has_variable_size(fieldId):
logger.info("Skipping variable-sized fieldId %d" % fieldId)
continue
mean = stats.mean(seriesTail)
lowLimit = (1-tolerance)*goldenVal
highLimit = (1+tolerance)*goldenVal*activeGpuCount
assert lowLimit < mean < highLimit, \
'Expected field "%d" memory usage to be between %s and %s but got %s' % \
(fieldId, lowLimit, highLimit, mean) \
+ 'If this new value is expected, change the golden value used for comparison.'
def _test_mem_bounded_golden_values_allfields(activeGpuCount, memUsageTS, tailStart, numFieldIds):
goldenVal = 2000 * numFieldIds * activeGpuCount # 2 KiB per fieldId per GPU. This gives some swag for the binary fields that are larger
tolerance = 0.15 # low tolerance, amount of records stored is bounded
mean = stats.mean(memUsageTS.allFieldsVals[tailStart:])
logger.info("Mean total field value memory usage: %f" % mean)
assert mean < (1+tolerance)*goldenVal, \
'Expected all fields bytes used to be within %.2f%% of %d but it was %d. ' % \
(100*tolerance, goldenVal, mean) \
+ 'If this new value is expected, change the golden value used for comparison.'
def _test_mem_bounded_golden_values_process(memUsageTS, tailStart, numFieldIds):
highWaterMark = 29000000 #Setting a canary in the coal mine value. This comes from the /proc filesystem and can report anywhere from 15 to 28 MiB.
mean = stats.mean(memUsageTS.processVals[tailStart:])
assert (mean < highWaterMark), \
'Expected bytes used of the process to be under %d but it was %.2f. ' % \
(highWaterMark, mean) \
+ 'If this new value is expected, change the high water mark.'
def _test_cpuutil_bounded_flatlines_hostengine(cpuUtilTS):
'''
Test that the CPU utilization flatlines when record storage is bounded
'''
tailStart = int(0.4 * len(cpuUtilTS.timestamps))
tail = [data.total for data in cpuUtilTS.cpuInfo[tailStart:]]
tolerance = 0.20 # points more than this much relative distance from the mean are outliers
relativeOutliersAllowed = 0.02 # 2% outliers allowed
outlierCount = 0
mean = stats.mean(tail)
for cpuUtil in tail:
if not ((1-tolerance)*mean < cpuUtil < (1+tolerance)*mean):
outlierCount += 1
relativeOutliers = outlierCount / float(len(tail))
assert(relativeOutliers < relativeOutliersAllowed), (
'CPU utilization did not stay consistent. It varied for %.2f%% of the time out of %d points '
% (100*relativeOutliers, len(tail))
+ 'but it is only allowed to vary %.2f%% of the time' % (100*relativeOutliersAllowed))
| DCGM-master | testing/python3/tests/test_perf.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_field_helpers
import dcgm_structs
import dcgm_structs_internal
import test_utils
import dcgm_errors
def helper_test_dcgm_error_get_priority(handle, gpuIds):
prio = dcgm_errors.dcgmErrorGetPriorityByCode(dcgm_errors.DCGM_FR_VOLATILE_DBE_DETECTED)
assert prio == dcgm_errors.DCGM_ERROR_ISOLATE, "DBE errors should be an isolate priority, but found %d" % prio
prio = dcgm_errors.dcgmErrorGetPriorityByCode(dcgm_errors.DCGM_FR_NVML_API)
    assert prio == dcgm_errors.DCGM_ERROR_MONITOR, "NVML API errors should be a monitor priority, but found %d" % prio
prio = dcgm_errors.dcgmErrorGetPriorityByCode(-1)
assert prio == dcgm_errors.DCGM_ERROR_UNKNOWN, "Non-existent error should be unknown priority, but found %d" % prio
prio = dcgm_errors.dcgmErrorGetPriorityByCode(dcgm_errors.DCGM_FR_ERROR_SENTINEL)
assert prio == dcgm_errors.DCGM_ERROR_UNKNOWN, "The sentinel error error should be unknown priority, but found %d" % prio
def helper_test_dcgm_error_get_msg(handle, gpuIds):
msg = dcgm_errors.dcgmErrorGetFormatMsgByCode(dcgm_errors.DCGM_FR_NVLINK_CRC_ERROR_THRESHOLD)
assert msg == dcgm_errors.DCGM_FR_NVLINK_CRC_ERROR_THRESHOLD_MSG, \
"Expected '%s' as msg, but found '%s'" % (dcgm_errors.DCGM_FR_NVLINK_CRC_ERROR_THRESHOLD_MSG, msg)
msg = dcgm_errors.dcgmErrorGetFormatMsgByCode(dcgm_errors.DCGM_FR_DEVICE_COUNT_MISMATCH)
assert msg == dcgm_errors.DCGM_FR_DEVICE_COUNT_MISMATCH_MSG, \
"Expected '%s' as msg, but found '%s'" % (dcgm_errors.DCGM_FR_DEVICE_COUNT_MISMATCH_MSG, msg)
msg = dcgm_errors.dcgmErrorGetFormatMsgByCode(dcgm_errors.DCGM_FR_ERROR_SENTINEL)
assert not msg, "The sentinel error error should be empty, but found %s" % msg
msg = dcgm_errors.dcgmErrorGetFormatMsgByCode(-1)
assert not msg, "Non-existent error should be empty, but found %s" % msg
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_error_get_priority_embedded(handle, gpuIds):
helper_test_dcgm_error_get_priority(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_error_get_priority_standalone(handle, gpuIds):
helper_test_dcgm_error_get_priority(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_error_get_msg_embedded(handle, gpuIds):
helper_test_dcgm_error_get_msg(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_error_get_msg_standalone(handle, gpuIds):
helper_test_dcgm_error_get_msg(handle, gpuIds)
| DCGM-master | testing/python3/tests/test_errors.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import test_utils
import utils
import pydcgm
import dcgm_structs
import dcgm_fields
import logger
import time
import dcgm_agent
import datetime
import os
import signal
def test_connection_disconnect_error_after_shutdown():
'''
Test that DCGM_ST_BADPARAM is returned when the dcgm API is used after
a call to dcgmShutdown has been made.
'''
handle = pydcgm.DcgmHandle()
group = pydcgm.DcgmGroup(handle, groupName='test-connection')
    gpuIds = group.GetGpuIds()
handle.Shutdown()
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM)):
gpuIds = group.GetGpuIds()
@test_utils.run_with_standalone_host_engine(passAppAsArg=True)
@test_utils.run_with_initialized_client()
def test_dcgm_standalone_connection_disconnect_error_after_hostengine_terminate(handle, hostengineApp):
'''
Test that DCGM_ST_CONNECTION_NOT_VALID is returned when the dcgm API is used after
the hostengine process is terminated via `nv-hostengine --term`.
'''
handle = pydcgm.DcgmHandle(handle)
group = pydcgm.DcgmGroup(handle, groupName='test-connection')
    gpuIds = group.GetGpuIds()
hostengineApp.terminate()
hostengineApp.validate()
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(
dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID)):
gpuIds = group.GetGpuIds()
# When fabric manager is enabled and the hostengine is killed via SIGKILL, the apprunner framework is unable to detect
# that the hostengine has actually stopped. In the app runner's retvalue() method, subprocess.poll() returns None
# which implies that the hostengine is still running. As a temporary WaR, we do not enable the fabric manager for this
# test. (It is possible that there is a race condition once SIGKILL is sent which causes subprocess.poll()
# to return None - I did not get a chance to investigate it further).
@test_utils.run_with_standalone_host_engine(passAppAsArg=True)
@test_utils.run_with_initialized_client()
def test_dcgm_standalone_connection_disconnect_error_after_hostengine_murder(handle, hostengineApp):
'''
Test that DCGM_ST_CONNECTION_NOT_VALID is returned when the dcgm API is used after
the hostengine process is killed via a `SIGKILL` signal.
'''
handle = pydcgm.DcgmHandle(handle)
group = pydcgm.DcgmGroup(handle, groupName='test-connection')
    gpuIds = group.GetGpuIds()
pid = hostengineApp.getpid()
os.kill(pid, signal.SIGKILL)
utils.wait_for_pid_to_die(pid)
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(
dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID)):
gpuIds = group.GetGpuIds()
@test_utils.run_only_as_root()
def test_dcgm_connection_error_when_no_hostengine_exists():
if not utils.is_bare_metal_system():
test_utils.skip_test("Virtualization Environment not supported")
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(
dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID)):
# use a TEST-NET (rfc5737) addr instead of loopback in case a local hostengine is running
handle = pydcgm.DcgmHandle(ipAddress='192.0.2.0', timeoutMs=100)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_connection_client_cleanup(handle, gpuIds):
'''
Make sure that resources that were allocated by a client are cleaned up
'''
fieldGroupFieldIds = [dcgm_fields.DCGM_FI_DEV_GPU_TEMP, ]
#Get a 2nd connection which we'll check for cleanup. Use the raw APIs so we can explicitly cleanup
connectParams = dcgm_structs.c_dcgmConnectV2Params_v1()
connectParams.version = dcgm_structs.c_dcgmConnectV2Params_version
connectParams.persistAfterDisconnect = 0
cleanupHandle = dcgm_agent.dcgmConnect_v2('localhost', connectParams)
groupName = 'clientcleanupgroup'
groupId = dcgm_agent.dcgmGroupCreate(cleanupHandle, dcgm_structs.DCGM_GROUP_EMPTY, groupName)
fieldGroupName = 'clientcleanupfieldgroup'
fieldGroupId = dcgm_agent.dcgmFieldGroupCreate(cleanupHandle, fieldGroupFieldIds, fieldGroupName)
#Disconnect our second handle. This should cause the cleanup to occur
dcgm_agent.dcgmDisconnect(cleanupHandle)
time.sleep(1.0) #Allow connection cleanup to occur since it's asynchronous
#Try to retrieve the field group info. This should throw an exception
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NO_DATA)):
fieldGroupInfo = dcgm_agent.dcgmFieldGroupGetInfo(handle, fieldGroupId)
#Try to retrieve the group info. This should throw an exception
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_connection_versions(handle):
'''
Test that different versions of dcgmConnect_v2 work
'''
localhostStr = "127.0.0.1"
v1Struct = dcgm_structs.c_dcgmConnectV2Params_v1()
v1Struct.version = dcgm_structs.c_dcgmConnectV2Params_version1
#These APIs throw exceptions on error
v1Handle = dcgm_agent.dcgmConnect_v2(localhostStr, v1Struct, dcgm_structs.c_dcgmConnectV2Params_version1)
v2Struct = dcgm_structs.c_dcgmConnectV2Params_v2()
v2Struct.version = dcgm_structs.c_dcgmConnectV2Params_version2
#These APIs throw exceptions on error
v2Handle = dcgm_agent.dcgmConnect_v2(localhostStr, v2Struct, dcgm_structs.c_dcgmConnectV2Params_version2)
#Do a basic request with each handle
gpuIds = dcgm_agent.dcgmGetAllSupportedDevices(v1Handle)
gpuIds2 = dcgm_agent.dcgmGetAllSupportedDevices(v2Handle)
#Clean up the handles
dcgm_agent.dcgmDisconnect(v1Handle)
dcgm_agent.dcgmDisconnect(v2Handle)
def _test_connection_helper(domainSocketName):
#Make sure the library is initialized
dcgm_agent.dcgmInit()
#First, try the raw method of using the dcgm_agent API directly
v2Struct = dcgm_structs.c_dcgmConnectV2Params_v2()
v2Struct.version = dcgm_structs.c_dcgmConnectV2Params_version2
v2Struct.addressIsUnixSocket = 1
v2Handle = dcgm_agent.dcgmConnect_v2(domainSocketName, v2Struct, dcgm_structs.c_dcgmConnectV2Params_version2)
#Use the handle, which will throw an exception on error
gpuIds2 = dcgm_agent.dcgmGetAllSupportedDevices(v2Handle)
dcgm_agent.dcgmDisconnect(v2Handle)
#Now use the DcgmHandle method
dcgmHandle = pydcgm.DcgmHandle(unixSocketPath=domainSocketName)
dcgmSystem = dcgmHandle.GetSystem()
gpuIds = dcgmSystem.discovery.GetAllGpuIds()
#Try to disconnect cleanly from our domain socket
del(dcgmHandle)
dcgmHandle = None
# Add a date-based extension to the path to prevent having trouble when the framework is run as root
# and then again as non-root
domainSocketFilename = '/tmp/dcgm_test%s' % (datetime.datetime.now().strftime("%j%f"))
@test_utils.run_with_standalone_host_engine(20, heArgs=['-d', domainSocketFilename])
def test_dcgm_connection_domain_socket():
'''
Test that DCGM can listen on a unix domain socket, you can connect to it,
and you can do basic queries against it
'''
_test_connection_helper(domainSocketFilename)
defaultSocketFilename = '/tmp/nv-hostengine'
@test_utils.run_only_as_root()
@test_utils.run_with_standalone_host_engine(20, heArgs=['-d'])
def test_dcgm_connection_domain_socket_default():
'''
Test that DCGM can listen on the default unix domain socket, you can connect to it,
and you can do basic queries against it
'''
_test_connection_helper(defaultSocketFilename)
| DCGM-master | testing/python3/tests/test_connection.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_fields
from dcgm_structs import dcgmExceptionClass
import test_utils
import time
import os
import sys
# Set up the environment for the DcgmCollectd class before importing
os.environ['DCGM_TESTING_FRAMEWORK'] = 'True'
if 'LD_LIBRARY_PATH' in os.environ:
os.environ['DCGMLIBPATH'] = os.environ['LD_LIBRARY_PATH']
stubspath = os.path.dirname(os.path.realpath(__file__)) + '/stubs/'
if stubspath not in sys.path:
sys.path.insert(0, stubspath)
import collectd_tester_globals
import dcgm_collectd_plugin
class Config:
"""
pseudo collectd Config class.
"""
def __init__(self, key = None, values = None):
self.key = key
self.values = values
self.children = []
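# Illustrative only: the config-parsing tests below hand-build a collectd-style
# config tree with this class, e.g.
#
#   cfg = Config()
#   cfg.children = [Config('Interval', ['2']),
#                   Config('FieldIds', ['(100,memory_clock):5,video_clock:.1'])]
#
# and then pass it to the plugin's registered config callback.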
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_collectd_basic_integration(handle, gpuIds):
"""
Verifies that we can inject specific data and get that same data back
"""
dcgmHandle = pydcgm.DcgmHandle(handle)
dcgmSystem = dcgmHandle.GetSystem()
specificFieldIds = [dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION]
fieldValues = [1,
5,
1000,
9000]
for gpuId in gpuIds:
for i in range(0, len(specificFieldIds)):
field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
field.fieldId = specificFieldIds[i]
field.status = 0
field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
field.ts = int((time.time()+10) * 1000000.0) # set the injected data into the future
field.value.i64 = fieldValues[i]
ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, field)
assert (ret == dcgm_structs.DCGM_ST_OK)
gvars = collectd_tester_globals.gvars
assert 'config' in gvars
gvars['config']()
assert 'init' in gvars
gvars['init']()
assert 'read' in gvars
gvars['read']()
assert 'out' in gvars
outDict = gvars['out']
assert 'shutdown' in gvars
# gvars['shutdown']()
# Verify that we can read back the fields we watch.
for gpuId in gpuIds:
assert str(gpuId) in outDict
gpuDict = outDict[str(gpuId)]
for i in range(0, len(specificFieldIds)):
fieldTag = dcgmSystem.fields.GetFieldById(specificFieldIds[i]).tag
assert fieldTag in gpuDict
assert gpuDict[fieldTag] == fieldValues[i]
def helper_collectd_config(gpuIds, config, verify_fields = True):
"""
Verify config via dcgm plugin. Verify fields parsed, if desired.
"""
gvars = collectd_tester_globals.gvars
assert 'config' in gvars
gvars['config'](config)
assert 'init' in gvars
gvars['init']()
assert 'read' in gvars
gvars['read']()
assert 'out' in gvars
outDict = gvars['out']
assert 'shutdown' in gvars
gvars['shutdown']()
if (verify_fields):
# Verify that we can read back the fields we watch.
fieldTags = [ 'sm_clock', 'memory_clock', 'video_clock' ]
for gpuId in gpuIds:
assert str(gpuId) in outDict
gpuDict = outDict[str(gpuId)]
for fieldTag in fieldTags:
assert fieldTag in gpuDict
# We don't actually verify the value here, just the field tag
# name.
#
# This verifies that we parsed the fields properly, set the
# watches, and actually retrieves values for those fields. The
# value will likely be zero on an initial read, but we can't
# guarantee this. The basic test checks reading back expected
# values.
# assert gpuDict[fieldTag] == fieldValues[i]
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_collectd_config_integration(handle, gpuIds):
"""
Verifies that we can parse config and get specified fields back.
"""
config = Config()
config.children = [Config('Interval', ['2']), Config('FieldIds', ['(100,memory_clock):5,video_clock:.1'])]
helper_collectd_config(gpuIds, config)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_collectd_config_bad_alpha_field(handle, gpuIds):
"""
Verifies that we can parse config and get specified fields back, despite a
bad alpha field.
"""
config = Config()
config.children = [Config('Interval', ['2']), Config('FieldIds', ['(100,memory_clock):5,video_clock:.1,foo_clock:1'])]
helper_collectd_config(gpuIds, config)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_collectd_config_bad_numeric_field(handle, gpuIds):
"""
Verifies that we can parse config and get specified fields back despite a
bad numeric field.
"""
config = Config()
config.children = [Config('Interval', ['2']), Config('FieldIds', ['(100,memory_clock):5,video_clock:.1,1010:1'])]
helper_collectd_config(gpuIds, config)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_collectd_config_no_fields(handle, gpuIds):
"""
Verifies that we can parse config if no fields are specified.
"""
config = Config()
config.children = [Config('Interval', ['2'])]
helper_collectd_config(gpuIds, config, False)
| DCGM-master | testing/python3/tests/test_dcgm_collectd_plugin.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test the metadata API calls for DCGM
import time
from sys import float_info
import dcgm_structs
import dcgm_agent
import dcgm_agent_internal
import dcgm_fields
import pydcgm
import logger
import test_utils
import stats
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_on_architecture('amd64|ppc64le')
def test_dcgm_standalone_metadata_memory_get_hostengine_sane(handle):
"""
Sanity test for API that gets memory usage of the hostengine process
"""
handle = pydcgm.DcgmHandle(handle)
system = pydcgm.DcgmSystem(handle)
bytesUsed = system.introspect.memory.GetForHostengine().bytesUsed
logger.debug('the hostengine process is using %.2f MB' % (bytesUsed / 1024. / 1024.))
assert(1*1024*1024 < bytesUsed < 100*1024*1024), bytesUsed # 1MB to 100MB
def _cpu_load(start_time, duration_sec, x):
while time.time() - start_time < duration_sec:
x*x
def _cpu_load_star(arg1, arg2):
""" Convert arguments from (start_time, duration_sec), x to start_time, duration_sec, x"""
_cpu_load(*arg1, arg2)
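# For example, pool.starmap(_cpu_load_star, zip(repeat((start_time, 1)), range(4)))
# fans out to _cpu_load(start_time, 1, 0), _cpu_load(start_time, 1, 1), ... with one
# call per worker process, as done in generate_cpu_load() below.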
@test_utils.run_with_embedded_host_engine()
def test_dcgm_embedded_metadata_cpuutil_get_hostengine_sane(handle):
"""
Sanity test for API that gets CPU Utilization of the hostengine process.
"""
from multiprocessing import cpu_count
handle = pydcgm.DcgmHandle(handle)
system = pydcgm.DcgmSystem(handle)
def generate_cpu_load(duration_sec):
"""
Generate a CPU load for a given duration.
"""
from multiprocessing import Pool
from itertools import repeat
start_time = time.time()
processes = cpu_count()
with Pool(processes) as pool:
pool.starmap(_cpu_load_star, zip(repeat((start_time, duration_sec)), range(processes)))
def get_current_process_cpu_util():
"""Return a tuple representing CPU user-time and system-time and total for the current process
"""
import os
with open('/proc/self/stat', "rb", buffering=0) as f:
data = f.readline()
values = data[data.rfind(b')') + 2:].split()
utime = float(values[11]) / os.sysconf("SC_CLK_TCK")
stime = float(values[12]) / os.sysconf("SC_CLK_TCK")
return utime, stime, utime + stime
start = time.time()
start_cpu_util = get_current_process_cpu_util()
system.introspect.cpuUtil.GetForHostengine()
generate_cpu_load(1)
stop = time.time()
stop_cpu_util = get_current_process_cpu_util()
cpuUtil = system.introspect.cpuUtil.GetForHostengine(False)
#diff_utime = stop_cpu_util[0] - start_cpu_util[0]
#diff_stime = stop_cpu_util[1] - start_cpu_util[1]
diff_total = stop_cpu_util[2] - start_cpu_util[2]
diff_time = stop - start
overall_cpu_util = diff_total / diff_time
logger.debug("DCGM CPU Util: %f" % (cpuUtil.total * cpu_count()))
logger.debug('Stats CPU Util: %f' % overall_cpu_util)
assert abs(overall_cpu_util - (cpu_count() * cpuUtil.total)) < 0.05, "CPU Utilization was not within 5% of expected value"
# test that user and kernel add to total (with rough float accuracy)
assert abs(cpuUtil.total - (cpuUtil.user + cpuUtil.kernel)) <= 4*float_info.epsilon, \
'CPU kernel and user utilization did not add up to total. Kernel: %f, User: %f, Total: %f' \
% (cpuUtil.kernel, cpuUtil.user, cpuUtil.total)
| DCGM-master | testing/python3/tests/test_metadata.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DCGM-master | testing/python3/tests/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dcgm_structs
import dcgm_agent_internal
import dcgm_agent
import logger
import utils
import test_utils
import dcgm_fields
import apps
import dcgmvalue
from apps.app_runner import AppRunner
import string
import time
from ctypes import *
import sys
import os
import subprocess
from subprocess import PIPE
import pprint
from sys import stdout
paths = {
"Linux_32bit": "./apps/x86/",
"Linux_64bit": "./apps/amd64/",
"Linux_ppc64le": "./apps/ppc64le/",
"Linux_aarch64": "./apps/aarch64/",
"Windows_64bit": "./apps/amd64/"
}
sdk_path = paths[utils.platform_identifier]
sdk_sample_scripts_path = "./sdk_samples/scripts"
# the sample scripts can potentially take a long time to run since they perform
# a health check
SAMPLE_SCRIPT_TIMEOUT = 120.0
def initialize_sdk(fileName):
sdk_executable = sdk_path + fileName
if utils.is_linux():
if os.path.exists(sdk_executable):
# On linux, for binaries inside the package (not just commands in the path) test that they have +x
# e.g. if package is extracted on windows and copied to Linux, the +x privileges will be lost
assert os.access(sdk_executable, os.X_OK), \
"SDK binary %s is not executable! Make sure that the testing archive has been correctly extracted." \
% sdk_executable
# -f -> fast mode to shorten the test duration
return subprocess.Popen( [sdk_executable, '-f'], stdout=PIPE, stdin=PIPE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
@test_utils.run_only_if_mig_is_disabled() # We cannot set the compute mode when MIG is enabled
@test_utils.for_all_same_sku_gpus()
def test_sdk_configuration_sample_embedded(handle, gpuIds):
"""
Test SDK configuration sample
"""
sdk_subprocess = initialize_sdk("configuration_sample")
sdk_stdout = sdk_subprocess.communicate(input=b'0')[0] #input 0 through stdin (embedded)
ss = ""
for line in sdk_stdout.decode():
ss += line
assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK, "SDK sample encountered an error. Return code: %d" % sdk_subprocess.returncode
@test_utils.run_only_if_mig_is_disabled() # We cannot set the compute mode when MIG is enabled
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
@test_utils.for_all_same_sku_gpus()
def test_sdk_configuration_sample_standalone(handle, gpuIds):
"""
Test SDK configuration sample
"""
sdk_subprocess = initialize_sdk("configuration_sample")
sdk_stdout = sdk_subprocess.communicate(input=b'1\n127.0.0.1')[0]
ss = ""
for line in sdk_stdout.decode():
ss += line
assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK, "SDK sample encountered an error. Return code: %d" % sdk_subprocess.returncode
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_sdk_health_sample_embedded(handle, gpuIds):
"""
Test SDK health sample
"""
test_utils.skip_unhealthy_mem(handle, gpuIds)
gpuGroups = test_utils.group_gpu_ids_by_sku(handle, gpuIds)
if len(gpuGroups) > 1:
test_utils.skip_test("Test only works for gpus with same sku")
sdk_subprocess = initialize_sdk("health_sample")
sdk_stdout = sdk_subprocess.communicate(input=b'0')[0]
ss = ""
for line in sdk_stdout.decode():
ss += line
assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK or sdk_subprocess.returncode == dcgm_structs.DCGM_ST_NO_DATA, "SDK sample encountered an error. Return code: %d. stdout: %s" % (sdk_subprocess.returncode, sdk_stdout.decode('utf-8'))
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_sdk_health_sample_standalone(handle, gpuIds):
"""
Test SDK health sample
"""
test_utils.skip_unhealthy_mem(handle, gpuIds)
gpuGroups = test_utils.group_gpu_ids_by_sku(handle, gpuIds)
if len(gpuGroups) > 1:
test_utils.skip_test("Test only works for gpus with same sku")
sdk_subprocess = initialize_sdk("health_sample")
sdk_stdout = sdk_subprocess.communicate(input=b'1\n127.0.0.1')[0]
ss = ""
for line in sdk_stdout.decode():
ss += line
assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK or sdk_subprocess.returncode == dcgm_structs.DCGM_ST_NO_DATA, "SDK sample encountered an error. Return code: %d" % sdk_subprocess.returncode
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_sdk_policy_sample_embedded(handle, gpuIds):
"""
Test SDK policy sample
"""
sdk_subprocess = initialize_sdk("policy_sample")
sdk_stdout = sdk_subprocess.communicate(input=b'0')[0]
ss = ""
for line in sdk_stdout.decode():
ss += line
assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK, "SDK sample encountered an error. Return code: %d" % sdk_subprocess.returncode
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_sdk_policy_sample_standalone(handle, gpuIds):
"""
Test SDK policy sample
"""
sdk_subprocess = initialize_sdk("policy_sample")
sdk_stdout = sdk_subprocess.communicate(input=b'1\n127.0.0.1')[0]
ss = ""
for line in sdk_stdout.decode():
ss += line
assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK, "SDK sample encountered an error. Return code: %d" % sdk_subprocess.returncode
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_sdk_field_value_sample_embedded(handle, gpuIds):
"""
Test SDK field value sample
"""
sdk_subprocess = initialize_sdk("field_value_sample")
sdk_stdout = sdk_subprocess.communicate(input=b'0')[0]
ss = ""
for line in sdk_stdout.decode():
ss += line
assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK, "SDK sample encountered an error. Return code: %d. Output %s" % (sdk_subprocess.returncode, ss)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_sdk_field_value_sample_standalone(handle, gpuIds):
"""
Test SDK field value sample
"""
sdk_subprocess = initialize_sdk("field_value_sample")
sdk_stdout = sdk_subprocess.communicate(input=b'1\n127.0.0.1')[0]
ss = ""
for line in sdk_stdout.decode():
ss += line
assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK, "SDK sample encountered an error. Return code: %d. Output: %s" % (sdk_subprocess.returncode, ss)
@test_utils.run_with_standalone_host_engine(timeout=SAMPLE_SCRIPT_TIMEOUT)
@test_utils.run_only_as_root()
def test_sdk_example_script_smoke_standalone_auto():
"""
Smoke test ensuring that the example script for using dcgm does not fail
for a standalone hostengine with auto operation mode
"""
env = {'PYTHONPATH': ':'.join(sys.path)}
script = os.path.join(sdk_sample_scripts_path, 'dcgm_example.py')
example = AppRunner(sys.executable, [script, '--opmode=auto', '--type=standalone'], env=env)
example.run(timeout=SAMPLE_SCRIPT_TIMEOUT)
@test_utils.run_only_as_root()
def test_sdk_example_script_smoke_embedded_auto():
"""
Smoke test ensuring that the example script for using dcgm does not fail
for an embedded hostengine with auto operation mode
"""
env = {'PYTHONPATH': ':'.join(sys.path)}
script = os.path.join(sdk_sample_scripts_path, 'dcgm_example.py')
example = AppRunner(sys.executable, [script, '--opmode=auto', '--type=embedded'], env=env)
example.run(timeout=SAMPLE_SCRIPT_TIMEOUT)
@test_utils.run_only_as_root()
def test_sdk_example_script_smoke_embedded_manual():
"""
Smoke test ensuring that the example script for using dcgm does not fail
for an embedded hostengine with manual operation mode
"""
env = {'PYTHONPATH': ':'.join(sys.path)}
script = os.path.join(sdk_sample_scripts_path, 'dcgm_example.py')
example = AppRunner(sys.executable, [script, '--opmode=manual', '--type=embedded'], env=env)
example.run(timeout=SAMPLE_SCRIPT_TIMEOUT)
"""
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_sdk_process_stats_sample_embedded(handle, gpuIds):
""
Test SDK process stats sample
""
devices = dcgm_agent.dcgmGetAllDevices(handle)
for gpu in devices:
device = dcgm_agent.dcgmGetDeviceAttributes(handle, gpu)
if device is None:
continue
else:
break
if device == None:
test_utils.skip_test("No GPU to run on")
dcgm_agent.dcgmWatchFields(handle, dcgm_structs.DCGM_GROUP_ALL_GPUS, dcgm_fields.DCGM_FC_PROCESSINFO, 1000000, 3600, 0)
ctx = apps.CudaCtxCreateAdvancedApp(
[
"--ctxCreate", device.identifiers.pciBusId,
"--busyGpu", device.identifiers.pciBusId, "5000", # keep GPU busy (100% utilization) for 15s
"--ctxDestroy", device.identifiers.pciBusId,
])
ctx.start(10000)
cuda_pid = ctx.getpid()
ctx.wait()
time.sleep(1.0)
ctx.validate()
dcgm_agent.dcgmUpdateAllFields(handle, 1) #force update
sdk_subprocess = initialize_sdk("process_stats_sample")
sdk_stdout = sdk_subprocess.communicate(input= b"0\n" + str(cuda_pid) )[0]
ss = ""
for line in sdk_stdout.decode():
ss += line
#assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
# Right now it returns no-data since we are passing in a random PID
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK or sdk_subprocess.returncode == dcgm_structs.DCGM_ST_NO_DATA, "SDK sample encountered an error. Return code: %d" % sdk_subprocess.returncode
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_sdk_process_stats_sample_standalone(handle, gpuIds):
""
Test SDK process stats sample
""
devices = dcgm_agent.dcgmGetAllDevices(handle)
for gpu in devices:
device = dcgm_agent.dcgmGetDeviceAttributes(handle, gpu)
if device is None:
continue
else:
break
if device == None:
test_utils.skip_test("No GPU to run on")
dcgm_agent.dcgmWatchFields(handle, dcgm_structs.DCGM_GROUP_ALL_GPUS, dcgm_fields.DCGM_FC_PROCESSINFO, 1000000, 3600, 0)
ctx = apps.CudaCtxCreateAdvancedApp(
[
"--ctxCreate", device.identifiers.pciBusId,
"--busyGpu", device.identifiers.pciBusId, "5000", # keep GPU busy (100% utilization) for 5s
"--ctxDestroy", device.identifiers.pciBusId,
])
ctx.start(10000)
cuda_pid = ctx.getpid()
ctx.wait()
time.sleep(1.0)
ctx.validate()
dcgm_agent.dcgmUpdateAllFields(handle, 1) #force update
time.sleep(1.0)
sdk_subprocess = initialize_sdk("process_stats_sample")
sdk_stdout = sdk_subprocess.communicate(input= b"1\n127.0.0.1\n" + str(cuda_pid) )[0]
ss = ""
for line in sdk_stdout.decode():
ss += line
#assert "error" not in ss.lower(), "Error detected in SDK sample. Output: %s" % ss
# Right now it returns no-data since we are passing in a random PID
assert sdk_subprocess.returncode == dcgm_structs.DCGM_ST_OK or sdk_subprocess.returncode == dcgm_structs.DCGM_ST_NO_DATA, "SDK sample encountered an error. Return code: %d" % sdk_subprocess.returncode
#ctx.wait()
#ctx.validate()
"""
| DCGM-master | testing/python3/tests/test_sdk.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# APIs that use a versioned structure from dcgm_agent.py:
# dcgmConfigGet
# dcgmConfigSet
# dcgmConnect_v2
# dcgmFieldGroupGetAll - Bug found
# dcgmFieldGroupGetInfo
# dcgmGetDeviceAttributes
# dcgmGetPidInfo - Bug found
# dcgmGroupGetInfo
# dcgmHealthCheck
# dcgmIntrospectGetFieldsExecTime - Bug found
# dcgmIntrospectGetFieldsMemoryUsage - Bug found
# dcgmIntrospectGetHostengineCpuUtilization
# dcgmIntrospectGetHostengineMemoryUsage
# dcgmJobGetStats
# dcgmPolicyGet
# dcgmRunDiagnostic
# APIs that use a versioned structure from dcgm_agent_internal:
# vtDcgmGetVgpuDeviceAttributes
# dcgmGetVgpuInstanceAttributes
# dcgmIntrospectGetFieldExecTime
# dcgmIntrospectGetFieldMemoryUsage
# dcgmVgpuConfigGet
# dcgmVgpuConfigSet
##
# If a new API that uses a versioned structure is added, the corresponding test should be added in this file
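##
# Each check below follows the same pattern (illustrative sketch only --
# "dcgmSomeApi" and "c_dcgmSomeStruct_v1" are hypothetical placeholders, not
# real DCGM symbols):
#
#   def vtDcgmSomeApi(dcgm_handle, versionTest):
#       st = dcgm_structs.c_dcgmSomeStruct_v1()
#       st.version = versionTest  # deliberately bogus version
#       fn = dcgm_structs._dcgmGetFunctionPointer("dcgmSomeApi")
#       dcgm_structs._dcgmCheckReturn(fn(dcgm_handle, byref(st)))
#
# The corresponding test then wraps the call in
# test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)),
# once with version 0 and once with an arbitrary wrong value such as 50.
##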
import apps
import logger
import test_utils
import dcgm_agent
import dcgm_agent_internal
import pydcgm
import dcgm_structs
import dcgm_fields
from ctypes import *
from dcgm_structs import dcgmExceptionClass
# Provides access to functions from dcgm_agent_internal
dcgmFP = dcgm_structs._dcgmGetFunctionPointer
def vtDcgmConnect_v2(ip_address, connectParams, versionTest):
connectParams = dcgm_structs.c_dcgmConnectV2Params_v1()
connectParams.version = dcgm_structs.make_dcgm_version(connectParams, 1)
logger.debug("Structure version: %d" % connectParams.version)
connectParams.version = versionTest
dcgm_handle = c_void_p()
fn = dcgmFP("dcgmConnect_v2")
ret = fn(ip_address, byref(connectParams), byref(dcgm_handle))
dcgm_structs._dcgmCheckReturn(ret)
return dcgm_handle
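# make_dcgm_version() encodes both the structure size and the version number in
# the .version field (roughly sizeof(struct) | (version << 24); see
# dcgm_structs.py for the authoritative definition), so the tests below that
# overwrite .version with 0 or an arbitrary value such as 50 are guaranteed to
# trip DCGM_ST_VER_MISMATCH.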
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_connect_validate(handle, gpuIds):
"""
Validates structure version
"""
fieldGroupFieldIds = [dcgm_fields.DCGM_FI_DEV_GPU_TEMP, ]
connectParams = dcgm_structs.c_dcgmConnectV2Params_v1()
connectParams.persistAfterDisconnect = 0
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmConnect_v2('localhost', connectParams, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmConnect_v2('localhost', connectParams, versionTest)
def vtDcgmGetDeviceAttributes(dcgm_handle, gpuId, versionTest):
fn = dcgmFP("dcgmGetDeviceAttributes")
device_values = dcgm_structs.c_dcgmDeviceAttributes_deprecated_v1()
device_values.version = dcgm_structs.make_dcgm_version(device_values, 1)
logger.debug("Structure version: %d" % device_values.version)
device_values.version = versionTest
ret = fn(dcgm_handle, c_int(gpuId), byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_get_device_attributes_validate(handle, gpuIds):
"""
Validates structure version
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
groupObj.AddGpu(gpuIds[0])
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
#Make sure the device attributes and config fields have updated
systemObj.UpdateAllFields(1)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmGetDeviceAttributes(handle, gpuIds[0], versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random invalid version
ret = vtDcgmGetDeviceAttributes(handle, gpuIds[0], versionTest)
def vtDcgmGroupGetInfo(dcgm_handle, group_id, versionTest):
fn = dcgmFP("dcgmGroupGetInfo")
device_values = dcgm_structs.c_dcgmGroupInfo_v2()
device_values.version = dcgm_structs.make_dcgm_version(device_values, 2)
logger.debug("Structure version: %d" % device_values.version)
device_values.version = versionTest
ret = fn(dcgm_handle, group_id, byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
@test_utils.run_with_embedded_host_engine()
def test_dcgm_group_get_info_validate(handle):
"""
Validates structure version
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "test1")
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmGroupGetInfo(handle, groupId, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmGroupGetInfo(handle, groupId, versionTest)
def vtDcgmFieldGroupGetInfo(dcgm_handle, fieldGroupId, versionTest):
c_fieldGroupInfo = dcgm_structs.c_dcgmFieldGroupInfo_v1()
c_fieldGroupInfo.version = dcgm_structs.make_dcgm_version(c_fieldGroupInfo, 1)
logger.debug("Structure version: %d" % c_fieldGroupInfo.version)
c_fieldGroupInfo.version = versionTest
c_fieldGroupInfo.fieldGroupId = fieldGroupId
fn = dcgmFP("dcgmFieldGroupGetInfo")
ret = fn(dcgm_handle, byref(c_fieldGroupInfo))
dcgm_structs._dcgmCheckReturn(ret)
return c_fieldGroupInfo
@test_utils.run_with_embedded_host_engine()
def test_dcgm_field_group_get_info_validate(handle):
"""
Validates structure version
"""
fieldIds = [dcgm_fields.DCGM_FI_DRIVER_VERSION, dcgm_fields.DCGM_FI_DEV_NAME, dcgm_fields.DCGM_FI_DEV_BRAND]
handle = pydcgm.DcgmHandle(handle)
fieldGroup = pydcgm.DcgmFieldGroup(handle, "mygroup", fieldIds)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmFieldGroupGetInfo(handle.handle, fieldGroup.fieldGroupId, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmFieldGroupGetInfo(handle.handle, fieldGroup.fieldGroupId, versionTest)
def vtDcgmFieldGroupGetAll(dcgm_handle, versionTest):
c_allGroupInfo = dcgm_structs.c_dcgmAllFieldGroup_v1()
c_allGroupInfo.version = dcgm_structs.make_dcgm_version(c_allGroupInfo, 1)
logger.debug("Structure version: %d" % c_allGroupInfo.version)
c_allGroupInfo.version = versionTest
fn = dcgmFP("dcgmFieldGroupGetAll")
ret = fn(dcgm_handle, byref(c_allGroupInfo))
dcgm_structs._dcgmCheckReturn(ret)
return c_allGroupInfo
@test_utils.run_with_embedded_host_engine()
def test_dcgm_field_group_get_all_validate(handle):
"""
Validates structure version
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIdList = systemObj.discovery.GetAllGpuIds()
assert len(gpuIdList) >= 0, "Not able to find devices on the node for embedded case"
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
vtDcgmFieldGroupGetAll(handle, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
vtDcgmFieldGroupGetAll(handle, versionTest)
def vtDcgmConfigSet(dcgm_handle, group_id, configToSet, status_handle, versionTest):
fn = dcgmFP("dcgmConfigSet")
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.version = dcgm_structs.make_dcgm_version(config_values, 1)
logger.debug("Structure version: %d" % config_values.version)
configToSet.version = versionTest
ret = fn(dcgm_handle, group_id, byref(configToSet), status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@test_utils.run_with_embedded_host_engine()
def test_dcgm_config_set_validate(handle):
"""
Validates structure version
"""
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "test1")
status_handle = dcgm_agent.dcgmStatusCreate()
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmConfigSet(handle,groupId,config_values, status_handle, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random invalid version
ret = vtDcgmConfigSet(handle,groupId,config_values, status_handle, versionTest)
def vtDcgmConfigGet(dcgm_handle, group_id, reqCfgType, count, status_handle, versionTest):
fn = dcgmFP("dcgmConfigGet")
config_values_array = count * dcgm_structs.c_dcgmDeviceConfig_v1
c_config_values = config_values_array()
for index in range(0, count):
c_config_values[index].version = versionTest
ret = fn(dcgm_handle, group_id, reqCfgType, count, c_config_values, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return list(c_config_values[0:count])
@test_utils.run_with_embedded_host_engine()
def test_dcgm_config_get_validate(handle):
"""
Validates structure version
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIdList = systemObj.discovery.GetAllGpuIds()
assert len(gpuIdList) >= 0, "Not able to find devices on the node for embedded case"
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "test1")
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
status_handle = dcgm_agent.dcgmStatusCreate()
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle, versionTest)
def vtDcgmPolicyGet(dcgm_handle, group_id, count, status_handle, versionTest):
fn = dcgmFP("dcgmPolicyGet")
policy_array = count * dcgm_structs.c_dcgmPolicy_v1
c_policy_values = policy_array()
policy = dcgm_structs.c_dcgmPolicy_v1()
policy.version = dcgm_structs.make_dcgm_version(policy, 1)
logger.debug("Structure version: %d" % policy.version)
policyCallback = dcgm_structs.c_dcgmPolicyCallbackResponse_v1()
policyCallback.version = dcgm_structs.make_dcgm_version(policyCallback, 1)
logger.debug("Structure version: %d" % policyCallback.version)
for index in range(0, count):
c_policy_values[index].version = versionTest
ret = fn(dcgm_handle, group_id, count, c_policy_values, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return c_policy_values[0:count]
@test_utils.run_with_embedded_host_engine()
def test_dcgm_policy_get_validate(handle):
"""
Validates structure version
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIdList = systemObj.discovery.GetAllGpuIds()
assert len(gpuIdList) >= 0, "Not able to find devices on the node for embedded case"
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "test1")
status_handle = dcgm_agent.dcgmStatusCreate()
count = 1
diagLevel = dcgm_structs.DCGM_DIAG_LVL_SHORT
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmPolicyGet(handle, groupId, count, status_handle, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmPolicyGet(handle, groupId, count, status_handle, versionTest)
def vtDcgmHealthCheck(dcgm_handle, groupId, versionTest):
c_results = dcgm_structs.c_dcgmHealthResponse_v4()
c_results.version = dcgm_structs.make_dcgm_version(c_results, 4)
logger.debug("Structure version: %d" % c_results.version)
c_results.version = versionTest
fn = dcgmFP("dcgmHealthCheck")
ret = fn(dcgm_handle, groupId, byref(c_results))
dcgm_structs._dcgmCheckReturn(ret)
return c_results
@test_utils.run_with_embedded_host_engine()
def test_dcgm_health_check_validate(handle):
"""
Validates structure version
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "test1")
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmHealthCheck(handle, groupId, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmHealthCheck(handle, groupId, versionTest)
def vtDcgmActionValidate_v2(dcgm_handle, runDiagInfo, versionTest):
response = dcgm_structs.c_dcgmDiagResponse_v8()
response.version = dcgm_structs.make_dcgm_version(response, 7)
logger.debug("Structure version: %d" % response.version)
runDiagInfo = dcgm_structs.c_dcgmRunDiag_v7()
runDiagInfo.version = dcgm_structs.dcgmRunDiag_version7
logger.debug("Structure version: %d" % runDiagInfo.version)
runDiagInfo.version = versionTest
response.version = versionTest
fn = dcgmFP("dcgmActionValidate_v2")
ret = fn(dcgm_handle, byref(runDiagInfo), byref(response))
dcgm_structs._dcgmCheckReturn(ret)
return response
def vtDcgmActionValidate(dcgm_handle, group_id, validate, versionTest):
response = dcgm_structs.c_dcgmDiagResponse_v8()
response.version = versionTest
# Put the group_id and validate into a dcgmRunDiag struct
runDiagInfo = dcgm_structs.c_dcgmRunDiag_v7()
runDiagInfo.version = versionTest
runDiagInfo.validate = validate
runDiagInfo.groupId = group_id
fn = dcgmFP("dcgmActionValidate_v2")
ret = fn(dcgm_handle, byref(runDiagInfo), byref(response))
dcgm_structs._dcgmCheckReturn(ret)
return response
def vtDcgmRunDiagnostic(dcgm_handle, group_id, diagLevel, versionTest):
response = dcgm_structs.c_dcgmDiagResponse_v8()
response.version = versionTest
fn = dcgmFP("dcgmRunDiagnostic")
ret = fn(dcgm_handle, group_id, diagLevel, byref(response))
dcgm_structs._dcgmCheckReturn(ret)
return response
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
@test_utils.run_with_max_power_limit_set()
def test_dcgm_run_diagnostic_validate(handle, gpuIds):
"""
Validates structure version
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIdList = systemObj.discovery.GetAllGpuIds()
assert len(gpuIdList) >= 0, "Not able to find devices on the node for embedded case"
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "test1")
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
status_handle = dcgm_agent.dcgmStatusCreate()
diagLevel = dcgm_structs.DCGM_DIAG_LVL_SHORT
gpuIdStr = ""
for i, gpuId in enumerate(gpuIds):
if i > 0:
gpuIdStr += ","
gpuIdStr += str(gpuId)
drd = dcgm_structs.c_dcgmRunDiag_t()
drd.version = dcgm_structs.dcgmRunDiag_version
drd.validate = dcgm_structs.DCGM_POLICY_VALID_SV_SHORT
drd.groupId = groupId
drd.gpuList = gpuIdStr
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmActionValidate_v2(handle, drd, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmActionValidate_v2(handle, drd, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmActionValidate(handle, drd.groupId, drd.validate, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmActionValidate(handle, drd.groupId, drd.validate, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmRunDiagnostic(handle, drd.groupId, diagLevel, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmRunDiagnostic(handle, drd.groupId, diagLevel, versionTest)
def vtDcgmGetPidInfo(dcgm_handle, groupId, pid, versionTest):
fn = dcgmFP("dcgmGetPidInfo")
pidInfo = dcgm_structs.c_dcgmPidInfo_v2()
pidInfo.version = dcgm_structs.make_dcgm_version(dcgm_structs.c_dcgmPidInfo_v2, 2)
logger.debug("Structure version: %d" % pidInfo.version)
pidInfo.version = versionTest
pidInfo.pid = pid
ret = fn(dcgm_handle, groupId, byref(pidInfo))
dcgm_structs._dcgmCheckReturn(ret)
return pidInfo
def StartAppOnGpus(handle):
dcgmHandle = pydcgm.DcgmHandle(handle=handle)
dcgmSystem = dcgmHandle.GetSystem()
allGpuIds = dcgmSystem.discovery.GetAllGpuIds()
gpuInfoList = []
addedPids = []
for gpuId in allGpuIds:
gpuAttrib = dcgmSystem.discovery.GetGpuAttributes(gpuId)
gpuInfoList.append((gpuId, gpuAttrib.identifiers.pciBusId))
for info in gpuInfoList:
gpuId = info[0]
busId = info[1]
appTimeout = int(1000) #milliseconds
#Start a cuda app so we have something to be accounted for
appParams = ["--ctxCreate", busId,
"--busyGpu", busId, str(appTimeout),
"--ctxDestroy", busId]
app = apps.CudaCtxCreateAdvancedApp(appParams, env=test_utils.get_cuda_visible_devices_env(handle, gpuId))
app.start(appTimeout*2)
pid = app.getpid()
addedPids.append(pid)
app.wait()
app.terminate()
app.validate()
logger.info("Started PID %d." % pid)
return addedPids
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_get_pid_info_validate(handle, gpuIds):
"""
Validates structure version
"""
pidList = StartAppOnGpus(handle)
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "test1")
for pid in pidList:
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmGetPidInfo(handle, groupId, pid, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmGetPidInfo(handle, groupId, pid, versionTest)
def vtDcgmJobGetStats(dcgm_handle, jobid, versionTest):
fn = dcgmFP("dcgmJobGetStats")
jobInfo = dcgm_structs.c_dcgmJobInfo_v3()
jobInfo.version = dcgm_structs.make_dcgm_version(jobInfo, 3)
logger.debug("Structure version: %d" % jobInfo.version)
jobInfo.version = versionTest
ret = fn(dcgm_handle, jobid, byref(jobInfo))
dcgm_structs._dcgmCheckReturn(ret)
return jobInfo
@test_utils.run_with_embedded_host_engine()
def test_dcgm_job_get_stats_validate(handle):
"""
Validates structure version
"""
jobid = "1"
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmJobGetStats(handle, jobid, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmJobGetStats(handle, jobid, versionTest)
def vtDcgmIntrospectGetHostengineMemoryUsage(dcgm_handle, versionTest, waitIfNoData=True):
fn = dcgmFP("dcgmIntrospectGetHostengineMemoryUsage")
memInfo = dcgm_structs.c_dcgmIntrospectMemory_v1()
memInfo.version = dcgm_structs.make_dcgm_version(memInfo, 1)
logger.debug("Structure version: %d" % memInfo.version)
memInfo.version = versionTest
ret = fn(dcgm_handle, byref(memInfo), waitIfNoData)
dcgm_structs._dcgmCheckReturn(ret)
return memInfo
@test_utils.run_with_embedded_host_engine()
def test_dcgm_introspect_get_hostengine_memory_usage_validate(handle):
"""
Validates structure version
"""
waitIfNoData = True
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmIntrospectGetHostengineMemoryUsage(handle, versionTest, waitIfNoData)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmIntrospectGetHostengineMemoryUsage(handle, versionTest, waitIfNoData)
def vtDcgmIntrospectGetHostengineCpuUtilization(dcgm_handle, versionTest, waitIfNoData=True):
fn = dcgmFP("dcgmIntrospectGetHostengineCpuUtilization")
cpuUtil = dcgm_structs.c_dcgmIntrospectCpuUtil_v1()
cpuUtil.version = dcgm_structs.make_dcgm_version(cpuUtil, 1)
logger.debug("Structure version: %d" % cpuUtil.version)
cpuUtil.version = versionTest
ret = fn(dcgm_handle, byref(cpuUtil), waitIfNoData)
dcgm_structs._dcgmCheckReturn(ret)
return cpuUtil
@test_utils.run_with_embedded_host_engine()
def test_dcgm_introspect_get_hostengine_cpu_utilization_validate(handle):
"""
Validates structure version
"""
waitIfNoData = True
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmIntrospectGetHostengineCpuUtilization(handle, versionTest, waitIfNoData)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmIntrospectGetHostengineCpuUtilization(handle, versionTest, waitIfNoData)
########### dcgm_agent_internal.py ###########
def vtDcgmGetVgpuDeviceAttributes(dcgm_handle, gpuId, versionTest):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmGetVgpuDeviceAttributes")
device_values = dcgm_structs.c_dcgmVgpuDeviceAttributes_v6()
device_values.version = dcgm_structs.make_dcgm_version(device_values, 1)
logger.debug("Structure version: %d" % device_values.version)
device_values.version = versionTest
ret = fn(dcgm_handle, c_int(gpuId), byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_get_vgpu_device_attributes_validate(handle, gpuIds):
"""
Validates structure version
"""
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmGetVgpuDeviceAttributes(handle, gpuIds[0], versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmGetVgpuDeviceAttributes(handle, gpuIds[0], versionTest)
def vtDcgmGetVgpuInstanceAttributes(dcgm_handle, vgpuId, versionTest):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmGetVgpuInstanceAttributes")
device_values = dcgm_structs.c_dcgmVgpuInstanceAttributes_v1()
device_values.version = dcgm_structs.make_dcgm_version(device_values, 1)
logger.debug("Structure version: %d" % device_values.version)
device_values.version = versionTest
ret = fn(dcgm_handle, c_int(vgpuId), byref(device_values))
dcgm_structs._dcgmCheckReturn(ret)
return device_values
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_get_vgpu_instance_attributes_validate(handle, gpuIds):
"""
Validates structure version
"""
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmGetVgpuInstanceAttributes(handle, gpuIds[0], versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmGetVgpuInstanceAttributes(handle, gpuIds[0], versionTest)
def vtDcgmVgpuConfigSet(dcgm_handle, group_id, configToSet, status_handle, versionTest):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmVgpuConfigSet")
configToSet.version = versionTest
ret = fn(dcgm_handle, group_id, byref(configToSet), status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return ret
@test_utils.run_with_embedded_host_engine()
def test_dcgm_vgpu_config_set_validate(handle):
"""
Validates structure version
"""
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "test1")
status_handle = dcgm_agent.dcgmStatusCreate()
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmVgpuConfigSet(handle, groupId, config_values, status_handle, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random invalid version
ret = vtDcgmVgpuConfigSet(handle, groupId, config_values, status_handle, versionTest)
def vtDcgmVgpuConfigGet(dcgm_handle, group_id, reqCfgType, count, status_handle, versionTest):
fn = dcgm_structs._dcgmGetFunctionPointer("dcgmVgpuConfigSet")
vgpu_config_values_array = count * dcgm_structs.c_dcgmDeviceVgpuConfig_v1
c_config_values = vgpu_config_values_array()
vgpuConfig = dcgm_structs.c_dcgmDeviceVgpuConfig_v1()
vgpuConfig.version = dcgm_structs.make_dcgm_version(vgpuConfig, 1)
logger.debug("Structure version: %d" % vgpuConfig.version)
for index in range(0, count):
c_config_values[index].version = versionTest
ret = fn(dcgm_handle, group_id, c_config_values, status_handle)
dcgm_structs._dcgmCheckReturn(ret)
return list(c_config_values[0:count])
@test_utils.run_with_embedded_host_engine()
def test_dcgm_vgpu_config_get_validate(handle):
"""
Validates structure version
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIdList = systemObj.discovery.GetAllGpuIds()
assert len(gpuIdList) >= 0, "Not able to find devices on the node for embedded case"
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "test1")
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
status_handle = dcgm_agent.dcgmStatusCreate()
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 0 #invalid version
ret = vtDcgmVgpuConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle, versionTest)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
versionTest = 50 #random number version
ret = vtDcgmVgpuConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle, versionTest)
| DCGM-master | testing/python3/tests/test_dcgm_versioned_structs.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Tests written for test_utils.py
'''
import test_utils
import dcgm_agent_internal
@test_utils.run_with_embedded_host_engine()
def test_utils_run_with_embedded_host_engine(handle):
'''
Sanity test for running with an embedded host engine
'''
assert(handle.value == dcgm_agent_internal.DCGM_EMBEDDED_HANDLE.value), \
"Expected embedded handle %s but got %s" % \
(hex(dcgm_agent_internal.DCGM_EMBEDDED_HANDLE.value), hex(handle.value))
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
def test_utils_run_with_standalone_host_engine(handle):
'''
Sanity test for running with a standalone host engine
'''
assert(handle.value != dcgm_agent_internal.DCGM_EMBEDDED_HANDLE.value), \
"Expected a handle different from the embedded one %s" % \
hex(dcgm_agent_internal.DCGM_EMBEDDED_HANDLE.value)
| DCGM-master | testing/python3/tests/test_validate_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import logger
import test_utils
import dcgm_fields
import dcgm_internal_helpers
import DcgmDiag
import option_parser
import os
import subprocess
import signal
import threading
import time
from dcgm_field_injection_helpers import inject_value
from shutil import which as find_executable
injection_offset = 3
################# General helpers #################
def check_diag_result_fail(response, gpuIndex, testIndex):
return response.perGpuResponses[gpuIndex].results[testIndex].result == dcgm_structs.DCGM_DIAG_RESULT_FAIL
def check_diag_result_pass(response, gpuIndex, testIndex):
return response.perGpuResponses[gpuIndex].results[testIndex].result == dcgm_structs.DCGM_DIAG_RESULT_PASS
################# General tests #################
##### Fail early behavior tests
def verify_early_fail_checks_for_test(handle, gpuId, test_name, testIndex, extraTestInfo):
"""
Helper method for verifying the fail early checks for the specified test.
"""
duration = 2 if testIndex != dcgm_structs.DCGM_TARGETED_POWER_INDEX else 30 # Prevent false failures due to min
# duration requirements for Targeted Power
paramsStr = "%s.test_duration=%s" % (test_name, duration)
###
# First verify that the given test passes for the gpu.
# If it doesn't pass, skip test and add note to check GPU health
logger.info("Checking whether %s test passes on GPU %s" % (test_name, gpuId))
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr=test_name, paramsStr=paramsStr)
test_name_no_spaces = test_name.replace(" ", "_")
logname = '/tmp/nv_' + test_name_no_spaces + '%s.log'
dd.SetDebugLogFile(logname % 1)
dd.SetDebugLevel(5)
response = test_utils.diag_execute_wrapper(dd, handle)
if not check_diag_result_pass(response, gpuId, testIndex):
logger.info("Not testing %s because GPU %s does not pass. "
"Please verify whether the GPU is healthy." % (test_name, gpuId))
###
# Verify fail early behavior by inserting an error.
# Setup test parameters
# We will be exiting early so the following duration is just how long we allow the test
# to run before we kill it due to a suspected test failure.
# Note that this has been increased from 20 -> 60 because some platforms are egregiously
# slow for even small context create + smaller cuda malloc.
# If this test fails, it will take the full duration.
duration = 60
paramsStr = "%s.test_duration=%s" % (test_name, duration)
response = None
test_names = test_name
if extraTestInfo:
test_names += "," + extraTestInfo[0]
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId], testNamesStr=test_names, paramsStr=paramsStr)
dd.SetFailEarly(checkInterval=2) # enable fail early checks
dd.SetDebugLogFile(logname % 3)
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_GPU_TEMP, 150, 1, True, repeatCount=10)
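# Injecting a 150C GPU temperature reading (repeated so it stays current for the
# whole run) should trip the fail-early health check and abort the plugin well
# before the 60s test_duration elapses.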
# launch the diagnostic
start = time.time()
response = test_utils.diag_execute_wrapper(dd, handle)
end = time.time()
total_time = end - start
assert total_time < duration, \
"Expected %s test to exit early. Test took %ss to complete.\nGot result: %s (\ninfo: %s,\n warning: %s)" \
% (test_name, total_time,
response.perGpuResponses[gpuId].results[testIndex].result,
response.perGpuResponses[gpuId].results[testIndex].info,
response.perGpuResponses[gpuId].results[testIndex].error.msg)
# Verify the test failed
assert check_diag_result_fail(response, gpuId, testIndex), \
"Expected %s test to fail due to injected dbes.\nGot result: %s (\ninfo: %s,\n warning: %s)" % \
(test_name, response.perGpuResponses[gpuId].results[testIndex].result,
response.perGpuResponses[gpuId].results[testIndex].info,
response.perGpuResponses[gpuId].results[testIndex].error.msg)
if extraTestInfo:
extraTestResult = response.perGpuResponses[gpuId].results[extraTestInfo[1]].result
assert extraTestResult == dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN, \
"Expected the extra test to be skipped since the first test failed.\nGot results: %s" % \
(extraTestResult)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_nvvs_plugin_fail_early_diagnostic_standalone(handle, gpuIds):
verify_early_fail_checks_for_test(handle, gpuIds[0], "diagnostic", dcgm_structs.DCGM_DIAGNOSTIC_INDEX, None)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_nvvs_plugin_fail_early_targeted_stress_standalone(handle, gpuIds):
verify_early_fail_checks_for_test(handle, gpuIds[0], "targeted stress", dcgm_structs.DCGM_TARGETED_STRESS_INDEX, None)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_nvvs_plugin_fail_early_targeted_power_standalone(handle, gpuIds):
verify_early_fail_checks_for_test(handle, gpuIds[0], "targeted power", dcgm_structs.DCGM_TARGETED_POWER_INDEX, None)
@test_utils.run_with_standalone_host_engine(120, heEnv=test_utils.smallFbModeEnv)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_nvvs_plugin_fail_early_two_tests_standalone(handle, gpuIds):
extraTestInfo = [ "pcie", dcgm_structs.DCGM_PCI_INDEX ]
verify_early_fail_checks_for_test(handle, gpuIds[0], "diagnostic", dcgm_structs.DCGM_DIAGNOSTIC_INDEX, extraTestInfo)
################# Software plugin tests #################
def check_software_result_pass(response, index):
assert 0 <= index < dcgm_structs.LEVEL_ONE_MAX_RESULTS
return response.levelOneResults[index].result == dcgm_structs.DCGM_DIAG_RESULT_PASS
def check_software_result_pass_all(response):
for result in response.levelOneResults:
# ignore tests that are not run
if result.result != dcgm_structs.DCGM_DIAG_RESULT_PASS \
and result.result != dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN:
return False
return True
def check_software_result_fail(response, index):
assert 0 <= index < dcgm_structs.LEVEL_ONE_MAX_RESULTS
return response.levelOneResults[index].result == dcgm_structs.DCGM_DIAG_RESULT_FAIL
def check_software_result_fail_all(response):
for result in response.levelOneResults:
# ignore tests that are not run
if result.result != dcgm_structs.DCGM_DIAG_RESULT_FAIL \
and result.result != dcgm_structs.DCGM_DIAG_RESULT_NOT_RUN:
return False
return True
def helper_check_software_page_retirements_fail_on_pending_retirements(handle, gpuId):
"""
Ensure that the software test for page retirements fails when there are pending page retirements.
"""
# First verify that the software test passes for the gpu.
# If it doesn't pass, skip test and add note to check GPU health
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId])
dd.UseFakeGpus()
response = test_utils.diag_execute_wrapper(dd, handle)
if not check_software_result_pass(response, dcgm_structs.DCGM_SWTEST_PAGE_RETIREMENT):
test_utils.skip_test("Skipping because GPU %s does not pass software page retirement test. "
"Please verify whether the GPU is healthy." % gpuId)
# Inject some pending page retirements
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING, 1, injection_offset, True)
response = test_utils.diag_execute_wrapper(dd, handle)
# Ensure software test failed due to pending page retirements
assert check_software_result_fail(response, dcgm_structs.DCGM_SWTEST_PAGE_RETIREMENT), \
"Expected software test to fail due to pending page retirements in the GPU"
# Reset injected value
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING, 0, injection_offset, True)
# Ensure diag passes now
response = test_utils.diag_execute_wrapper(dd, handle)
assert check_software_result_pass(response, dcgm_structs.DCGM_SWTEST_PAGE_RETIREMENT), \
"Expected software test to pass"
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_nvvs_plugin_software_pending_page_retirements_standalone(handle, gpuIds):
# Injection tests can only work with the standalone host engine
helper_check_software_page_retirements_fail_on_pending_retirements(handle, gpuIds[0])
def helper_check_software_page_retirements_fail_total_retirements(handle, gpuId):
"""
Ensure that the software test for page retirements fails when there are more than 60 page retirements.
"""
# First verify that the software test passes for the gpu. If it doesn't pass, skip test and add note to check GPU health
dd = DcgmDiag.DcgmDiag(gpuIds=[gpuId])
dd.UseFakeGpus()
response = test_utils.diag_execute_wrapper(dd, handle)
if not check_software_result_pass(response, dcgm_structs.DCGM_SWTEST_PAGE_RETIREMENT):
test_utils.skip_test("Skipping because GPU %s does not pass software page retirement test. "
"Please verify whether the GPU is healthy." % gpuId)
# Inject enough page retirements to cause failure
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_DBE, 33, injection_offset, True)
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_SBE, 33, injection_offset, True)
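# 33 DBE-retired + 33 SBE-retired pages = 66 total, above the 60-page limit the
# software plugin enforces.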
response = test_utils.diag_execute_wrapper(dd, handle)
assert check_software_result_fail(response, dcgm_structs.DCGM_SWTEST_PAGE_RETIREMENT), \
"Expected software test to fail due to 60 total page retirements in the GPU"
# Drop to 33 DBE + 25 SBE = 58 total retired pages, below the 60-page limit
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_SBE, 25, injection_offset, True)
# Ensure diag passes now
response = test_utils.diag_execute_wrapper(dd, handle)
assert check_software_result_pass(response, dcgm_structs.DCGM_SWTEST_PAGE_RETIREMENT), \
"Expected software test to pass since there are less than 60 total retired pages"
# Reset retired pages count and verify pass
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_DBE, 0, injection_offset, True)
inject_value(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_SBE, 0, injection_offset, True)
# Ensure diag still passes
response = test_utils.diag_execute_wrapper(dd, handle)
assert check_software_result_pass(response, dcgm_structs.DCGM_SWTEST_PAGE_RETIREMENT), \
"Expected software test to pass since there are no retired pages"
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_nvvs_plugin_software_total_page_retirements_standalone(handle, gpuIds):
# Injection tests can only work with the standalone host engine
helper_check_software_page_retirements_fail_total_retirements(handle, gpuIds[0])
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled()
@test_utils.for_all_same_sku_gpus()
def test_nvvs_plugin_software_inforom_embedded(handle, gpuIds):
dd = DcgmDiag.DcgmDiag(gpuIds=gpuIds, testNamesStr="short")
response = test_utils.diag_execute_wrapper(dd, handle)
for gpuId in gpuIds:
result = response.levelOneResults[dcgm_structs.DCGM_SWTEST_INFOROM].result
assert(result == dcgm_structs.DCGM_DIAG_RESULT_PASS or result == dcgm_structs.DCGM_DIAG_RESULT_SKIP)
def test_nvvs_plugins_required_symbols():
nmPath = find_executable('nm')
if nmPath is None:
test_utils.skip_test("'nm' is not installed on the system.")
pluginPath = os.path.join(os.environ['NVVS_BIN_PATH'], 'plugins')
numErrors = 0
requiredSymbols = [
'GetPluginInterfaceVersion',
'GetPluginInfo',
'InitializePlugin',
'RunTest',
'RetrieveCustomStats',
'RetrieveResults'
]
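# These are the C entry points every NVVS plugin shared object is expected to
# export; the nm output of each plugin .so is scanned for them below.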
skipLibraries = [
'libpluginCommon.so',
'libcurand.so'
]
for cudaDirName in os.listdir(pluginPath):
cudaPluginPath = os.path.join(pluginPath, cudaDirName)
for soName in os.listdir(cudaPluginPath):
soPath = os.path.join(cudaPluginPath, soName)
#Skip symlinks
if os.path.islink(soPath):
continue
#Skip non-libraries
if not ".so" in soPath:
continue
#Skip some helper libraries that aren't plugin entry points
skip = False
for sl in skipLibraries:
if sl in soPath:
skip = True
if skip:
continue
args = [nmPath, soPath]
output = str(subprocess.check_output(args, stderr=subprocess.STDOUT))
if ': no symbols' in output:
test_utils.skip_test("The installed nm is unable to see symbols within our plugins.")
for rs in requiredSymbols:
if not rs in output:
logger.error("library %s is missing symbol %s" % (soPath, rs))
numErrors += 1
assert numErrors == 0, "Some plugins were missing symbols. See errors above."
| DCGM-master | testing/python3/tests/test_nvvs_plugins.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from _test_helpers import maybemock
from dcgm_structs import dcgmExceptionClass
from json import loads
import pydcgm
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_fields
import test_utils
import time
import os
import sys
from common.Struct import Struct
from DcgmJsonReader import DcgmJsonReader
def create_fv(key, values):
fv_values = [Struct(fieldId=key, value=val) for val in values] # Struct(values=values)
return {key: Struct(values=fv_values)}
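# For example, create_fv('key', [1, 2]) returns
# {'key': Struct(values=[Struct(fieldId='key', value=1), Struct(fieldId='key', value=2)])},
# mimicking the per-GPU {fieldId: samples} dict that CustomDataHandler receives
# (keyed by gpuId at the top level, as in test_custom_data_handler below).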
def test_convert_field_id_to_tag():
fieldTagMap = {
1: Struct(tag='field1'),
2: Struct(tag='field2'),
3: Struct(tag='field3'),
}
dr = DcgmJsonReader()
dr.m_fieldIdToInfo = fieldTagMap
for key in list(fieldTagMap.keys()):
assert (dr.ConvertFieldIdToTag(key) == fieldTagMap[key].tag) # pylint: disable=no-member
def test_prepare_json():
obj = {
'star wars': 'overrated'
}
gpuUuidMap = {
0: 'uuid0',
1: 'uuid1'
}
dr = DcgmJsonReader()
dr.m_gpuIdToUUId = gpuUuidMap
for gpuId in gpuUuidMap:
outJson = dr.PrepareJson(gpuId, obj)
outObj = loads(outJson)
assert(outObj['star wars'] == 'overrated')
assert(outObj['gpu_uuid'] == gpuUuidMap[gpuId])
def test_custom_data_handler():
namespace = Struct(called=False, result=None)
expected = {
'fieldName': 'value',
'gpu_uuid': 'this'
}
# This function tells us that the json callback is called by CustomDataHandler
# with the correct data
def setCalled(json):
namespace.called = True
namespace.result = loads(json)
gpuUuidMap = {0: 'notthis', 1: 'this'}
fvs = {1: create_fv('key', ['not last value', 'value'])}
dr = DcgmJsonReader()
dr.m_gpuIdToUUId = gpuUuidMap
dr.m_fieldIdToInfo = {'key': Struct(tag='fieldName')}
dr.CustomJsonHandler = setCalled
dr.CustomDataHandler(fvs)
assert namespace.called
assert expected == namespace.result
@maybemock.patch.multiple('logging', info=maybemock.DEFAULT, warning=maybemock.DEFAULT)
def test_json_reader_custom_json_handler(info, warning):
dr = DcgmJsonReader()
dr.CustomJsonHandler(1)
info.assert_called_with(1)
warning.assert_called_with('CustomJsonHandler has not been overriden')
| DCGM-master | testing/python3/tests/test_dcgm_json_reader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_field_helpers
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent
import dcgm_agent_internal
import logger
import test_utils
import dcgm_fields
import dcgm_fields_internal
import dcgmvalue
import time
import ctypes
import apps
from dcgm_structs import dcgmExceptionClass
import utils
import os
g_profilingFieldIds = [
dcgm_fields.DCGM_FI_PROF_GR_ENGINE_ACTIVE,
dcgm_fields.DCGM_FI_PROF_SM_ACTIVE,
dcgm_fields.DCGM_FI_PROF_SM_OCCUPANCY,
dcgm_fields.DCGM_FI_PROF_PIPE_TENSOR_ACTIVE,
dcgm_fields.DCGM_FI_PROF_DRAM_ACTIVE,
dcgm_fields.DCGM_FI_PROF_PIPE_FP64_ACTIVE,
dcgm_fields.DCGM_FI_PROF_PIPE_FP32_ACTIVE,
dcgm_fields.DCGM_FI_PROF_PIPE_FP16_ACTIVE,
dcgm_fields.DCGM_FI_PROF_PCIE_TX_BYTES,
dcgm_fields.DCGM_FI_PROF_PCIE_RX_BYTES,
dcgm_fields.DCGM_FI_PROF_NVLINK_TX_BYTES,
dcgm_fields.DCGM_FI_PROF_NVLINK_RX_BYTES,
dcgm_fields.DCGM_FI_PROF_PIPE_TENSOR_IMMA_ACTIVE,
dcgm_fields.DCGM_FI_PROF_PIPE_TENSOR_HMMA_ACTIVE,
dcgm_fields.DCGM_FI_PROF_PIPE_TENSOR_DFMA_ACTIVE,
dcgm_fields.DCGM_FI_PROF_PIPE_INT_ACTIVE,
dcgm_fields.DCGM_FI_PROF_NVOFA0_ACTIVE,
]
for fieldId in range(dcgm_fields.DCGM_FI_PROF_NVDEC0_ACTIVE, dcgm_fields.DCGM_FI_PROF_NVDEC7_ACTIVE + 1):
g_profilingFieldIds.append(fieldId)
for fieldId in range(dcgm_fields.DCGM_FI_PROF_NVJPG0_ACTIVE, dcgm_fields.DCGM_FI_PROF_NVJPG7_ACTIVE + 1):
g_profilingFieldIds.append(fieldId)
for fieldId in range(dcgm_fields.DCGM_FI_PROF_NVLINK_L0_TX_BYTES, dcgm_fields.DCGM_FI_PROF_NVLINK_L17_RX_BYTES + 1):
g_profilingFieldIds.append(fieldId)
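#Illustrative sketch (not a test, hence no test_ prefix): the profiling (DCP) field IDs
#gathered above can be watched with the same DcgmFieldGroupWatcher pattern the tests in
#this file use for regular fields. This is only meaningful on GPUs/drivers that support
#profiling metrics; treat it as a sketch rather than a verified test path.
def _example_watch_profiling_fields(handle, groupId):
handleObj = pydcgm.DcgmHandle(handle=handle)
fieldGroupObj = pydcgm.DcgmFieldGroup(handleObj, "prof_fields_example", g_profilingFieldIds)
#Same watch parameters the tests below use: 100ms updates, one day of history, no sample cap
watcher = dcgm_field_helpers.DcgmFieldGroupWatcher(handle, groupId, fieldGroupObj,
dcgm_structs.DCGM_OPERATION_MODE_AUTO, 100000, 86400.0, 0, 0)
return watcher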
def get_usec_since_1970():
sec = time.time()
return int(sec * 1000000.0)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(1)
def test_dcgm_field_values_since_agent(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
gpuId = gpuIds[0]
groupObj.AddGpu(gpuId)
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
expectedValueCount = 0
#Make a base value that is good for starters
fvGood = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
fvGood.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
fvGood.fieldId = dcgm_fields.DCGM_FI_DEV_POWER_USAGE
fvGood.status = 0
fvGood.fieldType = ord(dcgm_fields.DCGM_FT_DOUBLE)
fvGood.ts = get_usec_since_1970()
fvGood.value.dbl = 100.0
fieldGroupObj = pydcgm.DcgmFieldGroup(handleObj, "my_field_group", [fvGood.fieldId, ])
#This will throw an exception if it fails
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvGood)
expectedValueCount += 1
operationMode = dcgm_structs.DCGM_OPERATION_MODE_AUTO #Todo: Read from handleObj
updateFreq = 1000000
maxKeepAge = 86400.0
maxKeepSamples = 0
fieldWatcher = dcgm_field_helpers.DcgmFieldGroupWatcher(handle, groupObj.GetId(), fieldGroupObj,
operationMode, updateFreq, maxKeepAge, maxKeepSamples, 0)
# Using injected GPUs, so don't increment expectedValueCount here anymore
assert len(fieldWatcher.values[gpuId][fvGood.fieldId]) == expectedValueCount, "%d != %d" % (len(fieldWatcher.values[gpuId][fvGood.fieldId]), expectedValueCount)
#Cheat a bit by getting nextSinceTimestamp from the fieldWatcher. We are trying to
#insert after the last time records were read
nextSinceTimestamp = fieldWatcher._nextSinceTimestamp
#Insert more than one value at a time so we can verify GetAllSinceLastCall picks up every new sample
for numValuesPerLoop in range(0, 10):
fvGood.ts = nextSinceTimestamp
for i in range(numValuesPerLoop):
if i > 0:
fvGood.ts += 1
fvGood.value.dbl += 0.1
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvGood)
expectedValueCount += 1
newValueCount = fieldWatcher.GetAllSinceLastCall()
nextSinceTimestamp = fieldWatcher._nextSinceTimestamp
assert nextSinceTimestamp != 0, "Got 0 nextSinceTimestamp"
#A field value is always returned. If no data exists, then a single field value is returned with status set to NO DATA
if numValuesPerLoop == 0:
assert newValueCount == 1, "newValueCount %d != 1" % newValueCount
else:
assert newValueCount == numValuesPerLoop, "newValueCount %d != numValuesPerLoop %d" % (newValueCount, numValuesPerLoop)
def helper_dcgm_values_since(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
gpuId = gpuIds[0]
fieldId = dcgm_fields.DCGM_FI_DEV_BRAND #Should be a field id in fieldCollectionId
fieldGroupObj = pydcgm.DcgmFieldGroup(handleObj, "my_field_group", [fieldId, ])
operationMode = dcgm_structs.DCGM_OPERATION_MODE_AUTO #Todo: Read from handleObj
updateFreq = 100000
maxKeepAge = 86400.0
maxKeepSamples = 0
#Watch and read initial values
fieldWatcher = dcgm_field_helpers.DcgmFieldGroupWatcher(handle, groupObj.GetId(), fieldGroupObj,
operationMode, updateFreq, maxKeepAge, maxKeepSamples, 0)
firstReadSize = len(fieldWatcher.values[gpuId][fieldId])
assert firstReadSize > 0, "Expected values after first read. Got 0"
sleepFor = (updateFreq / 1000000.0) * 2 #Sleep for 2x update freq to allow an update to occur
time.sleep(sleepFor)
numRead = fieldWatcher.GetAllSinceLastCall()
secondReadSize = len(fieldWatcher.values[gpuId][fieldId])
assert fieldWatcher._nextSinceTimestamp != 0, "Expected nonzero nextSinceTimestamp"
assert numRead > 0, "Expected callbacks to be called from dcgmEngineGetValuesSince"
assert secondReadSize > firstReadSize, "Expected more records. 2nd %d. 1st %d" % (secondReadSize, firstReadSize)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_values_since_agent(handle, gpuIds):
helper_dcgm_values_since(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_values_since_remote(handle, gpuIds):
helper_dcgm_values_since(handle, gpuIds)
def helper_dcgm_values_since_entities(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
entityId = gpuIds[0]
entityGroupId = dcgm_fields.DCGM_FE_GPU
fieldId = dcgm_fields.DCGM_FI_DEV_BRAND #Should be a field id in fieldCollectionId
fieldGroupObj = pydcgm.DcgmFieldGroup(handleObj, "my_field_group", [fieldId, ])
operationMode = dcgm_structs.DCGM_OPERATION_MODE_AUTO #Todo: Read from handleObj
updateFreq = 100000
maxKeepAge = 86400.0
maxKeepSamples = 0
#Watch and read initial values
fieldWatcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(handle, groupObj.GetId(), fieldGroupObj,
operationMode, updateFreq, maxKeepAge, maxKeepSamples, 0)
firstReadSize = len(fieldWatcher.values[entityGroupId][entityId][fieldId])
assert firstReadSize > 0, "Expected values after first read. Got 0"
sleepFor = (updateFreq / 1000000.0) * 2 #Sleep for 2x update freq to allow an update to occur
time.sleep(sleepFor)
numRead = fieldWatcher.GetAllSinceLastCall()
secondReadSize = len(fieldWatcher.values[entityGroupId][entityId][fieldId])
assert fieldWatcher._nextSinceTimestamp != 0, "Expected nonzero nextSinceTimestamp"
assert numRead > 0, "Expected callbacks to be called from dcgmEngineGetValuesSince"
assert secondReadSize > firstReadSize, "Expected more records. 2nd %d. 1st %d" % (secondReadSize, firstReadSize)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_values_since_entities_agent(handle, gpuIds):
helper_dcgm_values_since_entities(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_values_since_entities_remote(handle, gpuIds):
helper_dcgm_values_since_entities(handle, gpuIds)
def helper_dcgm_entity_get_latest_values(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
gpuId = gpuIds[0]
fieldIds = [dcgm_fields.DCGM_FI_DEV_BRAND, dcgm_fields.DCGM_FI_DEV_GPU_TEMP]
fieldGroupObj = pydcgm.DcgmFieldGroup(handleObj, "my_field_group", fieldIds)
updateFreq = 100000
maxKeepAge = 86400.0
maxKeepSamples = 0
groupObj.samples.WatchFields(fieldGroupObj, updateFreq, maxKeepAge, maxKeepSamples)
#Make sure our new fields have updated
systemObj.UpdateAllFields(True)
fieldValues = dcgm_agent.dcgmEntityGetLatestValues(handle, dcgm_fields.DCGM_FE_GPU, gpuId, fieldIds)
for i, fieldValue in enumerate(fieldValues):
logger.info(str(fieldValue))
assert(fieldValue.version != 0), "idx %d Version was 0" % i
assert(fieldValue.fieldId == fieldIds[i]), "idx %d fieldValue.fieldId %d != fieldIds[i] %d" % (i, fieldValue.fieldId, fieldIds[i])
assert(fieldValue.status == dcgm_structs.DCGM_ST_OK), "idx %d status was %d" % (i, fieldValue.status)
assert(fieldValue.ts != 0), "idx %d timestamp was 0" % i
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_entity_get_latest_values_embedded(handle, gpuIds):
helper_dcgm_entity_get_latest_values(handle, gpuIds)
'''
Verify that the returned field values match the requested ones for dcgmEntitiesGetLatestValues
'''
def helper_validate_entities_lastest_values_request(handle, gpuIds, fieldIds):
entityPairList = []
responses = {}
for gpuId in gpuIds:
entityPairList.append(dcgm_structs.c_dcgmGroupEntityPair_t(dcgm_fields.DCGM_FE_GPU, gpuId))
for fieldId in fieldIds:
dictKey = "%d:%d:%d" % (dcgm_fields.DCGM_FE_GPU, gpuId, fieldId)
responses[dictKey] = 0 #0 responses so far
flags = dcgm_structs.DCGM_FV_FLAG_LIVE_DATA
fieldValues = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entityPairList, fieldIds, flags)
for i, fieldValue in enumerate(fieldValues):
logger.info(str(fieldValue))
assert(fieldValue.version == dcgm_structs.dcgmFieldValue_version2), "idx %d Version was x%X. not x%X" % (i, fieldValue.version, dcgm_structs.dcgmFieldValue_version2)
dictKey = "%d:%d:%d" % (fieldValue.entityGroupId, fieldValue.entityId, fieldValue.fieldId)
assert dictKey in responses and responses[dictKey] == 0, "Mismatch on dictKey %s. Responses: %s" % (dictKey, str(responses))
assert(fieldValue.status == dcgm_structs.DCGM_ST_OK), "idx %d status was %d" % (i, fieldValue.status)
assert(fieldValue.ts != 0), "idx %d timestamp was 0" % i
assert(fieldValue.unused == 0), "idx %d unused was %d" % (i, fieldValue.unused)
responses[dictKey] += 1
def helper_dcgm_entities_get_latest_values(handle, gpuIds):
#Request various combinations of DCGM field IDs. We're mixing field IDs that
#have NVML mappings and those that don't in order to try and cause failures
#First, just field IDs that don't have mappings
nonMappedFieldIds = [dcgm_fields.DCGM_FI_DEV_BRAND,
dcgm_fields.DCGM_FI_DEV_GPU_TEMP,
dcgm_fields.DCGM_FI_DEV_SM_CLOCK,
dcgm_fields.DCGM_FI_DEV_MEM_CLOCK,
dcgm_fields.DCGM_FI_DEV_VIDEO_CLOCK]
fieldIds = nonMappedFieldIds
helper_validate_entities_lastest_values_request(handle, gpuIds, fieldIds)
#Now just field IDs that have mappings
fieldIds = []
for fieldId in range(dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL, dcgm_fields.DCGM_FI_DEV_ECC_DBE_AGG_TEX):
fieldIds.append(fieldId)
helper_validate_entities_lastest_values_request(handle, gpuIds, fieldIds)
#Now a mix of both
fieldIds = []
for i in range(len(nonMappedFieldIds)):
fieldIds.append(nonMappedFieldIds[i])
fieldIds.append(i + dcgm_fields.DCGM_FI_DEV_ECC_SBE_VOL_TOTAL)
helper_validate_entities_lastest_values_request(handle, gpuIds, fieldIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_entities_get_latest_values_embedded(handle, gpuIds):
helper_dcgm_entities_get_latest_values(handle, gpuIds)
#Skip this test when running in injection-only mode
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled() # This test relies on accounting data, which doesn't work with MIG mode
def test_dcgm_live_accounting_data(handle, gpuIds):
if test_utils.is_nvswitch_detected():
test_utils.skip_test("Skipping GPU Cuda tests on NvSwitch systems since they require the FM to be loaded")
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_DEFAULT, "mygroup")
gpuId = dcgm_agent.dcgmGroupGetInfo(handle, groupId).entityList[0].entityId
#Get the busid of the GPU
fieldId = dcgm_fields.DCGM_FI_DEV_PCI_BUSID
updateFreq = 1000000
maxKeepAge = 3600.0 #one hour
maxKeepEntries = 0 #no limit
dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries)
dcgm_agent.dcgmUpdateAllFields(handle, 1)
values = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, gpuId, [fieldId,])
busId = values[0].value.str
fieldId = dcgm_fields.DCGM_FI_DEV_ACCOUNTING_DATA
updateFreq = 100000
maxKeepAge = 3600.0 #one hour
maxKeepEntries = 0 #no limit
try:
dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_REQUIRES_ROOT) as e:
test_utils.skip_test("Skipping test for non-root due to accounting mode not being watched ahead of time.")
return #Nothing further to do for non-root without accounting data
#Start a cuda app so we have something to be accounted
appTimeout = 1000
appParams = ["--ctxCreate", busId,
"--busyGpu", busId, str(appTimeout),
"--ctxDestroy", busId]
app = apps.CudaCtxCreateAdvancedApp(appParams, env=test_utils.get_cuda_visible_devices_env(handle, gpuId))
app.start(appTimeout*2)
appPid = app.getpid()
#Force an update
dcgm_agent.dcgmUpdateAllFields(handle, 1)
app.wait()
#Wait for RM to think the app has exited. Querying immediately after app.wait() did not work
time.sleep(1.0)
#Force an update after app exits
dcgm_agent.dcgmUpdateAllFields(handle, 1)
maxCount = 2000 #Current accounting buffer is 1920
startTs = 0
endTs = 0
values = dcgm_agent_internal.dcgmGetMultipleValuesForField(handle, gpuId, fieldId, maxCount, startTs, endTs, dcgm_structs.DCGM_ORDER_ASCENDING)
foundOurPid = False
#There could be multiple accounted processes in our data structure. Look for the PID we just ran
for value in values:
accStats = dcgm_structs.c_dcgmDevicePidAccountingStats_v1()
ctypes.memmove(ctypes.addressof(accStats), value.value.blob, accStats.FieldsSizeof())
#print "Pid %d: %s" % (accStats.pid, str(accStats))
if appPid == accStats.pid:
#Found it!
foundOurPid = True
break
assert foundOurPid, "Did not find app PID of %d in list of %d PIDs for gpuId %d" % (appPid, len(values), gpuId)
def helper_dcgm_values_pid_stats(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group (only need single GPU)
gpuId = gpuIds[0]
groupObj.AddGpu(gpuId)
busId = _get_gpu_bus_id(gpuId, handle)
#watch the process info fields
if utils.is_root():
groupObj.stats.WatchPidFields(updateFreq=100000, maxKeepAge=3600, maxKeepSamples=0)
else:
try:
groupObj.stats.WatchPidFields(updateFreq=100000, maxKeepAge=3600, maxKeepSamples=0)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_REQUIRES_ROOT) as e:
return
#Start a cuda app so we have something to be accounted
#Start a 2nd app that will appear in the compute app list
appTimeout = 5000
app = _create_cuda_app_for_pid_stats(handle, busId, appTimeout, gpuId)
app2 = _create_cuda_app_for_pid_stats(handle, busId, appTimeout, gpuId)
app3 = _create_cuda_assert_app_for_pid_stats(handle, busId, appTimeout, gpuId)
app.wait()
app2.wait()
app3.wait()
app3.terminate()
app3.validate()
#Wait for RM to think the app has exited. Querying immediately after app.wait() did not work
time.sleep(1.0)
_assert_pid_utilization_rate(systemObj, groupObj, app.getpid())
_assert_other_compute_pid_seen(systemObj, groupObj, app.getpid(), app2.getpid())
_assert_pid_cuda_assert_occurence(systemObj, groupObj, app.getpid())
def _assert_pid_cuda_assert_occurence(dcgmSystem, dcgmGroup, appPid):
''' Force an update and verify that an XID error occurred during the life of the process '''
dcgmSystem.UpdateAllFields(1)
pidInfo = dcgmGroup.stats.GetPidInfo(appPid)
assert pidInfo.summary.numXidCriticalErrors > 0, "At least one Xid error should have been caught, but (%d) were found" % pidInfo.summary.numXidCriticalErrors
for index in range(pidInfo.summary.numXidCriticalErrors):
assert pidInfo.summary.xidCriticalErrorsTs[index] != 0, "Unable to find a valid timestamp for the Xid Error %d" % index
logger.debug("Xid Timestamp: " + str(pidInfo.summary.xidCriticalErrorsTs[index]))
def _assert_pid_utilization_rate(dcgmSystem, dcgmGroup, appPid):
'''Force an update and then assert that utilization rates are recorded for a PID'''
dcgmSystem.UpdateAllFields(1)
pidInfo = dcgmGroup.stats.GetPidInfo(appPid)
assert pidInfo.gpus[0].processUtilization.pid == appPid, " Expected PID %d, got PID %d" % (appPid, pidInfo.gpus[0].processUtilization.pid)
utilizationRate = 0
if pidInfo.gpus[0].processUtilization.smUtil > 0 :
utilizationRate = 1
if pidInfo.gpus[0].processUtilization.memUtil > 0 :
utilizationRate = 1
#TODO: DCGM-1418 - Uncomment the following line again
#assert utilizationRate, "Expected non-zero utilization rates for the PID %d" %appPid
def _assert_other_compute_pid_seen(dcgmSystem, dcgmGroup, app1Pid, app2Pid):
'''Force an update and then assert that PID 1's stats include PID 2'''
dcgmSystem.UpdateAllFields(1)
pidInfo = dcgmGroup.stats.GetPidInfo(app1Pid)
assert pidInfo.summary.numOtherComputePids >= 1, "Expected other pid of %d" % app2Pid
#Check for the expected PID in the range of OtherCompute Pids in the process stats
pidFound = False
for pid in range(0, pidInfo.summary.numOtherComputePids):
if app2Pid == pidInfo.summary.otherComputePids[pid]:
pidFound = True
break
assert pidFound, "Expected other compute pid %d, number of other Compute Pids - %d \
. PIDs Found %d, %d, %d , %d, %d, %d, %d, %d, %d, %d"\
% (app2Pid, pidInfo.summary.numOtherComputePids, pidInfo.summary.otherComputePids[0], pidInfo.summary.otherComputePids[1],\
pidInfo.summary.otherComputePids[2], pidInfo.summary.otherComputePids[3], pidInfo.summary.otherComputePids[4], pidInfo.summary.otherComputePids[5],\
pidInfo.summary.otherComputePids[6], pidInfo.summary.otherComputePids[7], pidInfo.summary.otherComputePids[8], pidInfo.summary.otherComputePids[9])
def _create_cuda_app_for_pid_stats(handle, busId, appTimeout, gpuId):
app = apps.CudaCtxCreateAdvancedApp(["--ctxCreate", busId,
"--busyGpu", busId, str(appTimeout),
"--ctxDestroy", busId], env=test_utils.get_cuda_visible_devices_env(handle, gpuId))
app.start(appTimeout*2)
return app
def _create_cuda_assert_app_for_pid_stats(handle, busId, appTimeout, gpuId):
app = apps.RunCudaAssert(["--ctxCreate", busId,
"--cuMemAlloc", busId, "200",
"--cuMemFree", busId,
"--assertGpu", busId, str(appTimeout)], env=test_utils.get_cuda_visible_devices_env(handle, gpuId))
app.start(appTimeout*2)
return app
def _get_gpu_bus_id(gpuId, handle):
#Get the busid of the GPU
fieldId = dcgm_fields.DCGM_FI_DEV_PCI_BUSID
updateFreq = 100000
maxKeepAge = 3600.0 #one hour
maxKeepEntries = 0 #no limit
dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries)
dcgm_agent.dcgmUpdateAllFields(handle, 1)
values = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, gpuId, [fieldId,])
return values[0].value.str
def helper_dcgm_values_pid_stats_realtime(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group (only need single GPU)
gpuId = gpuIds[0]
groupObj.AddGpu(gpuId)
busId = _get_gpu_bus_id(gpuId, handle)
#watch the process info fields
try:
groupObj.stats.WatchPidFields(updateFreq=100000,
maxKeepAge=3600,
maxKeepSamples=0)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_REQUIRES_ROOT) as e:
test_utils.skip_test("Skipping test for non-root due to accounting mode not being watched ahead of time.")
return
#Start a cuda app so we have something to be accounted
appTimeout = 10000
app = _create_cuda_app_for_pid_stats(handle, busId, appTimeout, gpuId)
appPid = app.getpid()
app.stdout_readtillmatch(lambda s: s.find("Calling cuInit") != -1)
#Start a 2nd app that will appear in the compute app list
app2 = _create_cuda_app_for_pid_stats(handle, busId, appTimeout, gpuId)
app2Pid = app2.getpid()
app2.stdout_readtillmatch(lambda s: s.find("Calling cuInit") != -1)
time.sleep(1.0)
_assert_other_compute_pid_seen(systemObj, groupObj, appPid, app2Pid)
app.wait()
app2.wait()
_assert_pid_utilization_rate(systemObj, groupObj,appPid)
_assert_pid_utilization_rate(systemObj, groupObj,app2Pid)
## Make sure the stats can be fetched after the process is complete
for count in range(0,2):
time.sleep(1.0)
_assert_other_compute_pid_seen(systemObj, groupObj, appPid, app2Pid)
#Skip this test when running in injection-only mode
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled() # This test relies on accounting data, which doesn't work with MIG mode
def test_dcgm_values_pid_stats_embedded(handle, gpuIds):
if test_utils.is_nvswitch_detected():
test_utils.skip_test("Skipping GPU Cuda tests on NvSwitch systems since they require the FM to be loaded")
helper_dcgm_values_pid_stats(handle, gpuIds)
#Skip this test when running in injection-only mode
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled() # This test relies on accounting data, which doesn't work with MIG mode
def test_dcgm_live_pid_stats_remote(handle, gpuIds):
if test_utils.is_nvswitch_detected():
test_utils.skip_test("Skipping GPU Cuda tests on NvSwitch systems since they require the FM to be loaded")
helper_dcgm_values_pid_stats(handle, gpuIds)
#Skip this test when running in injection-only mode
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled() # This test relies on accounting data, which doesn't work with MIG mode
def test_dcgm_values_pid_stats_realtime_embedded(handle, gpuIds):
if test_utils.is_nvswitch_detected():
test_utils.skip_test("Skipping GPU Cuda tests on NvSwitch systems since they require the FM to be loaded")
helper_dcgm_values_pid_stats_realtime(handle, gpuIds)
#Skip this test when running in injection-only mode
@test_utils.run_with_standalone_host_engine(30)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled() # This test relies on accounting data, which doesn't work with MIG mode
def test_dcgm_values_pid_stats_realtime_remote(handle, gpuIds):
if test_utils.is_nvswitch_detected():
test_utils.skip_test("Skipping GPU Cuda tests on NvSwitch systems since they require the FM to be loaded")
helper_dcgm_values_pid_stats_realtime(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_values_job_stats_remove(handle, gpuIds):
if test_utils.is_nvswitch_detected():
test_utils.skip_test("Skipping GPU Cuda tests on NvSwitch systems since they require the FM to be loaded")
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
jobId = "my_important_job"
#Fetch an unknown job
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NO_DATA)):
groupObj.stats.RemoveJob(jobId)
#Stop an unknown job
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NO_DATA)):
groupObj.stats.StopJobStats(jobId)
#Watch the underlying job fields so that the underlying job stats work
groupObj.stats.WatchJobFields(10000000, 3600.0, 0)
#Add the job then remove it
groupObj.stats.StartJobStats(jobId)
groupObj.stats.RemoveJob(jobId)
#Remove it again. This will fail
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NO_DATA)):
groupObj.stats.RemoveJob(jobId)
#Re-use the key again
groupObj.stats.StartJobStats(jobId)
groupObj.stats.StopJobStats(jobId)
#Use the mass-remove this time
groupObj.stats.RemoveAllJobs()
#Remove the job we deleted with RemoveAllJobs(). This should fail
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NO_DATA)):
groupObj.stats.RemoveJob(jobId)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_disabled() # This test relies on accounting data, which doesn't work with MIG mode
def test_dcgm_values_job_stats_get(handle, gpuIds):
if test_utils.is_nvswitch_detected():
test_utils.skip_test("Skipping GPU Cuda tests on NvSwitch systems since they require the FM to be loaded")
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
gpuId = gpuIds[-1]
groupObj.AddGpu(gpuId)
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
#Get the busid of the GPU
fieldId = dcgm_fields.DCGM_FI_DEV_PCI_BUSID
updateFreq = 100000
maxKeepAge = 3600.0 #one hour
maxKeepEntries = 0 #no limit
dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries)
dcgm_agent.dcgmUpdateAllFields(handle, 1)
values = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, gpuId, [fieldId,])
busId = values[0].value.str
appTimeout = 2000 #Give the cache manager a chance to record stats
jobId = "jobIdTest"
## Notify DCGM to start collecting stats
try:
groupObj.stats.WatchJobFields(updateFreq, maxKeepAge, maxKeepEntries)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_REQUIRES_ROOT) as e:
test_utils.skip_test("Skipping test for non-root due to accounting mode not being watched ahead of time.")
return
groupObj.stats.StartJobStats(jobId)
environ = test_utils.get_cuda_visible_devices_env(handle, gpuId)
#Start a few cuda apps so we have something to be accounted
appParams = ["--ctxCreate", busId,
"--busyGpu", busId, str(appTimeout),
"--ctxDestroy", busId]
xidArgs = ["--ctxCreate", busId,
"--cuMemAlloc", busId, "200",
"--cuMemFree", busId,
"--assertGpu", busId, str(appTimeout)]
#app.start(appTimeout*2)
#app_xid.start(appTimeout*2)
appPid = []
appObjs = []
#Start all three apps at once
for i in range (0,3):
app = apps.CudaCtxCreateAdvancedApp(appParams, env=environ)
app.start(appTimeout*2)
app_xid = apps.RunCudaAssert(xidArgs, env=environ)
app_xid.start(appTimeout)
appPid.append(app.getpid())
appObjs.append(app)
appObjs.append(app_xid)
for app in appObjs:
app.wait()
for app_xid in appObjs:
app_xid.wait()
app_xid.terminate()
app_xid.validate()
#Wait for RM to think the app has exited. Querying immediately after app.wait() did not work
time.sleep(1.0)
## Notify DCGM to stop collecting stats
groupObj.stats.StopJobStats(jobId)
#Force an update after app exits
systemObj.UpdateAllFields(1)
# get job stats
jobInfo = groupObj.stats.GetJobStats(jobId)
pidFound = 0
assert jobInfo.summary.numComputePids >= 3, "Not all CUDA processes were captured during the job. Expected 3, got %d" % jobInfo.summary.numComputePids
for i in range (0,2):
pidFound = 0
for j in range (0,dcgm_structs.DCGM_MAX_PID_INFO_NUM):
if appPid[i] == jobInfo.summary.computePids[j].pid:
pidFound = 1
break
assert pidFound, "CUDA Process PID not captured during job. Missing: %d" % appPid[i]
#Validate the values in the summary and the gpu Info
assert jobInfo.summary.energyConsumed >= jobInfo.gpus[0].energyConsumed, "energyConsumed in the job stat summary %d is less than the one consumed by a gpu %d" % \
(jobInfo.summary.energyConsumed, jobInfo.gpus[0].energyConsumed)
assert jobInfo.summary.pcieReplays >= jobInfo.gpus[0].pcieReplays, "pcie replays in the job stat summary %d is less than the one found by a gpu %d" %\
(jobInfo.summary.pcieReplays, jobInfo.gpus[0].pcieReplays)
assert jobInfo.summary.startTime == jobInfo.gpus[0].startTime, "Start Time in the job stat summary %d is different than the one stored in gpu Info %d" %\
(jobInfo.summary.startTime, jobInfo.gpus[0].startTime)
assert jobInfo.summary.endTime == jobInfo.gpus[0].endTime, "End Time in the job stat summary %d is different than the one stored in gpu Info %d" %\
(jobInfo.summary.endTime, jobInfo.gpus[0].endTime)
assert jobInfo.summary.eccSingleBit >= jobInfo.gpus[0].eccSingleBit, "ecc single bit in the job stat summary %d is less than the one stored in a gpu Info %d" %\
(jobInfo.summary.eccSingleBit, jobInfo.gpus[0].eccSingleBit)
assert jobInfo.summary.eccDoubleBit >= jobInfo.gpus[0].eccDoubleBit, "ecc double bit in the job stat summary %d is less than the one stored in a gpu Info %d" %\
(jobInfo.summary.eccDoubleBit, jobInfo.gpus[0].eccDoubleBit)
assert jobInfo.summary.thermalViolationTime >= jobInfo.gpus[0].thermalViolationTime, "thermal violation time in the job stat summary %d is less than the one stored in a gpu Info %d" %\
(jobInfo.summary.thermalViolationTime, jobInfo.gpus[0].thermalViolationTime)
assert jobInfo.summary.powerViolationTime >= jobInfo.gpus[0].powerViolationTime, "power violation time in the job stat summary %d is less than the one stored in a gpu Info %d" %\
(jobInfo.summary.powerViolationTime, jobInfo.gpus[0].powerViolationTime)
assert jobInfo.summary.maxGpuMemoryUsed >= jobInfo.gpus[0].maxGpuMemoryUsed, "Max GPU memory used in the job stat summary %d is less than the one stored in a gpu Info %d" %\
(jobInfo.summary.maxGpuMemoryUsed, jobInfo.gpus[0].maxGpuMemoryUsed)
assert jobInfo.summary.syncBoostTime >= jobInfo.gpus[0].syncBoostTime, "Sync Boost time in the job stat summary %d is less than the one stored in a gpu Info %d" %\
(jobInfo.summary.syncBoostTime, jobInfo.gpus[0].syncBoostTime)
assert jobInfo.summary.overallHealth == jobInfo.gpus[0].overallHealth, "Overall health in the job summary (%d) is different from the one in the gpu Info (%d)" %\
(jobInfo.summary.overallHealth, jobInfo.gpus[0].overallHealth)
assert jobInfo.summary.numXidCriticalErrors == jobInfo.gpus[0].numXidCriticalErrors, "Xid error count in the job summary (%d) does not match the count in the gpu Info (%d)" %\
(jobInfo.summary.numXidCriticalErrors, jobInfo.gpus[0].numXidCriticalErrors)
assert jobInfo.summary.numXidCriticalErrors > 0, "At least one Xid error should have been caught, but (%d) were found" % jobInfo.summary.numXidCriticalErrors
for index in range(jobInfo.summary.numXidCriticalErrors):
assert jobInfo.summary.xidCriticalErrorsTs[index] != 0, "Unable to find a valid timestamp for the Xid Error %d" % index
logger.debug("Xid Timestamp: " + str(jobInfo.summary.xidCriticalErrorsTs[index]))
#Start another job with the same job ID and it should return DUPLICATE KEY Error
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_DUPLICATE_KEY)):
groupObj.stats.StartJobStats(jobId)
#print str(jobInfo.summary)
#print ""
#print str(jobInfo.gpus[0])
@test_utils.run_with_embedded_host_engine()
def test_dcgm_field_by_id(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
fieldInfo = systemObj.fields.GetFieldById(dcgm_fields.DCGM_FI_DEV_BRAND)
assert fieldInfo.fieldId == dcgm_fields.DCGM_FI_DEV_BRAND, "Field id %d" % fieldInfo.fieldId
assert fieldInfo.fieldType == dcgm_fields.DCGM_FT_STRING, "Field type %s" % fieldInfo.fieldType
assert fieldInfo.scope == dcgm_fields.DCGM_FS_DEVICE, "Field scope %d" % fieldInfo.scope
assert fieldInfo.tag == "brand", "Field tag %s" % fieldInfo.tag
bogusFieldInfo = systemObj.fields.GetFieldById(dcgm_fields.DCGM_FI_MAX_FIELDS)
assert bogusFieldInfo == None, "Expected null fieldInfo for dcgm_fields.DCGM_FI_MAX_FIELDS"
@test_utils.run_with_embedded_host_engine()
def test_dcgm_field_by_tag(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
fieldInfo = systemObj.fields.GetFieldByTag('brand')
assert fieldInfo.fieldId == dcgm_fields.DCGM_FI_DEV_BRAND, "Field id %d" % fieldInfo.fieldId
assert fieldInfo.fieldType == dcgm_fields.DCGM_FT_STRING, "Field type %s" % fieldInfo.fieldType
assert fieldInfo.scope == dcgm_fields.DCGM_FS_DEVICE, "Field scope %d" % fieldInfo.scope
assert fieldInfo.tag == "brand", "Field tag %s" % fieldInfo.tag
bogusFieldInfo = systemObj.fields.GetFieldByTag('no_way_this_is_a_field')
assert bogusFieldInfo == None, "Expected null fieldInfo for bogus tag"
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_fields_all_fieldids_valid(handle, gpuIds):
"""
Test that any field IDs that are defined are retrievable
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
#Some field IDs don't generate data by default. For instance, accounting data only works if accounting
#mode is enabled and processes are running. Field IDs in this list fall into this category and have
#already been confirmed to generate data within the cache manager
exceptionFieldIds = [dcgm_fields_internal.DCGM_FI_DEV_COMPUTE_PIDS,
dcgm_fields.DCGM_FI_DEV_ACCOUNTING_DATA,
dcgm_fields_internal.DCGM_FI_DEV_GRAPHICS_PIDS,
dcgm_fields.DCGM_FI_DEV_XID_ERRORS,
dcgm_fields.DCGM_FI_DEV_VGPU_VM_ID,
dcgm_fields.DCGM_FI_DEV_VGPU_VM_NAME,
dcgm_fields.DCGM_FI_DEV_VGPU_TYPE,
dcgm_fields.DCGM_FI_DEV_VGPU_UUID,
dcgm_fields.DCGM_FI_DEV_VGPU_DRIVER_VERSION,
dcgm_fields.DCGM_FI_DEV_VGPU_MEMORY_USAGE,
dcgm_fields.DCGM_FI_DEV_VGPU_INSTANCE_LICENSE_STATE,
dcgm_fields.DCGM_FI_DEV_VGPU_FRAME_RATE_LIMIT,
dcgm_fields.DCGM_FI_DEV_VGPU_PCI_ID,
dcgm_fields.DCGM_FI_DEV_VGPU_ENC_STATS,
dcgm_fields.DCGM_FI_DEV_VGPU_ENC_SESSIONS_INFO,
dcgm_fields.DCGM_FI_DEV_VGPU_FBC_STATS,
dcgm_fields.DCGM_FI_DEV_VGPU_FBC_SESSIONS_INFO,
dcgm_fields.DCGM_FI_DEV_VGPU_VM_GPU_INSTANCE_ID,
dcgm_fields.DCGM_FI_DEV_GPU_NVLINK_ERRORS,
dcgm_fields_internal.DCGM_FI_DEV_GPU_UTIL_SAMPLES,
dcgm_fields_internal.DCGM_FI_DEV_MEM_COPY_UTIL_SAMPLES]
exceptionFieldIds.extend(g_profilingFieldIds)
migFieldIds = [dcgm_fields.DCGM_FI_DEV_MIG_GI_INFO,
dcgm_fields.DCGM_FI_DEV_MIG_CI_INFO,
dcgm_fields.DCGM_FI_DEV_MIG_ATTRIBUTES]
baseFieldIds = [dcgm_fields.DCGM_FI_DEV_FB_TOTAL,
dcgm_fields.DCGM_FI_DEV_FB_FREE]
gpuId = gpuIds[0]
fieldIdVars = {}
ignoreFieldIds = set(('DCGM_FI_MAX_FIELDS', 'DCGM_FI_UNKNOWN', 'DCGM_FI_FIRST_NVSWITCH_FIELD_ID',
'DCGM_FI_LAST_NVSWITCH_FIELD_ID'))
#Find all of the numerical field IDs by looking at the dcgm_fields module's attributes
for moduleAttribute in list(dcgm_fields.__dict__.keys()):
if moduleAttribute.find("DCGM_FI_") == 0 and moduleAttribute not in ignoreFieldIds:
fieldIdVars[moduleAttribute] = dcgm_fields.__dict__[moduleAttribute]
migModeEnabled = test_utils.is_mig_mode_enabled()
numErrors = 0
#Add watches on all known fieldIds
for fieldIdName in list(fieldIdVars.keys()):
fieldId = fieldIdVars[fieldIdName]
updateFreq = 10 * 1000000
maxKeepAge = 3600.0
maxKeepEntries = 100
try:
if migModeEnabled and fieldId == dcgm_fields.DCGM_FI_DEV_ACCOUNTING_DATA:
# We cannot enable accounting mode with MIG mode enabled - CUDANVML-153
continue
dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_REQUIRES_ROOT):
logger.info("Skipping field %d that requires root" % fieldId)
except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM):
logger.error("Unable to watch field %s (id %d). Unknown field?" % (fieldIdName, fieldId))
numErrors += 1
#Force all fields to possibly update so we can fetch data for them
handleObj.GetSystem().UpdateAllFields(1)
#fieldIdVars = {'DCGM_FI_GPU_TOPOLOGY_NVLINK' : 61}
for fieldIdName in list(fieldIdVars.keys()):
fieldId = fieldIdVars[fieldIdName]
if (not migModeEnabled) and (fieldId in migFieldIds):
continue #Don't check MIG fields if MIG mode is disabled
#Verify that we can fetch field metadata. This call will throw an exception on error
fieldMeta = dcgm_fields.DcgmFieldGetById(fieldId)
#Fetch each fieldId individually so we can check for errors
fieldValue = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, gpuId, [fieldId, ])[0]
if fieldId in exceptionFieldIds:
continue #Don't check fields that are excluded from testing
#Skip NvSwitch fields since they are pushed from fabric manager rather than polled
if fieldId >= dcgm_fields.DCGM_FI_FIRST_NVSWITCH_FIELD_ID and fieldId <= dcgm_fields.DCGM_FI_LAST_NVSWITCH_FIELD_ID:
continue
if fieldValue.status == dcgm_structs.DCGM_ST_NOT_SUPPORTED:
#It's ok for fields to not be supported. We got a useful error code
logger.info("field %s (id %d) returned st DCGM_ST_NOT_SUPPORTED (OK)" % (fieldIdName, fieldId))
elif migModeEnabled and ((fieldId == dcgm_fields.DCGM_FI_DEV_MIG_CI_INFO) and (fieldValue.status == dcgm_structs.DCGM_ST_NO_DATA)):
logger.info("field %s (id %d) returned st DCGM_ST_NO_DATA (OK), no compute instances present" % (fieldIdName, fieldId))
elif fieldValue.status != dcgm_structs.DCGM_ST_OK:
logger.error("No value for field %s (id %d). status: %d" % (fieldIdName, fieldId, fieldValue.status))
numErrors += 1
# check certain baseline fields for actual values
if fieldId in baseFieldIds:
fv = dcgm_field_helpers.DcgmFieldValue(fieldValue)
assert fieldValue.value.i64 > 0 and not fv.isBlank, "base field %d is 0 or blank" % fieldId
assert numErrors == 0, "Got %d errors" % numErrors
def test_dcgm_verify_manual_mode_behavior():
"""
Test to verify that field values cannot be
retrieved automatically in manual operation mode
"""
# Gets the handle and set operation mode to manual
handleObj = pydcgm.DcgmHandle(ipAddress=None, opMode=dcgm_structs.DCGM_OPERATION_MODE_MANUAL)
# Creates a default group with all GPUs
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
gpuId = groupObj.GetGpuIds()[0]
fieldId = dcgm_fields.DCGM_FI_DEV_POWER_USAGE
updateFreq = 100000 #100 milliseconds
maxKeepAge = 3600.0 #one hour
maxKeepEntries = 0 #no limit
# watch the fieldvalues list
dcgm_agent_internal.dcgmWatchFieldValue(handleObj.handle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries)
# trigger update for all fields once, wait for it complete and get timestamp
systemObj.UpdateAllFields(waitForUpdate=True)
initialValues = dcgm_agent_internal.dcgmGetLatestValuesForFields(handleObj.handle, gpuId, [fieldId,])
firstTimestamp = initialValues[0].ts
for i in range(1,10):
values = dcgm_agent_internal.dcgmGetLatestValuesForFields(handleObj.handle, gpuId, [fieldId,])
otherTimestamp = values[0].ts
time.sleep(300.0 / 1000.0) # sleep for 300 milliseconds
assert firstTimestamp == otherTimestamp, "Fields got updated automatically, that should not happen in MANUAL OPERATION MODE"
# trigger update manually to make sure fields got updated
# and have a different timestamp now
systemObj.UpdateAllFields(waitForUpdate=True)
time.sleep(300.0 / 1000.0)
postUpdateValues = dcgm_agent_internal.dcgmGetLatestValuesForFields(handleObj.handle, gpuId, [fieldId,])
latestTimestamp = postUpdateValues[0].ts
handleObj.Shutdown()
assert firstTimestamp != latestTimestamp, "Fields did not get updated after manually triggering an update"
def test_dcgm_verify_auto_mode_behavior():
"""
Test to verify that field values can be retrieved
automatically in auto operation mode
"""
# Gets the handle and set operation mode to automatic
handleObj = pydcgm.DcgmHandle(ipAddress=None, opMode=dcgm_structs.DCGM_OPERATION_MODE_AUTO)
# Creates a default group with all GPUs
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
gpuId = groupObj.GetGpuIds()[0]
fieldId = dcgm_fields.DCGM_FI_DEV_POWER_USAGE
updateFreq = 100000 #100 milliseconds
maxKeepAge = 3600.0 #one hour
maxKeepEntries = 0 #no limit
# watch the fieldvalues list
dcgm_agent_internal.dcgmWatchFieldValue(handleObj.handle, gpuId, fieldId, updateFreq, maxKeepAge, maxKeepEntries)
# trigger update for all fields once, wait for it complete and get timestamp
systemObj.UpdateAllFields(waitForUpdate=True)
initialValues = dcgm_agent_internal.dcgmGetLatestValuesForFields(handleObj.handle, gpuId, [fieldId,])
firstTimestamp = initialValues[0].ts
time.sleep(300.0 / 1000.0) # sleep for 300 milliseconds
otherValues = dcgm_agent_internal.dcgmGetLatestValuesForFields(handleObj.handle, gpuId, [fieldId,])
nextTimestamp = otherValues[0].ts
handleObj.Shutdown()
assert firstTimestamp != nextTimestamp, "Fields were not updated automatically; this should not happen in AUTO OPERATION MODE"
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_device_attributes_v3(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
for gpuId in gpuIds:
gpuAttrib = systemObj.discovery.GetGpuAttributes(gpuId)
#Validate field values
assert gpuAttrib.version != 0, "gpuAttrib.version == 0"
assert len(gpuAttrib.identifiers.brandName) > 0 and not dcgmvalue.DCGM_STR_IS_BLANK(gpuAttrib.identifiers.brandName), \
"gpuAttrib.identifiers.brandName: '%s'" % gpuAttrib.identifiers.brandName
assert len(gpuAttrib.identifiers.deviceName) > 0 and not dcgmvalue.DCGM_STR_IS_BLANK(gpuAttrib.identifiers.deviceName), \
"gpuAttrib.identifiers.deviceName: '%s'" % gpuAttrib.identifiers.deviceName
assert len(gpuAttrib.identifiers.pciBusId) > 0 and not dcgmvalue.DCGM_STR_IS_BLANK(gpuAttrib.identifiers.pciBusId), \
"gpuAttrib.identifiers.pciBusId: '%s'" % gpuAttrib.identifiers.pciBusId
assert len(gpuAttrib.identifiers.uuid) > 0 and not dcgmvalue.DCGM_STR_IS_BLANK(gpuAttrib.identifiers.uuid), \
"gpuAttrib.identifiers.uuid: '%s'" % gpuAttrib.identifiers.uuid
assert len(gpuAttrib.identifiers.vbios) > 0 and not dcgmvalue.DCGM_STR_IS_BLANK(gpuAttrib.identifiers.vbios), \
"gpuAttrib.identifiers.vbios: '%s'" % gpuAttrib.identifiers.vbios
assert len(gpuAttrib.identifiers.driverVersion) > 0 and not dcgmvalue.DCGM_STR_IS_BLANK(gpuAttrib.identifiers.driverVersion), \
"gpuAttrib.identifiers.driverVersion: '%s'" % gpuAttrib.identifiers.driverVersion
assert gpuAttrib.identifiers.pciDeviceId != 0, "gpuAttrib.identifiers.pciDeviceId: %08X" % gpuAttrib.identifiers.pciDeviceId
assert gpuAttrib.identifiers.pciSubSystemId != 0, "gpuAttrib.identifiers.pciSubSystemId: %08X" % gpuAttrib.identifiers.pciSubSystemId
assert gpuAttrib.settings.confidentialComputeMode >= 0, "gpuAttrib.settings.confidentialComputeMode: '%d'" % gpuAttrib.settings.confidentialComputeMode
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_device_attributes_bad_gpuid(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIds = [-1, dcgm_structs.DCGM_MAX_NUM_DEVICES]
#None of these should crash
for gpuId in gpuIds:
gpuAttrib = systemObj.discovery.GetGpuAttributes(gpuId)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_nvlink_bandwidth(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
gpuId = gpuIds[0]
fieldIds = [ dcgm_fields.DCGM_FI_DEV_NVLINK_BANDWIDTH_TOTAL, ]
fieldGroupObj = pydcgm.DcgmFieldGroup(handleObj, "my_group", fieldIds)
operationMode = dcgm_structs.DCGM_OPERATION_MODE_AUTO
updateFreq = 100000
maxKeepAge = 86400.0
maxKeepSamples = 0
fieldWatcher = dcgm_field_helpers.DcgmFieldGroupWatcher(handle, groupObj.GetId(), fieldGroupObj, operationMode, updateFreq, maxKeepAge, maxKeepSamples, 0)
assert len(fieldWatcher.values[gpuId]) == len(fieldIds), "Expected %d NVlink bandwidth values, got %d" % (len(fieldIds), len(fieldWatcher.values[gpuId]))
for fieldId in fieldIds:
for value in fieldWatcher.values[gpuId][fieldId]:
# Either the GPU supports NvLink, in which case a
# non-blank, actual value should be read, or the GPU does
# not support NvLink, in which case the field should be
# blank and the value should reflect that it's not
# supported.
assert ((value.isBlank == True) or (value.isBlank == False and value.value >= 0)), "Unexpected error reading field %d on GPU %d" % (fieldId, gpuId)
def helper_nvswitch_monitoring(handle, switchIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
entities = []
for switchId in switchIds:
entity = dcgm_structs.c_dcgmGroupEntityPair_t()
entity.entityGroupId = dcgm_fields.DCGM_FE_SWITCH
entity.entityId = switchId
entities.append(entity)
groupObj = systemObj.GetGroupWithEntities('SwitchGroup', entities)
fieldIds = []
for i in range(dcgm_fields.DCGM_FI_FIRST_NVSWITCH_FIELD_ID, dcgm_fields.DCGM_FI_LAST_NVSWITCH_FIELD_ID):
fieldMeta = dcgm_fields.DcgmFieldGetById(i)
if fieldMeta is not None:
fieldIds.append(i)
fieldGroupObj = pydcgm.DcgmFieldGroup(handleObj, "my_group", fieldIds)
switchId = switchIds[0]
operationMode = dcgm_structs.DCGM_OPERATION_MODE_AUTO
updateFreq = 100000
maxKeepAge = 86400.0
maxKeepSamples = 0
fieldWatcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(handle, groupObj.GetId(), fieldGroupObj, operationMode, updateFreq, maxKeepAge, maxKeepSamples, 0)
msg = "Expected %d NvSwitch field values, got %d" % (len(fieldIds), len(fieldWatcher.values[dcgm_fields.DCGM_FE_SWITCH][switchId]))
assert len(fieldWatcher.values[dcgm_fields.DCGM_FE_SWITCH][switchId]) == len(fieldIds), msg
# Check that the values are the appropriate dummy values
for fieldId in fieldIds:
for value in fieldWatcher.values[dcgm_fields.DCGM_FE_SWITCH][switchId][fieldId]:
# For now, these should all be blank values. This test may be updated or deleted later
# when the NSCQ library exists
assert (value.isBlank == True), "Unexpected error reading field %d on Switch %d" % (fieldId, switchId)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_nvswitches(switchCount=2)
def test_nvswitch_monitoring_embedded(handle, switchIds):
helper_nvswitch_monitoring(handle, switchIds)
@test_utils.run_with_standalone_host_engine(30)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_nvswitches(switchCount=2)
def test_nvswitch_monitoring_standalone(handle, switchIds):
helper_nvswitch_monitoring(handle, switchIds)
| DCGM-master | testing/python3/tests/test_field_values.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test the config manager for DCGM
import pydcgm
import dcgm_structs
import dcgm_agent_internal
import dcgm_structs_internal
import dcgm_agent
import logger
import test_utils
import dcgm_fields
import dcgmvalue
import time
import inspect
import apps
from subprocess import check_output
def helper_get_status_list(statusHandle):
"""
Helper method to get status list from the provided status handle
"""
errorList = list()
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
while (errorInfo != None):
errorList.append(errorInfo)
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
return errorList
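#Illustrative sketch (not a test): a status handle typically comes from
#dcgm_agent.dcgmStatusCreate(), gets passed to a per-GPU call such as dcgmConfigSet() so
#that per-device errors accumulate on it, and is then drained with helper_get_status_list().
#The group/config arguments here are placeholders for whatever the caller already has.
def _example_drain_status_errors(handle, groupId, configValues):
statusHandle = dcgm_agent.dcgmStatusCreate()
try:
dcgm_agent.dcgmConfigSet(handle, groupId, configValues, statusHandle)
finally:
errorList = helper_get_status_list(statusHandle)
dcgm_agent.dcgmStatusDestroy(statusHandle)
return errorList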
'''
def helper_investigate_status(statusHandle):
"""
Helper method to investigate status handle
"""
errorCount = 0;
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
while (errorInfo != None):
errorCount += 1
print errorCount
print(" GPU Id: %d" % errorInfo.gpuId)
print(" Field ID: %d" % errorInfo.fieldId)
print(" Error: %d" % errorInfo.status)
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
'''
@test_utils.run_with_embedded_host_engine()
def test_dcgm_config_embedded_get_devices(handle):
"""
Verifies that DCGM Engine returns list of devices
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIdList = systemObj.discovery.GetAllGpuIds()
assert len(gpuIdList) >= 0, "Not able to find devices on the node for embedded case"
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_config_standalone_get_devices(handle):
"""
Verifies that DCGM Engine returns list of devices
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIdList = systemObj.discovery.GetAllGpuIds()
assert len(gpuIdList) >= 0, "Not able to find devices for standalone case"
def helper_dcgm_config_get_attributes(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
gpuIdList = groupObj.GetGpuIds()
for gpuId in gpuIdList:
attributes = systemObj.discovery.GetGpuAttributes(gpuId)
assert (attributes.identifiers.deviceName != dcgmvalue.DCGM_STR_NOT_SUPPORTED
and attributes.identifiers.deviceName != dcgmvalue.DCGM_STR_NOT_FOUND
and attributes.identifiers.deviceName != dcgmvalue.DCGM_STR_NOT_PERMISSIONED), "Not able to find attributes"
#We used to assert that attributes.clockSets.count was > 0. This was because the NVML internal API that provided it
#bypassed the SKU check. If nvidia-smi -q -d SUPPORTED_CLOCKS returns N/A, we will no longer have clockSets.
for i in range(attributes.clockSets.count):
memClock = attributes.clockSets.clockSet[i].memClock
smClock = attributes.clockSets.clockSet[i].smClock
assert memClock > 0 and memClock < 20000, "gpuId %d got memClock out of range 0 - 20000: %d" % (gpuId, memClock)
assert smClock > 0 and smClock < 10000, "gpuId %d got smClock out of range 0 - 10000: %d" % (gpuId, smClock)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_config_embedded_get_attributes(handle):
"""
Get Device attributes for each GPU ID
"""
helper_dcgm_config_get_attributes(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_config_standalone_get_attributes(handle):
"""
Get Device attributes for each GPU ID
"""
helper_dcgm_config_get_attributes(handle)
def helper_dcgm_config_set(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
#Will throw an exception on error
groupObj.config.Set(config_values)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_as_root()
def test_dcgm_config_set_embedded(handle):
"""
Verifies that the configuration can be set for a group
"""
helper_dcgm_config_set(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_as_root()
def test_dcgm_config_set_standalone(handle):
"""
Verifies that the configuration can be set for a group
"""
helper_dcgm_config_set(handle)
def helper_dcgm_config_get(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
## Set the configuration first
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
#Will throw exception on error
groupObj.config.Set(config_values)
## Get the target configuration to make sure that it's exact same as the one configured
config_values = groupObj.config.Get(dcgm_structs.DCGM_CONFIG_TARGET_STATE)
gpuIds = groupObj.GetGpuIds()
## Loop through config_values to check for correctness of values fetched from the hostengine
for x in range(0, len(gpuIds)):
assert config_values[x].mEccMode == dcgmvalue.DCGM_INT32_BLANK, "Failed to get matching value for ecc mode. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_BLANK, config_values[x].mEccMode)
assert config_values[x].mPerfState.targetClocks.memClock == dcgmvalue.DCGM_INT32_BLANK, "Failed to get matching value for mem app clk. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_BLANK, config_values[x].mPerfState.targetClocks.memClock)
assert config_values[x].mPerfState.targetClocks.smClock == dcgmvalue.DCGM_INT32_BLANK, "Failed to get matching value for proc app clk. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_BLANK, config_values[x].mPerfState.targetClocks.smClock)
assert config_values[x].mPowerLimit.val == dcgmvalue.DCGM_INT32_BLANK, "Failed to get matching value for power limit. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_BLANK, config_values[x].mPowerLimit.val)
assert config_values[x].mComputeMode == dcgmvalue.DCGM_INT32_BLANK, "Failed to get matching value for compute mode. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_BLANK, config_values[x].mComputeMode)
pass
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_config_get_embedded(handle, gpuIds):
"""
Verifies "Get Configuration" Basic functionality
"""
helper_dcgm_config_get(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_config_get_standalone(handle, gpuIds):
"""
Verifies "Get Configuration" Basic functionality
"""
helper_dcgm_config_get(handle)
def helper_dcgm_config_enforce(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
#Will throw exception on error
groupObj.config.Set(config_values)
groupObj.config.Enforce()
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_config_enforce_embedded(handle, gpuIds):
"""
Verifies that the configuration can be enforced for a group
"""
helper_dcgm_config_enforce(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_config_enforce_standalone(handle, gpuIds):
"""
Verifies that the configuration can be enforced for a group
"""
helper_dcgm_config_enforce(handle)
def helper_dcgm_config_powerbudget(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
groupObj.AddGpu(gpuIds[0])
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
## Get Min and Max Power limit on the group
attributes = dcgm_agent.dcgmGetDeviceAttributes(handle, gpuIds[0])
## Verify that power is supported on the GPUs in the group
if dcgmvalue.DCGM_INT32_IS_BLANK(attributes.powerLimits.maxPowerLimit):
test_utils.skip_test("Needs Power limit to be supported on the GPU")
powerLimit = int((attributes.powerLimits.maxPowerLimit + attributes.powerLimits.minPowerLimit)/2)
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.type = dcgm_structs.DCGM_CONFIG_POWER_BUDGET_GROUP
config_values.mPowerLimit.val = powerLimit * len(gpuIds) #Assumes homogeneous GPUs
groupObj.config.Set(config_values)
config_values = groupObj.config.Get(dcgm_structs.DCGM_CONFIG_CURRENT_STATE)
assert len(config_values) > 0, "Failed to get configuration using groupObj.config.Get"
for x in range(0, len(gpuIds)):
if (config_values[x].mPowerLimit.val != dcgmvalue.DCGM_INT32_NOT_SUPPORTED):
assert config_values[x].mPowerLimit.type == dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL, "The power limit type for gpuId %d is incorrect. Returned: %d Expected :%d" % (x, config_values[x].mPowerLimit.type, dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL)
assert config_values[x].mPowerLimit.val == powerLimit, "The power limit value for gpuID %d is incorrect. Returned: %d Expected: %s" % (x, config_values[x].mPowerLimit.val, powerLimit)
pass
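# Illustrative sketch (not part of the original test set): helper_dcgm_config_powerbudget
# above sets a DCGM_CONFIG_POWER_BUDGET_GROUP budget of powerLimit * len(gpuIds) and then
# expects each GPU to report an individual cap equal to powerLimit. The hypothetical helper
# below just spells out that arithmetic; it assumes homogeneous GPUs, like the test does.
def _example_per_gpu_cap_from_group_budget(groupBudgetWatts, gpuCount):
    # e.g. a 500 W group budget split across 2 identical GPUs -> a 250 W individual cap,
    # which is what the test compares against config_values[x].mPowerLimit.val.
    return int(groupBudgetWatts / gpuCount)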
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_config_powerbudget_embedded(handle, gpuIds):
"""
This method verifies setting a power budget for a group of GPUs
"""
helper_dcgm_config_powerbudget(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_config_powerbudget_standalone(handle, gpuIds):
"""
This method verifies setting a power budget for a group of GPUs
"""
helper_dcgm_config_powerbudget(handle, gpuIds)
def helper_verify_power_value(groupObj, expected_power):
"""
Helper Method to verify power value
"""
gpuIds = groupObj.GetGpuIds()
config_values = groupObj.config.Get(dcgm_structs.DCGM_CONFIG_CURRENT_STATE)
assert len(config_values) > 0, "Failed to get configuration using dcgmClientConfigGet"
for x in range(0, len(gpuIds)):
if (config_values[x].mPowerLimit.val != dcgmvalue.DCGM_INT32_NOT_SUPPORTED):
assert config_values[x].mPowerLimit.type == dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL, \
"The power limit type for gpuId %d is incorrect. Returned: %d Expected :%d" \
% (x, config_values[x].mPowerLimit.type, dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL)
assert config_values[x].mPowerLimit.val == expected_power, "The power limit value for gpuID %d is incorrect. Returned: %d Expected: %d" \
% (x, config_values[x].mPowerLimit.val, expected_power)
pass
def helper_test_config_config_power_enforce(handle, gpuIds):
"""
Checks if DCGM can enforce the power settings if they are changed behind the scenes
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
groupObj.AddGpu(gpuIds[0])
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
gpuId = gpuIds[0]
## Get Min and Max Power limit on the group
attributes = systemObj.discovery.GetGpuAttributes(gpuId)
## Verify that power is supported on the GPUs in the group
if dcgmvalue.DCGM_INT32_IS_BLANK(attributes.powerLimits.maxPowerLimit):
test_utils.skip_test("Needs Power limit to be supported on the GPU")
powerLimit_set_dcgmi = int((attributes.powerLimits.maxPowerLimit + attributes.powerLimits.minPowerLimit)/2)
powerLimit_set_nvsmi = attributes.powerLimits.maxPowerLimit
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.type = dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL
config_values.mPowerLimit.val = powerLimit_set_dcgmi
groupObj.config.Set(config_values)
logger.info("Verify if dcgmi configured value has taken effect")
helper_verify_power_value(groupObj, powerLimit_set_dcgmi)
## Change Power limit to max from external entity like nvidia-smi
assert 0 == apps.NvidiaSmiApp(["-pl", str(powerLimit_set_nvsmi), "-i", str(gpuIds[0])]).run(), \
"Nvidia smi couldn't set the power limit"
logger.info("Verify if nvsmi configured value has taken effect")
helper_verify_power_value(groupObj, powerLimit_set_nvsmi)
groupObj.config.Enforce()
logger.info("Verify if dcgmi enforced value has taken effect")
helper_verify_power_value(groupObj, powerLimit_set_dcgmi)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_config_power_enforce_embedded(handle, gpuIds):
helper_test_config_config_power_enforce(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_config_power_enforce_standalone(handle, gpuIds):
helper_test_config_config_power_enforce(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_default_status_handler(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
groupObj.AddGpu(gpuIds[0])
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.type = dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL
config_values.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
groupObj.config.Set(config_values)
config_values = groupObj.config.Get(dcgm_structs.DCGM_CONFIG_CURRENT_STATE)
assert len(config_values) > 0, "Failed to work with NULL status handle"
groupObj.config.Enforce()
#No need to test policy set/get with default status here. This is covered by test_policy.py, which passes None as the status handler
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_configure_ecc_mode(handle, gpuIds):
test_utils.skip_test("Skipping this test until bug 200377294 is fixed")
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_EMPTY, "test1")
validDevice = -1
for x in gpuIds:
fvSupported = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, x, [dcgm_fields.DCGM_FI_DEV_ECC_CURRENT, ])
if (fvSupported[0].value.i64 != dcgmvalue.DCGM_INT64_NOT_SUPPORTED):
validDevice = x
break
if (validDevice == -1):
test_utils.skip_test("Can only run if at least one GPU with ECC is present")
ret = dcgm_agent.dcgmGroupAddDevice(handle, groupId, validDevice)
assert (ret == dcgm_structs.DCGM_ST_OK),"Failed to add a device to the group %d. Return %d" % (groupId.value, ret)
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
#Create a status handle
status_handle = dcgm_agent.dcgmStatusCreate()
## Get original ECC mode on the device
config_values = dcgm_agent.dcgmConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle)
assert len(config_values) > 0, "Failed to get configuration using dcgmConfigGet"
eccmodeOnGroupExisting = config_values[0].mEccMode
if eccmodeOnGroupExisting == 0:
eccmodeOnGroupToSet = 1
else:
eccmodeOnGroupToSet = 0
#print eccmodeOnGroupExisting
#print eccmodeOnGroupToSet
## Toggle the ECC mode on the group
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = eccmodeOnGroupToSet
config_values.mPerfState.syncBoost = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.type = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
#Clear the status handle to log the errors while setting the config
ret = dcgm_agent.dcgmStatusClear(status_handle)
assert ret == dcgm_structs.DCGM_ST_OK, "Failed to clear the status handle. Return %d" %ret
try:
ret = dcgm_agent.dcgmConfigSet(handle,groupId,config_values, status_handle)
except dcgm_structs.DCGMError as e:
pass
errors = helper_get_status_list(status_handle)
if len(errors) > 0:
for error in errors:
if error.status == dcgm_structs.DCGM_ST_RESET_REQUIRED:
test_utils.skip_test("Skipping the test - Unable to reset the Gpu, FieldId - %d, Return - %d" % (error.fieldId, error.status))
else:
test_utils.skip_test("Skipping the test - Unable to set the ECC mode. FieldId - %d, Return %d" % (error.fieldId,error.status))
#Sleep after reset
time.sleep(2)
#Clear the status handle to log the errors while setting the config
ret = dcgm_agent.dcgmStatusClear(status_handle)
assert ret == dcgm_structs.DCGM_ST_OK, "Failed to clear the status handle. Return %d" %ret
#Get the current configuration
config_values = dcgm_agent.dcgmConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle)
assert len(config_values) > 0, "Failed to get configuration using dcgmConfigGet"
fvs = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, validDevice, [dcgm_fields.DCGM_FI_DEV_ECC_PENDING, dcgm_fields.DCGM_FI_DEV_ECC_CURRENT])
if fvs[0].value.i64 != fvs[1].value.i64:
logger.warning("Pending ECC %d != Current ECC %d for gpuId %d. Box probably needs a reboot" % (fvs[0].value.i64, fvs[1].value.i64, validDevice))
else:
assert config_values[0].mEccMode == (eccmodeOnGroupToSet), "ECC mode %d different from the set value %d" % \
(config_values[0].mEccMode, eccmodeOnGroupToSet)
@test_utils.run_with_standalone_host_engine(20, ["--port", "5545"])
@test_utils.run_with_initialized_client("127.0.0.1:5545")
@test_utils.run_only_with_live_gpus()
def test_dcgm_port_standalone(handle, gpuIds):
"""
Verifies that DCGM Engine works on different port
"""
gpuIdList = dcgm_agent.dcgmGetAllDevices(handle)
assert len(gpuIdList) >= 0, "Standalone host engine using different port number failed."
def helper_dcgm_verify_sync_boost_single_gpu(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
groupObj.AddGpu(gpuIds[0])
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
## Set the sync boost for the group
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = 1
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.type = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
#Config Set must return DCGM_ST_BADPARAM since we only have a single GPU
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM)):
groupObj.config.Set(config_values)
groupObj.Delete()
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_verify_sync_boost_single_gpu_standalone(handle, gpuIds):
helper_dcgm_verify_sync_boost_single_gpu(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_verify_sync_boost_single_gpu_embedded(handle, gpuIds):
helper_dcgm_verify_sync_boost_single_gpu(handle, gpuIds)
def helper_dcgm_verify_sync_boost_multi_gpu(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
if len(gpuIds) < 2:
test_utils.skip_test("This test only works with 2 or more identical GPUs")
## Add all identical GPUs to the group
for gpuId in gpuIds:
groupObj.AddGpu(gpuId)
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
## Set the sync boost for the group
config_values = dcgm_structs.c_dcgmDeviceConfig_v1()
config_values.mEccMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.syncBoost = 1
config_values.mPerfState.targetClocks.memClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mPerfState.targetClocks.smClock = dcgmvalue.DCGM_INT32_BLANK
config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.type = dcgmvalue.DCGM_INT32_BLANK
config_values.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
#Enable sync boost - Will throw an exception on error
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED)):
groupObj.config.Set(config_values)
config_values.mPerfState.syncBoost = 0
#Disable sync boost - Will throw an exception on error
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED)):
groupObj.config.Set(config_values)
groupObj.Delete()
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_verify_sync_boost_multi_gpu_standalone(handle, gpuIds):
helper_dcgm_verify_sync_boost_multi_gpu(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
def test_dcgm_verify_sync_boost_multi_gpu_embedded(handle, gpuIds):
helper_dcgm_verify_sync_boost_multi_gpu(handle, gpuIds)
| DCGM-master | testing/python3/tests/test_configmanager.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module runs all plugins (and the short, medium, and long suites) through dcgmi and logs the JSON output to a file in
OUTPUT_DIR (test_plugin_sanity_out folder in the testing folder). The module does not actually perform any verification
(except for the implicit verification that dcgmi returns with status 0 for each of the plugins/test suite runs when
there are no errors inserted) - you must *manually* verify the output in the output directory makes sense.
Since this module runs all plugins and the short, medium, and long suites, all tests in this module are marked developer tests
(the total runtime for this module is 1 - 2 hours). It is *NOT* recommended to run this suite on a system with
GPUs that have different SKUs because each test is run once for every SKU on the machine.
The main purpose of this module is to provide a very basic sanity test for large changes made to nvvs plugins.
Note: When DCGM and NVVS have been updated to use error codes, these tests can be updated to perform automatic
validation of the output based on the error codes. (The tests will need to be updated to use the API call instead of
running dcgmi).
When debugging, or when only a subset of the tests needs to be run, the following filter can be helpful:
`test_plugin_sanity.(test_create|<TEST_NAME_HERE>).*`
-> Replace <TEST_NAME_HERE> with the test to run. In case of multiple tests, use the `|` separator.
-> All existing files in the output directory will be deleted - to prevent this, remove "test_create" from the filter
"""
from functools import wraps
import ctypes
import os
import shutil
import signal
import time
from apps.dcgmi_app import DcgmiApp
from apps.nv_hostengine_app import NvHostEngineApp
from dcgm_internal_helpers import check_nvvs_process
import dcgm_field_injection_helpers
import dcgm_internal_helpers
import dcgm_fields
import dcgm_structs
import logger
import option_parser
import test_utils
### Constants
OUTPUT_DIR = "./test_plugin_sanity_out"
DEV_MODE_MSG = "Manual test for verifying plugin output. Use developer mode to enable."
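# For reference: the runs below (see no_errors_run / with_error_run) write their artifacts
# with the following naming pattern, where <plugin> is the nvvs plugin name and <gpuId> is
# the first GPU of the tested group:
#   ./test_plugin_sanity_out/dcgmi_<plugin>_no_err_<gpuId>.json    (dcgmi JSON output)
#   ./test_plugin_sanity_out/nvvs_<plugin>_no_err_<gpuId>.log      (copied nvvs debug log)
# Runs with injected errors use "with_err" instead of "no_err" in the file names.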
### Helpers
@test_utils.run_first()
def test_create_output_dir():
"""
Ensure we have a new results directory on every run. This "test" is called first when no filters are used
"""
if os.path.exists(OUTPUT_DIR):
shutil.rmtree(OUTPUT_DIR)
os.makedirs(OUTPUT_DIR)
def log_app_output_to_file(app, filename):
with open(filename, 'w') as f:
for line in app.stdout_lines:
f.write(line + "\n")
for line in app.stderr_lines:
f.write(line + "\n")
def log_app_output_to_stdout(app):
logger.info("app output:")
for line in app.stdout_lines:
logger.info(line)
for line in app.stderr_lines:
logger.info(line)
def copy_nvvs_log(nvvsLogFile, outputLogFile):
"""
Copy nvvs log file to the output dir. This method is needed because of path length limitations when using DCGM
to run NVVS.
"""
try:
if os.path.exists(nvvsLogFile):
shutil.copyfile(nvvsLogFile, outputLogFile)
except IOError as e:
logger.error("Could not copy nvvs log to output dir.")
logger.error(e)
# Main test helper methods
def no_errors_run(handle, gpuIds, name, testname, parms=None):
"""
Runs the given test (testname) without inserting errors, and ensures that dcgmi returns with an exit code of 0.
name is the name of the plugin in nvvs (e.g. constant_perf)
"""
output_file = OUTPUT_DIR + "/dcgmi_%s_no_err_%s.json" % (name, gpuIds[0])
log_file = OUTPUT_DIR + "/nvvs_%s_no_err_%s.log" % (name, gpuIds[0])
gpu_list = ",".join(map(str, gpuIds))
# Note: Although using the dcgmActionValidate api (via DcgmDiag.Execute()) would allow for some automatic
# verification, we use dcgmi diag and log output to a file for easier debugging when something goes wrong.
args = ["diag", "-r", "%s" % testname, "-i", gpu_list, "-j", "-v", "-d", "5", "--debugLogFile", "/tmp/nvvs.log"]
if parms != None:
args.extend(["-p", "%s" % parms])
dcgmi = DcgmiApp(args=args)
dcgmi.start(timeout=1500) # 25min timeout
logger.info("Started diag with args: %s" % args)
retcode = dcgmi.wait()
copy_nvvs_log("/tmp/nvvs.log", log_file)
if retcode != 0:
logger.error("dcgmi_%s_no_err failed with retcode: %s" % (name, retcode))
copy_nvvs_log("/tmp/nvvs.log", log_file)
log_app_output_to_file(dcgmi, output_file)
def with_error_run(handle, gpuIds, name, testname, parms=None):
"""
Runs the given test (testname) and inserts throttling / REPLAY_COUNTER errors depending on the test.
name is the name of the plugin in nvvs (e.g. constant_perf)
Logs an error (but does not fail the test) if the dcgmi return code is not 226 (the lower 8 bits of
-30/DCGM_ST_NVVS_ERROR), which is expected since the test should fail due to the inserted errors.
Since busgrind/PCIe does a diff for the REPLAY_COUNTER field, we need to insert errors after busgrind has read
some zero values for the field. As a result, the hardcoded delay of 15 seconds may need to be adjusted on
different systems (currently a delay of 15 seconds works for the bstolle-dgx machine).
"""
output_file = OUTPUT_DIR + "/dcgmi_%s_with_err_%s.json" % (name, gpuIds[0])
log_file = OUTPUT_DIR + "/nvvs_%s_with_err_%s.log" % (name, gpuIds[0])
gpu_list = ",".join(map(str, gpuIds))
args = ["diag", "-r", "%s" % testname, "-i", gpu_list, "-j", "-v", "-d", "5", "--debugLogFile", "/tmp/nvvs.log"]
if parms != None:
args.extend(["-p", "%s" % parms])
dcgmi = DcgmiApp(args=args)
field_id = dcgm_fields.DCGM_FI_DEV_GPU_TEMP
value = 1000
delay = 0
if name == "busgrind":
field_id = dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER
value = 1000
delay = 15
dcgm_field_injection_helpers.inject_value(handle, gpuIds[0], field_id, value, delay, repeatCount=20)
start = time.time()
dcgmi.start(timeout=1500) # 25min timeout
logger.info("Started diag with args: %s" % args)
retcode = dcgmi.wait()
copy_nvvs_log("/tmp/nvvs.log", log_file)
expected_retcode = ctypes.c_uint8(dcgm_structs.DCGM_ST_NVVS_ERROR).value
if retcode != expected_retcode:
logger.error("Expected retcode to be %s, but retcode of dcgmi is %s" % (expected_retcode, retcode))
dcgmi.validate() # Validate because dcgmi returns non zero when the diag fails (expected)
log_app_output_to_file(dcgmi, output_file)
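# Illustrative sketch (not part of the original test set): how the expected dcgmi exit code
# used in with_error_run() comes about. dcgmi returns DCGM_ST_NVVS_ERROR (-30) when the diag
# fails, and the process exit status only keeps the lower 8 bits of that value, i.e. 226.
# The helper name below is hypothetical.
def _example_expected_failure_retcode():
    return ctypes.c_uint8(dcgm_structs.DCGM_ST_NVVS_ERROR).value  # (-30) & 0xFF == 226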
### Tests
# busgrind
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_busgrind_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "busgrind", "PCIe")
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_busgrind_with_error(handle, gpuIds):
with_error_run(handle, gpuIds, "busgrind", "PCIe")
# constant perf / targeted stress
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_constant_perf_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "constant_perf", "targeted stress")
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_constant_perf_with_error(handle, gpuIds):
with_error_run(handle, gpuIds, "constant_perf", "targeted stress")
# constant power / targeted power
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_constant_power_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "constant_power", "targeted power")
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_constant_power_with_error(handle, gpuIds):
with_error_run(handle, gpuIds, "constant_power", "targeted power")
# context create
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_context_create_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "context_create", "context create")
# gpuburn / diagnostic
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_gpuburn_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "gpuburn", "diagnostic", "diagnostic.test_duration=60")
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_gpuburn_with_error(handle, gpuIds):
with_error_run(handle, gpuIds, "gpuburn", "diagnostic", "diagnostic.test_duration=60")
# memory
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_memory_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "memory", "memory")
# No run for memory plugin with inserted errors - memory test completes too quickly for reliably simulating a DBE
# For manual verification, a good WaR is to add a sleep(5) just before the memory plugin performs a memory allocation,
# create a temp build with this change, and then try inserting a DBE after launching the diag.
# memory bandwidth
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_memory_bandwidth_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "memory_bandwidth", "memory bandwidth", "memory bandwidth.is_allowed=true")
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_memory_bandwidth_with_error(handle, gpuIds):
with_error_run(handle, gpuIds, "memory_bandwidth", "memory bandwidth", "memory bandwidth.is_allowed=true")
# memtest
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_memtest_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "memtest", "memtest")
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_memtest_with_error(handle, gpuIds):
with_error_run(handle, gpuIds, "memtest", "memtest")
# short
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_short_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "short", "short")
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_short_with_error(handle, gpuIds):
with_error_run(handle, gpuIds, "short", "short")
# medium
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_medium_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "medium", "medium")
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_medium_with_error(handle, gpuIds):
with_error_run(handle, gpuIds, "medium", "medium")
# long
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_long_no_errors(handle, gpuIds):
no_errors_run(handle, gpuIds, "long", "long")
@test_utils.run_with_developer_mode(msg=DEV_MODE_MSG)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_long_with_error(handle, gpuIds):
with_error_run(handle, gpuIds, "long", "long")
| DCGM-master | testing/python3/tests/test_plugin_sanity.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from DcgmReader import *
import pydcgm
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_fields
import dcgm_fields_internal
from dcgm_structs import dcgmExceptionClass
import logger
import test_utils
import time
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_reader_default(handle):
# pylint: disable=undefined-variable
dr = DcgmReader()
dr.SetHandle(handle)
latest = dr.GetLatestGpuValuesAsFieldNameDict()
for gpuId in latest:
# latest data might contain fewer entries than the default field list, because blank values aren't included
# Defined in DcgmReader
# pylint: disable=undefined-variable
assert len(latest[gpuId]) <= len(defaultFieldIds)
# Make sure we get strings
for key in latest[gpuId]:
assert isinstance(key, str)
sample = dr.GetLatestGpuValuesAsFieldIdDict()
for gpuId in sample:
# Defined in DcgmReader
# pylint: disable=undefined-variable
assert len(sample[gpuId]) <= len(defaultFieldIds)
# Make sure we get valid integer field ids
for fieldId in sample[gpuId]:
assert isinstance(fieldId, int)
assert dcgm_fields.DcgmFieldGetById(fieldId) != None
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_reader_specific_fields(handle):
specificFields = [dcgm_fields.DCGM_FI_DEV_POWER_USAGE, dcgm_fields.DCGM_FI_DEV_XID_ERRORS]
# pylint: disable=undefined-variable
dr = DcgmReader(fieldIds=specificFields)
dr.SetHandle(handle)
latest = dr.GetLatestGpuValuesAsFieldNameDict()
for gpuId in latest:
assert len(latest[gpuId]) <= len(specificFields)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_reading_specific_data(handle, gpuIds):
"""
Verifies that we can inject specific data and get that same data back
"""
dcgmHandle = pydcgm.DcgmHandle(handle)
dcgmSystem = dcgmHandle.GetSystem()
specificFieldIds = [ dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION,
]
fieldValues = [ 1,
1000,
9000,
]
for i in range(0, len(specificFieldIds)):
field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
field.fieldId = specificFieldIds[i]
field.status = 0
field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
field.ts = int((time.time()+10) * 1000000.0) # set the injected data into the future
field.value.i64 = fieldValues[i]
ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuIds[0], field)
assert (ret == dcgm_structs.DCGM_ST_OK)
# pylint: disable=undefined-variable
dr = DcgmReader(fieldIds=specificFieldIds)
dr.SetHandle(handle)
latest = dr.GetLatestGpuValuesAsFieldIdDict()
assert len(latest[gpuIds[0]]) == len(specificFieldIds)
for i in range(0, len(specificFieldIds)):
assert latest[gpuIds[0]][specificFieldIds[i]] == fieldValues[i]
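# Illustrative sketch (not part of the original test set): the injection pattern used in
# test_reading_specific_data above, factored into a small helper. The timestamp is pushed a
# few seconds into the future so the injected sample is what the "latest value" queries
# return. The helper name and the default 10 second offset are illustrative assumptions.
def _example_inject_int64(handle, gpuId, fieldId, value, secondsInFuture=10):
    field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
    field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
    field.fieldId = fieldId
    field.status = 0
    field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
    field.ts = int((time.time() + secondsInFuture) * 1000000.0)  # timestamp in the future
    field.value.i64 = value
    return dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, field)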
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_with_cuda_app()
def test_reading_pid_fields(handle, gpuIds, cudaApp):
"""
Verifies that we can decode PID structs
"""
fieldTag = dcgm_fields_internal.DCGM_FI_DEV_COMPUTE_PIDS
pids = []
# pylint: disable=undefined-variable
dr = DcgmReader(fieldIds=[ fieldTag ], updateFrequency=100000)
logger.debug("Trying for 5 seconds")
exit_loop = False
for _ in range(25):
if (exit_loop):
break
data = dr.GetLatestGpuValuesAsFieldIdDict()
assert len(data) > 0
for gpuId in data:
gpuData = data[gpuId]
if fieldTag in gpuData:
pids.append(gpuData[fieldTag].pid)
if gpuData[fieldTag].pid == cudaApp.getpid():
# Found our PID. Exit the loop
exit_loop = True
time.sleep(0.2)
logger.debug("PIDs: %s. cudaApp PID: %d" % (str(pids), cudaApp.getpid()))
assert cudaApp.getpid() in pids, "could not find cudaApp PID"
def util_dcgm_reader_all_since_last_call(handle, flag, repeat):
"""
Test to ensure GetAllGpuValuesAsDictSinceLastCall behaves. It was first used
for collectd integration; it ensures the call does not crash and also checks that
no unexpected fields are returned.
Arguments:
handle: DCGM handle
flag: argument for GetAllGpuValuesAsDictSinceLastCall
repeat: whether to repeat the GetAllGpuValuesAsDictSinceLastCall call
"""
specificFields = [dcgm_fields.DCGM_FI_DEV_POWER_USAGE, dcgm_fields.DCGM_FI_DEV_XID_ERRORS]
# pylint: disable=undefined-variable
dr = DcgmReader(fieldIds=specificFields)
dr.SetHandle(handle)
latest = dr.GetAllGpuValuesAsDictSinceLastCall(flag)
if repeat:
latest = dr.GetAllGpuValuesAsDictSinceLastCall(flag)
if flag == False:
dcgmHandle = pydcgm.DcgmHandle(handle)
dcgmSystem = dcgmHandle.GetSystem()
fieldTags = []
for fieldId in specificFields:
fieldTags.append(dcgmSystem.fields.GetFieldById(fieldId).tag)
for gpuId in latest:
# Latest data might contain fewer entries than the requested field list, because blank
# values aren't included. We basically try to ensure there is no crash and we don't
# return something absurd.
assert len(latest[gpuId]) <= len(specificFields)
for key in latest[gpuId].keys():
if flag == False:
assert key in fieldTags
else:
assert key in specificFields
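# Illustrative sketch (not part of the original test set): the shape of the data checked by
# util_dcgm_reader_all_since_last_call above. With the flag False the per-GPU dictionaries
# are keyed by field tag strings; with the flag True they are keyed by numeric field IDs.
# The GPU id 0 and the value lists shown are illustrative assumptions, not captured output.
#
#   dr.GetAllGpuValuesAsDictSinceLastCall(False)
#   # e.g. {0: {<power usage tag>: [...], <xid errors tag>: [...]}}
#
#   dr.GetAllGpuValuesAsDictSinceLastCall(True)
#   # e.g. {0: {dcgm_fields.DCGM_FI_DEV_POWER_USAGE: [...], dcgm_fields.DCGM_FI_DEV_XID_ERRORS: [...]}}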
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_reader_all_since_last_call_false(handle):
util_dcgm_reader_all_since_last_call(handle, False, False)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_reader_all_since_last_call_true(handle):
util_dcgm_reader_all_since_last_call(handle, True, False)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_reader_all_since_last_call_false_repeat(handle):
util_dcgm_reader_all_since_last_call(handle, False, True)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_reader_all_since_last_call_true_repeat(handle):
util_dcgm_reader_all_since_last_call(handle, True, True)
| DCGM-master | testing/python3/tests/test_dcgm_reader.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test the policy manager for DCGM
import pydcgm
import dcgm_structs
import dcgm_agent_internal
import dcgm_structs_internal
import dcgm_agent
import logger
import test_utils
import dcgm_fields
import dcgmvalue
import time
import inspect
import apps
from subprocess import check_output
def helper_inject_vgpu_configuration(handle, gpuId, eccModeVal, powerLimitVal, computeModeVal):
"""
Helper method to inject configuration to Cachemanager
"""
if (eccModeVal != None):
# inject an error into Ecc Mode
eccMode = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
eccMode.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
eccMode.fieldId = dcgm_fields.DCGM_FI_DEV_ECC_CURRENT
eccMode.status = 0
eccMode.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
eccMode.ts = int((time.time()+1) * 1000000.0) # set the injected data into the future
eccMode.value.i64 = eccModeVal
ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, eccMode)
assert (ret == dcgm_structs.DCGM_ST_OK)
if (powerLimitVal != None):
powerLimit = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
powerLimit.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
powerLimit.fieldId = dcgm_fields.DCGM_FI_DEV_POWER_MGMT_LIMIT
powerLimit.status = 0
powerLimit.fieldType = ord(dcgm_fields.DCGM_FT_DOUBLE)
powerLimit.ts = int((time.time()+1) * 1000000.0) # set the injected data into the future
powerLimit.value.dbl = powerLimitVal
ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, powerLimit)
assert (ret == dcgm_structs.DCGM_ST_OK)
if (computeModeVal != None):
computeMode = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
computeMode.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
computeMode.fieldId = dcgm_fields.DCGM_FI_DEV_COMPUTE_MODE
computeMode.status = 0
computeMode.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
computeMode.ts = int((time.time()+1) * 1000000.0) # set the injected data into the future
computeMode.value.i64 = computeModeVal
ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, computeMode)
assert (ret == dcgm_structs.DCGM_ST_OK)
def helper_get_status_list(statusHandle):
"""
Helper method to get status list from the provided status handle
"""
errorList = list()
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
while (errorInfo != None):
errorList.append(errorInfo)
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
return errorList
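# Illustrative sketch (not part of the original test set): one way the list returned by
# helper_get_status_list() can be consumed. Each popped entry carries gpuId, fieldId and
# status, as the commented-out helper_investigate_status() below also shows. The helper
# name is hypothetical.
def _example_log_status_errors(statusHandle):
    for errorInfo in helper_get_status_list(statusHandle):
        logger.error("GPU %d, field %d returned status %d"
                     % (errorInfo.gpuId, errorInfo.fieldId, errorInfo.status))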
'''
def helper_investigate_status(statusHandle):
"""
Helper method to investigate status handle
"""
errorCount = 0;
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
while (errorInfo != None):
errorCount += 1
print errorCount
print(" GPU Id: %d" % errorInfo.gpuId)
print(" Field ID: %d" % errorInfo.fieldId)
print(" Error: %d" % errorInfo.status)
errorInfo = dcgm_agent.dcgmStatusPopError(statusHandle)
'''
@test_utils.run_with_embedded_host_engine()
def test_dcgm_vgpu_config_embedded_get_devices(handle):
"""
Verifies that DCGM Engine returns list of devices
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIdList = systemObj.discovery.GetAllGpuIds()
assert len(gpuIdList) >= 0, "Not able to find devices on the node for embedded case"
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_vgpu_config_standalone_get_devices(handle):
"""
Verifies that DCGM Engine returns list of devices
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
gpuIdList = systemObj.discovery.GetAllGpuIds()
assert len(gpuIdList) >= 0, "Not able to find devices for standalone case"
def helper_dcgm_vgpu_config_get_attributes(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
gpuIdList = groupObj.GetGpuIds()
for gpuId in gpuIdList:
attributes = systemObj.discovery.GetGpuAttributes(gpuId)
assert (attributes.identifiers.deviceName != dcgmvalue.DCGM_STR_NOT_SUPPORTED
and attributes.identifiers.deviceName != dcgmvalue.DCGM_STR_NOT_FOUND
and attributes.identifiers.deviceName != dcgmvalue.DCGM_STR_NOT_PERMISSIONED), "Not able to find attributes"
@test_utils.run_with_embedded_host_engine()
def test_dcgm_vgpu_config_embedded_get_attributes(handle):
"""
Get Device attributes for each GPU ID
"""
helper_dcgm_vgpu_config_get_attributes(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_vgpu_config_standalone_get_attributes(handle):
"""
Get Device attributes for each GPU ID
"""
helper_dcgm_vgpu_config_get_attributes(handle)
'''
def helper_dcgm_vgpu_config_set(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
vgpu_config_values = dcgm_structs.c_dcgmDeviceVgpuConfig_v1()
vgpu_config_values.SetBlank()
#Will throw an exception on error
groupObj.vgpu.Set(vgpu_config_values)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_set_embedded(handle):
"""
Verifies that the configuration can be set for a group
"""
helper_dcgm_vgpu_config_set(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_set_standalone(handle):
"""
Verifies that the vGPU configuration can be set for a group
"""
helper_dcgm_vgpu_config_set(handle)
def helper_dcgm_vgpu_config_get(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
## Set the configuration first
vgpu_config_values = dcgm_structs.c_dcgmDeviceVgpuConfig_v1()
vgpu_config_values.SetBlank()
#Will throw exception on error
groupObj.vgpu.Set(vgpu_config_values)
## Get the target vGPU configuration to make sure that it's exact same as the one configured
config_values = groupObj.vgpu.Get(dcgm_structs.DCGM_CONFIG_TARGET_STATE)
gpuIds = groupObj.GetGpuIds()
## Loop through config_values to to check for correctness of values fetched from the hostengine
for x in xrange(0, len(gpuIds)):
assert config_values[x].mEccMode == dcgmvalue.DCGM_INT32_BLANK, "Failed to get matching value for ecc mode. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_BLANK, vgpu_config_values[x].mEccMode)
assert config_values[x].mPowerLimit.val == dcgmvalue.DCGM_INT32_BLANK, "Failed to get matching value for power limit. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_BLANK, vgpu_config_values[x].mPowerLimit.val)
assert config_values[x].mComputeMode == dcgmvalue.DCGM_INT32_BLANK, "Failed to get matching value for power limit. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_BLANK, vgpu_config_values[x].mComputeMode)
pass
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_get_embedded(handle, gpuIds):
"""
Verifies "Get vGPU Configuration" Basic functionality
"""
helper_dcgm_vgpu_config_get(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_get_standalone(handle, gpuIds):
"""
Verifies "Get vGPU Configuration" Basic functionality
"""
helper_dcgm_vgpu_config_get(handle)
def helper_dcgm_vgpu_config_enforce(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
vgpu_config_values = dcgm_structs.c_dcgmDeviceVgpuConfig_v1()
vgpu_config_values.SetBlank()
#Will throw exception on error
groupObj.vgpu.Set(vgpu_config_values)
groupObj.vgpu.Enforce()
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_enforce_embedded(handle, gpuIds):
"""
Verifies that the vGPU configuration can be enforced for a group
"""
helper_dcgm_vgpu_config_enforce(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_enforce_standalone(handle, gpuIds):
"""
Verifies that the vGPU configuration can be enforced for a group
"""
helper_dcgm_vgpu_config_enforce(handle)
def helper_dcgm_vgpu_config_injection(handle):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetDefaultGroup()
gpuIds = groupObj.GetGpuIds()
## Loop through vgpu_config_values to to check for correctness of values fetched from the hostengine
for x in xrange(0, len(gpuIds)):
helper_inject_vgpu_configuration(handle, gpuIds[x], dcgmvalue.DCGM_INT32_NOT_SUPPORTED,
dcgmvalue.DCGM_FP64_NOT_SUPPORTED, dcgmvalue.DCGM_INT32_NOT_SUPPORTED)
pass
## Get the target vGPU configuration to make sure that it's exact same as the one configured
vgpu_config_values = groupObj.vgpu.Get(dcgm_structs.DCGM_CONFIG_CURRENT_STATE)
assert len(vgpu_config_values) > 0, "Failed to get vGPU configuration using groupObj.vgpu.Get"
## Loop through vgpu_config_values to to check for correctness of values fetched from the hostengine
for x in xrange(0, len(gpuIds)):
assert vgpu_config_values[x].mEccMode == dcgmvalue.DCGM_INT32_NOT_SUPPORTED, "Failed to get matching value for ecc mode. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_NOT_SUPPORTED, vgpu_config_values[x].mEccMode)
assert vgpu_config_values[x].mComputeMode == dcgmvalue.DCGM_INT32_NOT_SUPPORTED, "Failed to get matching value for compute mode. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_NOT_SUPPORTED, vgpu_config_values[x].mComputeMode)
assert vgpu_config_values[x].mPowerLimit.val == dcgmvalue.DCGM_INT32_NOT_SUPPORTED, "Failed to get matching value for power limit. Expected: %d Received: %d" % (dcgmvalue.DCGM_INT32_NOT_SUPPORTED, vgpu_config_values[x].mPowerLimit.val)
pass
valToInsert = 100
## Loop through vgpu_config_values to to check for correctness of values fetched from the hostengine
for x in xrange(0, len(gpuIds)):
helper_inject_vgpu_configuration(handle, gpuIds[x], valToInsert,
valToInsert, valToInsert)
pass
## Get the target vGPU configuration to make sure that it's exact same as the one configured
vgpu_config_values = groupObj.vgpu.Get(dcgm_structs.DCGM_CONFIG_CURRENT_STATE)
assert len(vgpu_config_values) > 0, "Failed to get vGPU configuration using dcgmClientVgpuConfigGet"
## Loop through vgpu_config_values to to check for correctness of values fetched from the hostengine
for x in xrange(0, len(gpuIds)):
assert vgpu_config_values[x].mEccMode == valToInsert, "Failed to get matching value for ecc mode. Expected: %d Received: %d" % (valToInsert, vgpu_config_values[x].mEccMode)
assert vgpu_config_values[x].mComputeMode == valToInsert, "Failed to get matching value for Compute mode. Expected: %d Received: %d" % (valToInsert, vgpu_config_values[x].mComputeMode)
assert vgpu_config_values[x].mPowerLimit.val == valToInsert, "Failed to get matching value for power limit. Expected: %d Received: %d" % (valToInsert, vgpu_config_values[x].mPowerLimit.val)
pass
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_injection_embedded(handle, gpuIds):
"""
Injects values to the Cache manager and verifies if Config Manager can fetch those values
"""
helper_dcgm_vgpu_config_injection(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_injection_standalone(handle, gpuIds):
"""
Injects values to the Cache manager and verifies if Config Manager can fetch those values
"""
helper_dcgm_vgpu_config_injection(handle)
def helper_dcgm_vgpu_config_powerbudget(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
groupObj.AddGpu(gpuIds[0])
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
## Get Min and Max Power limit on the group
attributes = dcgm_agent.dcgmGetDeviceAttributes(handle, gpuIds[0])
## Verify that power is supported on the GPUs in the group
if dcgmvalue.DCGM_INT32_IS_BLANK(attributes.powerLimits.maxPowerLimit):
test_utils.skip_test("Needs Power limit to be supported on the GPU")
powerLimit = (attributes.powerLimits.maxPowerLimit + attributes.powerLimits.minPowerLimit)/2
vgpu_config_values = dcgm_structs.c_dcgmDeviceVgpuConfig_v1()
vgpu_config_values.SetBlank()
vgpu_config_values.mPowerLimit.type = dcgm_structs.DCGM_CONFIG_POWER_BUDGET_GROUP
vgpu_config_values.mPowerLimit.val = powerLimit * len(gpuIds) #Assumes homogenous GPUs
groupObj.vgpu.Set(vgpu_config_values)
vgpu_config_values = groupObj.vgpu.Get(dcgm_structs.DCGM_CONFIG_CURRENT_STATE)
assert len(vgpu_config_values) > 0, "Failed to get vGPU configuration using groupObj.vgpu.Get"
for x in xrange(0, len(gpuIds)):
if (vgpu_config_values[x].mPowerLimit.val != dcgmvalue.DCGM_INT32_NOT_SUPPORTED):
assert vgpu_config_values[x].mPowerLimit.type == dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL, "The power limit type for gpuId %d is incorrect. Returned: %d Expected :%d" % (x, vgpu_config_values[x].mPowerLimit.type, dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL)
assert vgpu_config_values[x].mPowerLimit.val == powerLimit, "The power limit value for gpuID %d is incorrect. Returned: %d Expected: %s" % (x, vgpu_config_values[x].mPowerLimit.val, powerLimit)
pass
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_powerbudget_embedded(handle, gpuIds):
"""
This method verifies setting a power budget for a group of GPUs
"""
helper_dcgm_vgpu_config_powerbudget(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_powerbudget_standalone(handle, gpuIds):
"""
This method verifies setting a power budget for a group of GPUs
"""
helper_dcgm_vgpu_config_powerbudget(handle, gpuIds)
def helper_verify_power_value_standalone(groupObj, expected_power):
"""
Helper Method to verify power value
"""
gpuIds = groupObj.GetGpuIds()
vgpu_config_values = groupObj.vgpu.Get(dcgm_structs.DCGM_CONFIG_CURRENT_STATE)
assert len(vgpu_config_values) > 0, "Failed to get vGPU configuration using dcgmClientVgpuConfigGet"
for x in xrange(0, len(gpuIds)):
if (vgpu_config_values[x].mPowerLimit.val != dcgmvalue.DCGM_INT32_NOT_SUPPORTED):
assert vgpu_config_values[x].mPowerLimit.type == dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL, \
"The power limit type for gpuId %d is incorrect. Returned: %d Expected :%d" \
% (x, vgpu_config_values[x].mPowerLimit.type, dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL)
assert vgpu_config_values[x].mPowerLimit.val == expected_power, "The power limit value for gpuID %d is incorrect. Returned: %d Expected: %d" \
% (x, vgpu_config_values[x].mPowerLimit.val, expected_power)
pass
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_config_power_enforce_standalone(handle, gpuIds):
"""
Checks if DCGM can enforce the power settings if it's changed behind the scenes
"""
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
groupObj.AddGpu(gpuIds[0])
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
gpuId = gpuIds[0]
#Make sure that the power management limit is updating fast enough to look at
fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handleObj.handle, gpuId, dcgm_fields.DCGM_FE_GPU, dcgm_fields.DCGM_FI_DEV_POWER_MGMT_LIMIT)
sleepTime = 1.2 * (fieldInfo.monitorIntervalUsec / 1000000.0)
## Get Min and Max Power limit on the group
attributes = systemObj.discovery.GetGpuAttributes(gpuId)
## Verify that power is supported on the GPUs in the group
if dcgmvalue.DCGM_INT32_IS_BLANK(attributes.powerLimits.maxPowerLimit):
test_utils.skip_test("Needs Power limit to be supported on the GPU")
powerLimit_set_dcgmi = (attributes.powerLimits.maxPowerLimit + attributes.powerLimits.minPowerLimit)/2
powerLimit_set_nvsmi = attributes.powerLimits.maxPowerLimit
vgpu_config_values = dcgm_structs.c_dcgmDeviceVgpuConfig_v1()
vgpu_config_values.SetBlank()
vgpu_config_values.mPowerLimit.type = dcgm_structs.DCGM_CONFIG_POWER_CAP_INDIVIDUAL
vgpu_config_values.mPowerLimit.val = powerLimit_set_dcgmi
groupObj.vgpu.Set(vgpu_config_values)
logger.info("Verify if dcgmi configured value has taken effect")
helper_verify_power_value_standalone(groupObj, powerLimit_set_dcgmi)
## Change Power limit to max from external entity like nvidia-smi
assert 0 == apps.NvidiaSmiApp(["-pl", str(powerLimit_set_nvsmi), "-i", str(gpuIds[0])]).run(), \
"Nvidia smi couldn't set the power limit"
systemObj.UpdateAllFields(1)
logger.info("Sleeping for %f seconds to allow the power limit to update in the cache" % sleepTime)
time.sleep(sleepTime)
logger.info("Verify if nvsmi configured value has taken effect")
helper_verify_power_value_standalone(groupObj, powerLimit_set_nvsmi)
groupObj.vgpu.Enforce()
logger.info("Verify if dcgmi enforced value has taken effect")
helper_verify_power_value_standalone(groupObj, powerLimit_set_dcgmi)
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_default_status_handler(handle, gpuIds):
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
groupObj = systemObj.GetEmptyGroup("test1")
## Add first GPU to the group
groupObj.AddGpu(gpuIds[0])
gpuIds = groupObj.GetGpuIds() #Only reference GPUs we are testing against
vgpu_config_values = dcgm_structs.c_dcgmDeviceVgpuConfig_v1()
vgpu_config_values.SetBlank()
groupObj.vgpu.Set(vgpu_config_values)
vgpu_config_values = groupObj.vgpu.Get(dcgm_structs.DCGM_CONFIG_CURRENT_STATE)
assert len(vgpu_config_values) > 0, "Failed to work with NULL status handle"
vgpu_config_values = groupObj.vgpu.Enforce()
newPolicy = dcgm_structs.c_dcgmPolicy_v1()
newPolicy.version = dcgm_structs.dcgmPolicy_version1
newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_MAX_PAGES_RETIRED
newPolicy.parms[2].tag = 1
newPolicy.parms[2].val.llval = 5
ret = dcgm_agent.dcgmPolicySet(handle, groupObj.GetId(), newPolicy, 0)
assert (ret != dcgm_structs.DCGM_ST_BADPARAM), "Failed to work with NULL status handle: %d" % ret
policy = dcgm_agent.dcgmPolicyGet(handle, groupObj.GetId(), len(gpuIds), 0)
'''
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_as_root()
def test_dcgm_vgpu_configure_ecc_mode(handle, gpuIds):
test_utils.skip_test("Skipping this test until bug 200377294 is fixed")
groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_EMPTY, "test1")
validDevice = -1
for x in gpuIds:
fvSupported = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, x, [dcgm_fields.DCGM_FI_DEV_RETIRED_DBE])
if (fvSupported[0].value.i64 != dcgmvalue.DCGM_INT64_NOT_SUPPORTED):
validDevice = x
break
if (validDevice == -1):
test_utils.skip_test("Can only run if at least one GPU with ECC is present")
ret = dcgm_agent.dcgmGroupAddDevice(handle, groupId, validDevice)
assert (ret == dcgm_structs.DCGM_ST_OK),"Failed to add a device to the group %d. Return %d" % (groupId.value, ret)
groupInfo = dcgm_agent.dcgmGroupGetInfo(handle, groupId)
#Create a status handle
status_handle = dcgm_agent.dcgmStatusCreate()
## Get original ECC mode on the device
vgpu_config_values = dcgm_agent_internal.dcgmVgpuConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle)
assert len(vgpu_config_values) > 0, "Failed to work with NULL status handle"
eccmodeOnGroupExisting = vgpu_config_values[0].mEccMode
if eccmodeOnGroupExisting == 0:
eccmodeOnGroupToSet = 1
else:
eccmodeOnGroupToSet = 0
#print eccmodeOnGroupExisting
#print eccmodeOnGroupToSet
## Toggle the ECC mode on the group
vgpu_config_values = dcgm_structs.c_dcgmDeviceVgpuConfig_v1()
vgpu_config_values.mEccMode = eccmodeOnGroupToSet
vgpu_config_values.mComputeMode = dcgmvalue.DCGM_INT32_BLANK
vgpu_config_values.mPowerLimit.type = dcgmvalue.DCGM_INT32_BLANK
vgpu_config_values.mPowerLimit.val = dcgmvalue.DCGM_INT32_BLANK
#Clear the status handle to log the errors while setting the config
ret = dcgm_agent.dcgmStatusClear(status_handle)
assert ret == dcgm_structs.DCGM_ST_OK, "Failed to clear the status handle. Return %d" %ret
try:
ret = dcgm_agent_internal.dcgmVgpuConfigSet(handle, groupId, vgpu_config_values, status_handle)
except dcgm_structs.DCGMError as e:
pass
errors = helper_get_status_list(status_handle)
if len(errors) > 0:
for error in errors:
if error.status == dcgm_structs.DCGM_ST_RESET_REQUIRED:
test_utils.skip_test("Skipping the test - Unable to reset the Gpu, FieldId - %d, Return - %d" % (error.fieldId, error.status))
else:
test_utils.skip_test("Skipping the test - Unable to set the ECC mode. FieldId - %d, Return %d" % (error.fieldId,error.status))
#Sleep after reset and then apply update for it to occur
time.sleep(2)
dcgm_agent.dcgmUpdateAllFields(handle, 1)
#Clear the status handle to log the errors while setting the config
ret = dcgm_agent.dcgmStatusClear(status_handle)
assert ret == dcgm_structs.DCGM_ST_OK, "Failed to clear the status handle. Return %d" %ret
#Get the current configuration
config_values = dcgm_agent_internal.dcgmVgpuConfigGet(handle, groupId, dcgm_structs.DCGM_CONFIG_CURRENT_STATE, groupInfo.count, status_handle)
assert len(config_values) > 0, "Failed to get configuration using dcgmiVgpuConfigGet"
assert config_values[0].mEccMode == (eccmodeOnGroupToSet), "ECC mode different from the set value"
@test_utils.run_with_standalone_host_engine(60)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_vgpu_attributes(handle, gpuIds):
"""
Verifies that vGPU attributes are properly queried
"""
vgpuAttributes = dcgm_agent_internal.dcgmGetVgpuDeviceAttributes(handle, gpuIds[0])
assert vgpuAttributes.activeVgpuInstanceCount >= 0, "Active vGPU instance count is negative!"
if (vgpuAttributes.activeVgpuInstanceCount > 0):
vgpuInstanceAttributes = dcgm_agent_internal.dcgmGetVgpuInstanceAttributes(handle, vgpuAttributes.activeVgpuInstanceIds[0])
assert len(vgpuInstanceAttributes.vmName) > 0, "Active vGPU VM name is blank!"
| DCGM-master | testing/python3/tests/test_vgpu.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent
import dcgm_agent_internal
import logger
import test_utils
import dcgm_fields
import pydcgm
from dcgm_structs import dcgmExceptionClass
import time
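#DCGM field timestamps are integer microseconds since 1970; the helper below converts time.time() to that unit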
def get_usec_since_1970():
sec = time.time()
return int(sec * 1000000.0)
def helper_verify_fv_equal(fv1, fv2):
'''
Helper function to verify that fv1 == fv2 with useful errors if they are not equal. An
assertion is thrown from this if they are not equal
'''
#assert fv1.version == fv2.version #Don't check version. We may be comparing injected values to retrieved ones
assert fv1.fieldId == fv2.fieldId, "%d != %d" % (fv1.fieldId, fv2.fieldId)
assert fv1.status == fv2.status, "%d != %d" % (fv1.status, fv2.status)
assert fv1.fieldType == fv2.fieldType, "%d != %d" % (fv1.fieldType, fv2.fieldType)
assert fv1.ts == fv2.ts, "%d != %d" % (fv1.ts, fv2.ts)
assert fv1.value.i64 == fv2.value.i64, "%d != %d" % (fv1.value.i64, fv2.value.i64)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(1)
def test_dcgm_injection_agent(handle, gpuIds):
"""
Verifies that injection works with the agent host engine
"""
gpuId = gpuIds[0]
#Make a base value that is good for starters
fvGood = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
fvGood.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
fvGood.fieldId = dcgm_fields.DCGM_FI_DEV_ECC_CURRENT
fvGood.status = 0
fvGood.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
fvGood.ts = get_usec_since_1970()
fvGood.value.i64 = 1
fieldInfoBefore = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fvGood.fieldId)
countBefore = fieldInfoBefore.numSamples
#This will throw an exception if it fails
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvGood)
fieldInfoAfter = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fvGood.fieldId)
countAfter = fieldInfoAfter.numSamples
assert countAfter > countBefore, "Expected countAfter %d > countBefore %d after injection" % (countAfter, countBefore)
#Fetch the value we just inserted and verify its attributes are the same
fvFetched = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, gpuId, [fvGood.fieldId, ])[0]
helper_verify_fv_equal(fvFetched, fvGood)
#Should be able to insert a null timestamp. The agent will just use "now"
fvAlsoGood = fvGood
fvAlsoGood.ts = 0
#This will throw an exception if it fails
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvAlsoGood)
#Now make some attributes bad and expect an error
fvBad = fvGood
fvBad.fieldType = ord(dcgm_fields.DCGM_FT_DOUBLE)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM)):
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvBad)
fvGood.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
""" TODO: DCGM-2130 - Restore this test when protobuf is removed
#Now make some attributes bad and expect an error
fvBad = fvGood
fvBad.version = 0
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvBad)
fvGood.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
"""
fvBad = fvGood
fvBad.fieldId = dcgm_fields.DCGM_FI_MAX_FIELDS + 100
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM)):
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvBad)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(1)
def test_dcgm_injection_remote(handle, gpuIds):
"""
Verifies that injection works with the remote host engine
"""
gpuId = gpuIds[0]
#Make a base value that is good for starters
fvGood = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
fvGood.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
fvGood.fieldId = dcgm_fields.DCGM_FI_DEV_ECC_CURRENT
fvGood.status = 0
fvGood.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
fvGood.ts = get_usec_since_1970()
fvGood.value.i64 = 1
fieldInfoBefore = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fvGood.fieldId)
countBefore = fieldInfoBefore.numSamples
#This will throw an exception if it fails
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvGood)
fieldInfoAfter = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fvGood.fieldId)
countAfter = fieldInfoAfter.numSamples
assert countAfter > countBefore, "Expected countAfter %d > countBefore %d after injection" % (countAfter, countBefore)
#Fetch the value we just inserted and verify its attributes are the same
fvFetched = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, gpuId, [fvGood.fieldId, ])[0]
helper_verify_fv_equal(fvFetched, fvGood)
#Should be able to insert a null timestamp. The agent will just use "now"
fvAlsoGood = fvGood
fvAlsoGood.ts = 0
#This will throw an exception if it fails
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvAlsoGood)
#Now make some attributes bad and expect an error
fvBad = fvGood
fvBad.fieldType = ord(dcgm_fields.DCGM_FT_DOUBLE)
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM)):
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvBad)
fvGood.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
""" TODO: DCGM-2130 - Restore this test when protobuf is removed
#Now make some attributes bad and expect an error
fvBad = fvGood
fvBad.version = 0
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_VER_MISMATCH)):
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvBad)
fvGood.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
"""
fvBad = fvGood
fvBad.fieldId = dcgm_fields.DCGM_FI_MAX_FIELDS + 100
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM)):
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvBad)
def helper_verify_multi_values(fieldValues, order, injectedValues):
"""
Helper to verify that a returned list of values is internally consistent
The 'fieldValues' parameter is expected to be the result of a
dcgm*GetMultipleValuesForField request
This function will assert if there is a problem detected
"""
assert order == dcgm_structs.DCGM_ORDER_ASCENDING or order == dcgm_structs.DCGM_ORDER_DESCENDING, "Invalid order %d" % order
if len(fieldValues) < 2:
return
if order == dcgm_structs.DCGM_ORDER_DESCENDING:
injectedValues = list(reversed(injectedValues))
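#Reversing the injected list lines index i up with the i-th returned value when fetching in descending order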
Nerrors = 0
for i, fv in enumerate(fieldValues):
injectedFv = injectedValues[i]
if fv.ts == 0:
logger.error("Null timestamp at index %d" % i)
Nerrors += 1
if fv.ts != injectedFv.ts:
logger.error("Timestamp mismatch at index %d: read %d. injected %d" % (i, fv.ts, injectedFv.ts))
Nerrors += 1
if fv.value.i64 != injectedFv.value.i64:
logger.error("Value mismatch at index %d: read %d. injected %d" % (i, fv.value.i64, injectedFv.value.i64))
Nerrors += 1
#Don't compare against the previous value until index >= 1
if i < 1:
continue
fvPrev = fieldValues[i-1]
if order == dcgm_structs.DCGM_ORDER_ASCENDING:
if fv.ts <= fvPrev.ts:
logger.error("Out of order ASC timestamp at index %d, fv.ts %d, fvPrev.ts %d" % (i, fv.ts, fvPrev.ts))
Nerrors += 1
else: #Descending
if fv.ts >= fvPrev.ts:
logger.error("Out of order DESC timestamp at index %d, fv.ts %d, fvPrev.ts %d" % (i, fv.ts, fvPrev.ts))
Nerrors += 1
assert Nerrors < 1, "Comparison errors occurred"
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(1)
def test_dcgm_injection_multi_fetch_agent(handle, gpuIds):
"""
Verify that multi-fetches work with the agent
"""
gpuId = gpuIds[0]
NinjectValues = 10
firstTs = get_usec_since_1970()
lastTs = 0
injectedValues = []
#Make a base value that is good for starters
#Inject the values we're going to fetch
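#Give each sample a strictly increasing timestamp (firstTs + i) so ordering can be verified after the fetch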
for i in range(NinjectValues):
fvGood = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
fvGood.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
fvGood.fieldId = dcgm_fields.DCGM_FI_DEV_ECC_PENDING
fvGood.status = 0
fvGood.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
fvGood.ts = firstTs + i
fvGood.value.i64 = 1 + i
#This will throw an exception if it fails
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvGood)
injectedValues.append(fvGood)
#Fetch in forward order with no timestamp. Verify
startTs = 0
endTs = 0
maxCount = 2 * NinjectValues #Pick a bigger number so we can verify only NinjectValues come back
order = dcgm_structs.DCGM_ORDER_ASCENDING
fvFetched = dcgm_agent_internal.dcgmGetMultipleValuesForField(handle, gpuId, fvGood.fieldId, maxCount, startTs, endTs, order)
assert len(fvFetched) == NinjectValues, "Expected %d rows. Got %d" % (NinjectValues, len(fvFetched))
helper_verify_multi_values(fvFetched, order, injectedValues)
#Now do the same fetch with descending values
startTs = 0
endTs = 0
maxCount = 2 * NinjectValues #Pick a bigger number so we can verify only NinjectValues come back
order = dcgm_structs.DCGM_ORDER_DESCENDING
fvFetched = dcgm_agent_internal.dcgmGetMultipleValuesForField(handle, gpuId, fvGood.fieldId, maxCount, startTs, endTs, order)
assert len(fvFetched) == NinjectValues, "Expected %d rows. Got %d" % (NinjectValues, len(fvFetched))
helper_verify_multi_values(fvFetched, order, injectedValues)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(1)
def test_dcgm_injection_multi_fetch_remote(handle, gpuIds):
"""
Verify that multi-fetches work with the remote host engine
"""
gpuId = gpuIds[0]
NinjectValues = 10
firstTs = get_usec_since_1970()
lastTs = 0
injectedValues = []
#Inject the values we're going to fetch
for i in range(NinjectValues):
fvGood = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
fvGood.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
fvGood.fieldId = dcgm_fields.DCGM_FI_DEV_ECC_PENDING
fvGood.status = 0
fvGood.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
fvGood.ts = firstTs + i
fvGood.value.i64 = 1 + i
#This will throw an exception if it fails
dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, fvGood)
injectedValues.append(fvGood)
lastTs = fvGood.ts
#Fetch in forward order with no timestamp. Verify
startTs = 0
endTs = 0
maxCount = 2 * NinjectValues #Pick a bigger number so we can verify only NinjectValues come back
order = dcgm_structs.DCGM_ORDER_ASCENDING
fvFetched = dcgm_agent_internal.dcgmGetMultipleValuesForField(handle, gpuId, fvGood.fieldId, maxCount, startTs, endTs, order)
assert len(fvFetched) == NinjectValues, "Expected %d rows. Got %d" % (NinjectValues, len(fvFetched))
helper_verify_multi_values(fvFetched, order, injectedValues)
#Now do the same fetch with descending values
startTs = 0
endTs = 0
maxCount = 2 * NinjectValues #Pick a bigger number so we can verify only NinjectValues come back
order = dcgm_structs.DCGM_ORDER_DESCENDING
fvFetched = dcgm_agent_internal.dcgmGetMultipleValuesForField(handle, gpuId, fvGood.fieldId, maxCount, startTs, endTs, order)
assert len(fvFetched) == NinjectValues, "Expected %d rows. Got %d" % (NinjectValues, len(fvFetched))
helper_verify_multi_values(fvFetched, order, injectedValues)
def helper_test_dcgm_injection_summaries(handle, gpuIds):
gpuId = gpuIds[0]
# Watch the field we're inserting into
dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL, 1, 3600.0,
10000)
handleObj = pydcgm.DcgmHandle(handle=handle)
systemObj = handleObj.GetSystem()
#Make a base value that is good for starters
field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
field.fieldId = dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL
field.status = 0
field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
baseTime = get_usec_since_1970()
for i in range(0, 10):
field.ts = baseTime + i
field.value.i64 = i
ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, field)
assert (ret == dcgm_structs.DCGM_ST_OK)
time.sleep(1)
systemObj.UpdateAllFields(1)
tmpMask = dcgm_structs.DCGM_SUMMARY_MIN | dcgm_structs.DCGM_SUMMARY_MAX
tmpMask = tmpMask | dcgm_structs.DCGM_SUMMARY_AVG | dcgm_structs.DCGM_SUMMARY_DIFF
# Pass a window entirely before the injected data so the first query returns no data
with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NO_DATA)):
request = dcgm_agent.dcgmGetFieldSummary(handle, dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL,
dcgm_fields.DCGM_FE_GPU, gpuId, tmpMask, baseTime - 60,
baseTime - 30)
# Now adjust the time so we get values
request = dcgm_agent.dcgmGetFieldSummary(handle, dcgm_fields.DCGM_FI_DEV_ECC_SBE_AGG_TOTAL,
dcgm_fields.DCGM_FE_GPU, gpuId, tmpMask, 0, 0)
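#With samples 0..9 injected, expect min=0, max=9, integer average=4, and diff (max-min)=9, in the order the summary bits were requested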
assert (request.response.values[0].i64 == 0)
assert (request.response.values[1].i64 == 9)
assert (request.response.values[2].i64 == 4)
assert (request.response.values[3].i64 == 9)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(1)
def test_dcgm_injection_summaries_embedded(handle, gpuIds):
"""
Verifies that inject works and we can get summaries of that data
"""
helper_test_dcgm_injection_summaries(handle, gpuIds)
| DCGM-master | testing/python3/tests/test_injection.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import test_utils
import dcgm_structs
import dcgm_fields
import dcgm_agent
from dcgm_structs import dcgmExceptionClass
@test_utils.run_with_embedded_host_engine()
def test_dcgm_field_group_duplicate_name(handle):
fieldIds = [dcgm_fields.DCGM_FI_DRIVER_VERSION, ]
handle = pydcgm.DcgmHandle(handle)
fieldGroup = pydcgm.DcgmFieldGroup(handle, "dupeme", fieldIds)
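#Creating a second field group with the same name should be rejected with DCGM_ST_DUPLICATE_KEY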
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_DUPLICATE_KEY)):
fieldGroup2 = pydcgm.DcgmFieldGroup(handle, "dupeme", fieldIds)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_field_group_add_remove(handle):
fieldIds = [dcgm_fields.DCGM_FI_DRIVER_VERSION, dcgm_fields.DCGM_FI_DEV_NAME, dcgm_fields.DCGM_FI_DEV_BRAND]
handle = pydcgm.DcgmHandle(handle)
fieldGroup = pydcgm.DcgmFieldGroup(handle, "mygroup", fieldIds)
#Save this ID before we mess with the object
fieldGroupId = fieldGroup.fieldGroupId
#This will raise an exception on error
fieldGroupInfo = dcgm_agent.dcgmFieldGroupGetInfo(handle.handle, fieldGroupId)
#Delete the field group and make sure it's gone from the host engine
del(fieldGroup)
fieldGroup = None
with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NO_DATA)):
fieldGroupInfo = dcgm_agent.dcgmFieldGroupGetInfo(handle.handle, fieldGroupId)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_field_group_info(handle):
fieldIds = [dcgm_fields.DCGM_FI_DRIVER_VERSION, dcgm_fields.DCGM_FI_DEV_NAME, dcgm_fields.DCGM_FI_DEV_BRAND]
handle = pydcgm.DcgmHandle(handle)
fieldGroup = pydcgm.DcgmFieldGroup(handle, "mygroup", fieldIds)
#Get the field group we just added to verify it was added and the metadata is correct
fieldGroupInfo = dcgm_agent.dcgmFieldGroupGetInfo(handle.handle, fieldGroup.fieldGroupId)
assert fieldGroupInfo.version == dcgm_structs.dcgmFieldGroupInfo_version1, fieldGroupInfo.version
assert fieldGroupInfo.fieldGroupId == int(fieldGroup.fieldGroupId.value), "%s != %s" %(str(fieldGroupInfo.fieldGroupId), str(fieldGroup.fieldGroupId))
assert fieldGroupInfo.fieldGroupName == fieldGroup.name, str(fieldGroupInfo.fieldGroupName)
assert fieldGroupInfo.numFieldIds == len(fieldIds), fieldGroupInfo.numFieldIds
for i, fieldId in enumerate(fieldIds):
assert fieldGroupInfo.fieldIds[i] == fieldId, "i = %d, %d != %d" % (i, fieldGroupInfo.fieldIds[i], fieldId)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_field_group_get_by_name(handle):
fieldIds = [dcgm_fields.DCGM_FI_DRIVER_VERSION, dcgm_fields.DCGM_FI_DEV_NAME, dcgm_fields.DCGM_FI_DEV_BRAND]
handle = pydcgm.DcgmHandle(handle)
fieldGroupName = "mygroup"
fieldGroupObj = pydcgm.DcgmFieldGroup(handle, "mygroup", fieldIds)
findByNameId = handle.GetSystem().GetFieldGroupIdByName(fieldGroupName)
assert findByNameId is not None, "Expected field group ID. Got None"
assert int(findByNameId.value) == int(fieldGroupObj.fieldGroupId.value), "Got field group ID handle mismatch %s != %s" % (findByNameId, fieldGroupObj.fieldGroupId)
#Make sure we can create an object from our found id and delete it
fieldGroupObj2 = pydcgm.DcgmFieldGroup(dcgmHandle=handle, fieldGroupId=findByNameId)
fieldGroupObj2.Delete()
| DCGM-master | testing/python3/tests/test_field_group.py |