# NOTE(review): the three lines below are a dataset-extraction artifact (a markdown
# table header), not Python code. Preserved as comments so the file parses:
# python_code stringlengths 0 679k | repo_name stringlengths 9 41
# | file_path stringlengths 6 149 |
# ---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test the health module for DCGM
import pydcgm
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent
import dcgm_agent_internal
import dcgmvalue
import logger
import test_utils
import dcgm_fields
import time
from ctypes import *
import sys
import os
import pprint
import dcgm_internal_helpers
import dcgm_field_injection_helpers
import dcgm_errors
def skip_test_if_unhealthy(groupObj):
    """Skip the current test if the group is already failing health checks."""
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    if responseV4.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_PASS:
        return

    # Collect every incident's error message and report them all in the skip reason
    errorMsgs = [responseV4.incidents[i].error.msg for i in range(responseV4.incidentCount)]
    msg = "Skipping health check test because we are already unhealthy: " + ", ".join(errorMsgs)
    test_utils.skip_test(msg)
def helper_dcgm_health_set_pcie(handle):
    """
    Verifies that the set/get path for the health monitor is working.
    Checks for call errors are done in the bindings.
    """
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetDefaultGroup()

    # Start from a clean slate: no watch systems enabled
    dcgmGroup.health.Set(0)
    assert (dcgmGroup.health.Get() == 0)

    # Enable the PCIe watch and confirm it is read back
    watchBits = dcgm_structs.DCGM_HEALTH_WATCH_PCIE
    dcgmGroup.health.Set(watchBits)
    assert (dcgmGroup.health.Get() == watchBits)

    # Set it back to 0 and validate it
    dcgmGroup.health.Set(0)
    assert (dcgmGroup.health.Get() == 0)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_health_set_pcie_embedded(handle):
    # Embedded host-engine variant of the PCIe health set/get test
    helper_dcgm_health_set_pcie(handle)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_health_set_pcie_standalone(handle):
    # Standalone host-engine variant of the PCIe health set/get test
    helper_dcgm_health_set_pcie(handle)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_health_invalid_group_embedded(handle):
    '''
    Validate that group operations fail if a bogus group ID is provided
    '''
    invalidGroupId = c_void_p(99)  # group ID that was never created in the host engine
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = pydcgm.DcgmGroup(handleObj, groupId=invalidGroupId)

    # Each health API should raise DCGM_ST_NOT_CONFIGURED for the unknown group
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
        groupObj.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_PCIE)
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
        groupObj.health.Get()
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
        groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
def helper_dcgm_health_check_pcie(handle, gpuIds):
    """
    Verifies that a check error occurs when an error is injected.
    Checks for call errors are done in the bindings except dcgmClientHealthCheck.
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]

    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_PCIE
    groupObj.health.Set(newSystems)
    skip_test_if_unhealthy(groupObj)

    # Inject a baseline sample 50 seconds in the past so the later sample yields a replay rate
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
                                                              0, -50)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    # We expect that there will be no data here yet.
    # Fix: use the v4 response version explicitly, consistent with every other
    # Check() call in this module (the original call used the default version
    # and discarded the result into an unused local).
    groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)

    # Inject an error into PCIe: a large replay-counter jump timestamped in the future
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
                                                              10, 100) # set the injected data into the future
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_PCIE)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_PCI_REPLAY_RATE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_health_check_pcie_embedded(handle, gpuIds):
    # Embedded host-engine variant of the PCIe injected-error health check
    helper_dcgm_health_check_pcie(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_health_check_pcie_standalone(handle, gpuIds):
    # Standalone host-engine variant of the PCIe injected-error health check
    helper_dcgm_health_check_pcie(handle, gpuIds)
def helper_test_dcgm_health_check_mem_dbe(handle, gpuIds):
    """
    Verifies that the health check will fail if there's 1 DBE and it continues to be
    reported
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]

    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_MEM
    groupObj.health.Set(newSystems)
    skip_test_if_unhealthy(groupObj)

    # Inject 2 volatile DBEs timestamped 50 seconds in the past
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
                                                              2, -50) # set the injected data to 50 seconds ago
    assert (ret == dcgm_structs.DCGM_ST_OK)

    # The DBE count should produce a FAIL incident against our GPU
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_MEM)
    assert (responseV4.incidents[0].health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_VOLATILE_DBE_DETECTED)

    # Give it the same failure 45 seconds ago and make sure we fail again
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
                                                              2, -45)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_MEM)
    assert (responseV4.incidents[0].health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_VOLATILE_DBE_DETECTED)

    # Make the failure count go down to zero. This should clear the error
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
                                                              0, -40)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_PASS)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_health_check_mem_dbe(handle, gpuIds):
    # Standalone host-engine variant of the volatile-DBE health check
    helper_test_dcgm_health_check_mem_dbe(handle, gpuIds)
def helper_verify_dcgm_health_watch_mem_result(groupObj, errorCode, verifyFail=False, gpuId=0):
    """
    Verify that memory health check result is what was expected. If verifyFail is False, verify a pass result,
    otherwise verify a failure occurred with the expected errorCode.
    """
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    if not verifyFail:
        assert (responseV4.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_PASS)
        return

    assert (responseV4.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_MEM)
    assert (responseV4.incidents[0].health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    # Fix: errorCode was previously accepted but never checked, even though callers
    # pass specific dcgm_errors.DCGM_FR_* codes they expect the incident to carry.
    assert (responseV4.incidents[0].error.code == errorCode), \
        "Expected error code %d but found %d" % (errorCode, responseV4.incidents[0].error.code)
def helper_reset_page_retirements(handle, gpuId=0, reset_sbe=False):
    """
    Helper function to reset non volatile page retirements.
    Injects a zero reading (timestamped 30 seconds in the past) for the DBE
    retirement counter, and for the SBE counter as well when reset_sbe is True.
    """
    fieldsToClear = [dcgm_fields.DCGM_FI_DEV_RETIRED_DBE]
    if reset_sbe:
        fieldsToClear.append(dcgm_fields.DCGM_FI_DEV_RETIRED_SBE)

    for fieldId in fieldsToClear:
        ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, fieldId,
                                                                  0, -30) # set the injected data to 30 seconds ago
        assert (ret == dcgm_structs.DCGM_ST_OK)
def helper_test_dcgm_health_check_mem_retirements(handle, gpuIds):
    """
    Verifies that the health check will fail when the number of non-volatile page retirements
    match the failure criteria.
    Specifically tests the criteria given in DCGM-458.
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]

    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_MEM
    groupObj.health.Set(newSystems)
    skip_test_if_unhealthy(groupObj)

    ####### Tests #######
    #### Condition 1 ####
    ### Fail if the total number of page retirements (due to DBE or SBE) meets or exceeds 60

    ## Test 1: >= 60 page retirements total should fail
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
                                                              30, -30) # set the injected data to 30 seconds ago
    assert (ret == dcgm_structs.DCGM_ST_OK)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
                                                              30, -30) # set the injected data to 30 seconds ago
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # Make sure we get a failure
    helper_verify_dcgm_health_watch_mem_result(groupObj, dcgm_errors.DCGM_FR_RETIRED_PAGES_LIMIT, verifyFail=True,
                                               gpuId=gpuId)
    # Reset the field and verify clean result
    helper_reset_page_retirements(handle, gpuId=gpuId, reset_sbe=True)
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)

    ## Test 2: 59 page retirements total should pass
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
                                                              10, -30) # set the injected data to 30 seconds ago
    assert (ret == dcgm_structs.DCGM_ST_OK)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
                                                              49, -30) # set the injected data to 30 seconds ago
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # Make sure we pass
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)
    # Reset the field and verify clean result
    helper_reset_page_retirements(handle, gpuId=gpuId, reset_sbe=True)
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)

    #### Condition 2 ####
    ### Fail if > 15 page retirement due to DBEs AND more than 1 DBE page retirement in past week

    ## Test 1: 15 page retirements due to DBEs should pass
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
                                                              15, -30) # set the injected data to 30 seconds ago
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # Make sure we pass
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)
    # Reset the field and verify clean result
    helper_reset_page_retirements(handle, gpuId=gpuId)
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)

    ## Test 2: 16 page retirements due to DBE should fail (since all 16 are inserted in current week)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
                                                              16, -30) # set the injected data to 30 seconds ago
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # Make sure we get a failure
    helper_verify_dcgm_health_watch_mem_result(groupObj, dcgm_errors.DCGM_FR_RETIRED_PAGES_DBE_LIMIT,
                                               verifyFail=True, gpuId=gpuId)
    # Reset the field and verify clean result
    helper_reset_page_retirements(handle, gpuId=gpuId)
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)

    ## Test 3: 16 page retirements due to SBEs should pass
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
                                                              16, -30) # set the injected data to 30 seconds ago
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # Make sure we pass
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)
    # Reset the field and verify clean result
    helper_reset_page_retirements(handle, gpuId=gpuId, reset_sbe=True)
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)

    ## Test 4: 16 page retirements due to DBEs (with first 15 pages inserted more than 1 week ago,
    # and 16th page inserted in current week) should pass
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
                                                              15, -604860) # set the injected data to 7 days and 1 minute ago
    assert (ret == dcgm_structs.DCGM_ST_OK)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
                                                              1, -30) # set the injected data to 30 seconds ago
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # Make sure we pass
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)
    # Reset the field and verify clean result
    helper_reset_page_retirements(handle, gpuId=gpuId)
    helper_verify_dcgm_health_watch_mem_result(groupObj, 0, gpuId=gpuId)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_health_check_mem_retirements_standalone(handle, gpuIds):
    # Standalone host-engine variant of the page-retirement health check
    helper_test_dcgm_health_check_mem_retirements(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_health_check_mem_retirements_embedded(handle, gpuIds):
    # Embedded host-engine variant of the page-retirement health check
    helper_test_dcgm_health_check_mem_retirements(handle, gpuIds)
def helper_test_dcgm_health_check_mem(handle, gpuIds):
    """
    Verifies that a check error occurs when an error is injected
    Checks for call errors are done in the bindings except dcgmClientHealthCheck
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]

    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_MEM
    groupObj.health.Set(newSystems)
    skip_test_if_unhealthy(groupObj)

    # Baseline: no pending retirements 50 seconds ago
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING,
                                                              0, -50)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    # Inject pending page retirements 40 seconds ago; should produce a WARN incident
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING,
                                                              100, -40)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.incidentCount == 1), "Expected 1 incident but found %d" % responseV4.incidentCount
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_MEM)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_PENDING_PAGE_RETIREMENTS),\
            "Expected %d but found %d" % (dcgm_errors.DCGM_FR_PENDING_PAGE_RETIREMENTS, \
                responseV4.incidents[0].error.code)
    assert (responseV4.incidents[0].health == dcgm_structs.DCGM_HEALTH_RESULT_WARN),\
           "Expected warning but found %d" % responseV4.incidents[0].health

    # Clear the error
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING,
                                                              0, -35)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    # Make sure we've set the monitor frequency to less than 35 seconds - that will make us around
    # half or less of the 60 seconds we give the data before calling it stale.
    cmFieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, dcgm_fields.DCGM_FI_DEV_RETIRED_PENDING)
    assert cmFieldInfo.monitorIntervalUsec < 35000000
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_health_check_mem_standalone(handle, gpuIds):
    # Standalone host-engine variant of the pending-retirement health check
    helper_test_dcgm_health_check_mem(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_health_check_mem_embedded(handle, gpuIds):
    # Embedded host-engine variant of the pending-retirement health check
    helper_test_dcgm_health_check_mem(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_standalone_health_set_thermal(handle):
    """
    Verifies that the set/get path for the health monitor is working
    Checks for call errors are done in the bindings
    """
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetDefaultGroup()

    # Clear all watches first and confirm nothing is enabled
    dcgmGroup.health.Set(0)
    assert (dcgmGroup.health.Get() == 0)

    # Enable the thermal watch and confirm it is read back
    watchBits = dcgm_structs.DCGM_HEALTH_WATCH_THERMAL
    dcgmGroup.health.Set(watchBits)
    assert (dcgmGroup.health.Get() == watchBits)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_standalone_health_check_thermal(handle, gpuIds):
    """
    Verifies that a check error occurs when an error is injected
    Checks for call errors are done in the bindings except dcgmClientHealthCheck
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group

    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_THERMAL
    groupObj.health.Set(newSystems)
    skip_test_if_unhealthy(groupObj)

    # Baseline: no thermal violations 50 seconds ago
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuIds[0],
                                                              dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION, 0, -50)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)

    # we expect that there will be no data here
    #assert (dcgm_structs.DCGM_ST_OK == result or dcgm_structs.DCGM_ST_NO_DATA == result)

    # inject an error into thermal (violation time 10 seconds in the future)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuIds[0],
                                                              dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION, 1000, 10)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuIds[0])
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_THERMAL)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_CLOCK_THROTTLE_THERMAL)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_standalone_health_set_power(handle):
    """
    Verifies that the set/get path for the health monitor is working
    Checks for call errors are done in the bindings
    """
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetDefaultGroup()

    # Clear all watches first and confirm nothing is enabled
    dcgmGroup.health.Set(0)
    assert (dcgmGroup.health.Get() == 0)

    # Enable the power watch and confirm it is read back
    watchBits = dcgm_structs.DCGM_HEALTH_WATCH_POWER
    dcgmGroup.health.Set(watchBits)
    assert (dcgmGroup.health.Get() == watchBits)
def helper_check_health_response_v4(gpuIds, response):
    """
    Validate that a v4 health response reports a clean bill of health.

    Logs every problem found, skips the test if the system is genuinely
    unhealthy, and asserts that no problems were detected.
    """
    numErrors = 0

    if response.version == 0:
        logger.error("bad response.version x%X" % response.version)
        numErrors += 1

    if response.overallHealth != dcgm_structs.DCGM_HEALTH_RESULT_PASS:
        # A real unhealthy GPU is not a test failure; skip instead
        unhealthyMsg = "bad response.overallHealth %d. Are these GPUs really healthy?" % response.overallHealth
        logger.error(unhealthyMsg)
        numErrors += 1
        test_utils.skip_test(unhealthyMsg)

    if response.incidentCount > 0:
        logger.error("bad response.incidentCount %d > 0" % (response.incidentCount))
        numErrors += 1

    assert numErrors == 0, "Errors were encountered. See above."
def helper_run_dcgm_health_check_sanity(handle, gpuIds, system_to_check):
    """
    Verifies that the DCGM health checks return healthy for all GPUs on live systems.
    """
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetGroupWithGpuIds('testgroup', gpuIds)

    dcgmGroup.health.Set(system_to_check)
    dcgmSystem.UpdateAllFields(1)

    # Check() throws an exception on error; validate the response it returns
    responseV4 = dcgmGroup.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    helper_check_health_response_v4(gpuIds, responseV4)
################ Start health sanity checks
# The health sanity checks verify that that the DCGM health checks return healthy for all GPUs on live systems.
# Note: These tests can fail if a GPU is really unhealthy. We should give detailed feedback so that this is attributed
# to the GPU and not the test
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_pcie(handle, gpuIds):
    # Sanity check: PCIe watch should report healthy on live GPUs (embedded)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_PCIE)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_pcie_standalone(handle, gpuIds):
    # Sanity check: PCIe watch should report healthy on live GPUs (standalone)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_PCIE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_mem(handle, gpuIds):
    # Sanity check: memory watch should report healthy on live GPUs (embedded)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_MEM)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_mem_standalone(handle, gpuIds):
    # Sanity check: memory watch should report healthy on live GPUs (standalone)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_MEM)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_inforom(handle, gpuIds):
    # Sanity check: inforom watch should report healthy on live GPUs (embedded)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_INFOROM)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_inforom_standalone(handle, gpuIds):
    # Sanity check: inforom watch should report healthy on live GPUs (standalone)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_INFOROM)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_thermal(handle, gpuIds):
    # Sanity check: thermal watch should report healthy on live GPUs (embedded)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_THERMAL)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_thermal_standalone(handle, gpuIds):
    # Sanity check: thermal watch should report healthy on live GPUs (standalone)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_THERMAL)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_power(handle, gpuIds):
    # Sanity check: power watch should report healthy on live GPUs (embedded)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_POWER)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_power_standalone(handle, gpuIds):
    # Sanity check: power watch should report healthy on live GPUs (standalone)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_POWER)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_nvlink(handle, gpuIds):
    #We will get false failures if any nvlinks are down on the GPUs
    test_utils.skip_test_if_any_nvlinks_down(handle)
    # Sanity check: NvLink watch should report healthy on live GPUs (embedded)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_NVLINK)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_nvlink_standalone(handle, gpuIds):
    #We will get false failures if any nvlinks are down on the GPUs
    test_utils.skip_test_if_any_nvlinks_down(handle)
    # Sanity check: NvLink watch should report healthy on live GPUs (standalone)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_NVLINK)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_nvswitch_nonfatal(handle, gpuIds):
    # Sanity check: NvSwitch non-fatal watch should report healthy (embedded)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_NONFATAL)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_nvswitch_nonfatal_standalone(handle, gpuIds):
    # Sanity check: NvSwitch non-fatal watch should report healthy (standalone)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_NONFATAL)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_nvswitch_fatal(handle, gpuIds):
    # Sanity check: NvSwitch fatal watch should report healthy (embedded)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_FATAL)
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_health_check_sanity_nvswitch_fatal_standalone(handle, gpuIds):
    # Sanity check: NvSwitch fatal watch should report healthy (standalone)
    helper_run_dcgm_health_check_sanity(handle, gpuIds, dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_FATAL)
################ End health sanity checks
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_standalone_health_check_power(handle, gpuIds):
    """
    Verifies that a check error occurs when an error is injected
    Checks for call errors are done in the bindings except dcgmClientHealthCheck
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]

    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_POWER
    groupObj.health.Set(newSystems)

    # Baseline: no power violations 50 seconds ago
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
                                                              0, -50)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    skip_test_if_unhealthy(groupObj)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)

    # we expect that there will be no data here

    # inject an error into power (violation time 10 seconds in the future)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
                                                              1000, 10)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuIds[0])
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_POWER)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_CLOCK_THROTTLE_POWER)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_standalone_health_check_nvlink(handle, gpuIds):
    # Standalone host-engine variant of the NvLink CRC error-counter check
    helper_health_check_nvlink_error_counters(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_embedded_health_check_nvlink(handle, gpuIds):
    # Embedded host-engine variant of the NvLink CRC error-counter check
    helper_health_check_nvlink_error_counters(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_standalone_health_set_nvlink(handle):
    """
    Verifies that the set/get path for the health monitor is working
    Checks for call errors are done in the bindings
    """
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetDefaultGroup()

    # Clear all watches first and confirm nothing is enabled
    dcgmGroup.health.Set(0)
    assert (dcgmGroup.health.Get() == 0)

    # Enable the NvLink watch and confirm it is read back
    watchBits = dcgm_structs.DCGM_HEALTH_WATCH_NVLINK
    dcgmGroup.health.Set(watchBits)
    assert (dcgmGroup.health.Get() == watchBits)
def helper_health_check_nvlink_error_counters(handle, gpuIds):
    """
    Verifies that a check error occurs when an error is injected
    Checks for call errors are done in the bindings except dcgmClientHealthCheck
    """
    #We will get false failures if any nvlinks are down on the GPUs
    test_utils.skip_test_if_any_nvlinks_down(handle)

    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]

    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_NVLINK
    groupObj.health.Set(newSystems)

    # Baseline: zero CRC FLIT errors 50 seconds ago
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId,
                                                              dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL,
                                                              0, -50)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)

    # NOTE(review): this second zero-value injection duplicates the one above;
    # it appears redundant but is kept as-is to preserve behavior
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId,
                                                              dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL,
                                                              0, -50)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    # we expect that there will be no data here

    # inject an error into NV Link (counter jump 10 seconds in the future)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId,
                                                              dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL,
                                                              100, 10)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_NVLINK)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_NVLINK_ERROR_THRESHOLD)
    assert (responseV4.incidents[0].health == dcgm_structs.DCGM_HEALTH_RESULT_WARN)
def helper_nvlink_check_fatal_errors(handle, gpuIds):
    """
    Verifies that injecting an NvLink recovery error produces a FAIL incident
    with the critical NvLink error code.
    """
    test_utils.skip_test_if_any_nvlinks_down(handle)

    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]

    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_NVLINK
    groupObj.health.Set(newSystems)

    # Baseline: zero recovery errors 50 seconds ago
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId,
                                                              dcgm_fields.DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL,
                                                              0, -50)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)

    # A single recovery error is treated as fatal for NvLink health
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId,
                                                              dcgm_fields.DCGM_FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL,
                                                              1, -50)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_NVLINK)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_NVLINK_ERROR_CRITICAL)
    assert (responseV4.incidents[0].health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_standalone_nvlink_fatal(handle, gpuIds):
    """Standalone host-engine variant of the NvLink fatal-error health test."""
    helper_nvlink_check_fatal_errors(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_embedded_nvlink_fatal(handle, gpuIds):
    """Embedded host-engine variant of the NvLink fatal-error health test."""
    helper_nvlink_check_fatal_errors(handle, gpuIds)
def helper_nvlink_crc_fatal_threshold(handle, gpuIds):
    """Inject a burst of NvLink CRC FLIT errors above the per-second threshold
    and verify the health check reports a fatal NvLink incident."""
    test_utils.skip_test_if_any_nvlinks_down(handle)
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetEmptyGroup("test1")
    dcgmGroup.AddGpu(gpuIds[0])
    gpuIds = dcgmGroup.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]
    dcgmGroup.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_NVLINK)
    # Seed a baseline sample with 0 CRC FLIT errors (offset -50)
    injectRet = dcgm_field_injection_helpers.inject_field_value_i64(
        handle, gpuId, dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL, 0, -50)
    assert (injectRet == dcgm_structs.DCGM_ST_OK)
    dcgmGroup.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    # Trigger a failure by having more than 100 CRC errors per second
    injectRet = dcgm_field_injection_helpers.inject_field_value_i64(
        handle, gpuId, dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL, 1000000, -20)
    assert (injectRet == dcgm_structs.DCGM_ST_OK)
    response = dcgmGroup.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (response.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (response.incidentCount == 1)
    incident = response.incidents[0]
    assert (incident.entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (incident.entityInfo.entityId == gpuId)
    assert (incident.system == dcgm_structs.DCGM_HEALTH_WATCH_NVLINK)
    assert (incident.error.code == dcgm_errors.DCGM_FR_NVLINK_CRC_ERROR_THRESHOLD)
    assert (incident.health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_standalone_nvlink_crc_threshold(handle, gpuIds):
    """Standalone host-engine variant of the NvLink CRC threshold health test."""
    helper_nvlink_crc_fatal_threshold(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_embedded_nvlink_crc_threshold(handle, gpuIds):
    """Embedded host-engine variant of the NvLink CRC threshold health test."""
    helper_nvlink_crc_fatal_threshold(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_standalone_health_large_groupid(handle, gpuIds):
    """
    Verifies that a health check can run on a large groupId
    This verifies the fix for bug 1868821
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    #Make a bunch of groups and delete them right away so our next groupId is large
    for i in range(100):
        groupObj = systemObj.GetEmptyGroup("test_group_%d" % i)
        groupObj.Delete()
    groupObj = systemObj.GetEmptyGroup("test_good_group")
    groupObj.AddGpu(gpuIds[0])
    groupId = groupObj.GetId().value
    # Bug fix: format the plain int (groupId), not the ctypes object returned by
    # GetId() — "%d" % c_uint raises TypeError, masking the real failure.
    # The message also now matches the >= comparison actually performed.
    assert groupId >= 100, "Expected groupId >= 100. got %d" % groupId
    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_ALL
    #Any of these will throw an exception on error. Making it past these = success
    groupObj.health.Set(newSystems)
    systemObj.UpdateAllFields(True)
    groupObj.health.Get()
    groupObj.health.Check()
def helper_health_check_nvswitch_errors(handle, switchIds, fieldId, healthSystem, healthResult, errorCode):
    """
    Verifies that a check error occurs when an error is injected
    Checks for call errors are done in the bindings except dcgmClientHealthCheck

    Arguments:
        handle       - DCGM handle
        switchIds    - NvSwitch entity ids; only the first one is exercised
        fieldId      - NvSwitch error-count field to inject into
        healthSystem - health watch system being tested (fatal or non-fatal)
        healthResult - expected per-incident health result (WARN or FAIL)
        errorCode    - expected dcgm_errors code for the incident
    """
    #This test will fail if any NvLinks are down
    test_utils.skip_test_if_any_nvlinks_down(handle)
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    switchId = switchIds[0]
    groupObj.AddEntity(dcgm_fields.DCGM_FE_SWITCH, switchId)
    newSystems = healthSystem
    groupObj.health.Set(newSystems)
    # Build an int64 injection record targeting the switch
    field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
    field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
    field.fieldId = fieldId
    field.status = 0
    field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
    field.ts = int((time.time()-5) * 1000000.0)  # 5 seconds ago, in usec
    field.value.i64 = 0
    ret = dcgm_agent_internal.dcgmInjectEntityFieldValue(handle, dcgm_fields.DCGM_FE_SWITCH,
                                                         switchId, field)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    # Inject another zero-valued sample further in the past (50 seconds ago)
    field.ts = int((time.time()-50) * 1000000.0)
    field.value.i64 = 0
    ret = dcgm_agent_internal.dcgmInjectEntityFieldValue(handle, dcgm_fields.DCGM_FE_SWITCH,
                                                         switchId, field)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # we expect that there will be no data here
    # inject an error into NvSwitch
    field.ts = int((time.time() - 1) * 1000000.0) # set the injected data for a second ago
    field.value.i64 = 5
    ret = dcgm_agent_internal.dcgmInjectEntityFieldValue(handle, dcgm_fields.DCGM_FE_SWITCH,
                                                         switchId, field)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # The non-zero error count should now surface as exactly one incident on the switch
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_SWITCH)
    assert (responseV4.incidents[0].entityInfo.entityId == switchId)
    assert (responseV4.incidents[0].health == healthResult)
    assert (responseV4.incidents[0].system == healthSystem)
    assert (responseV4.incidents[0].error.code == errorCode)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_nvswitches()
def test_health_check_nvswitch_fatal_errors_standalone(handle, switchIds):
    """Standalone host-engine variant of the NvSwitch fatal-error health test."""
    helper_health_check_nvswitch_errors(handle, switchIds,
                                        dcgm_fields.DCGM_FI_DEV_NVSWITCH_FATAL_ERRORS,
                                        dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_FATAL,
                                        dcgm_structs.DCGM_HEALTH_RESULT_FAIL,
                                        dcgm_errors.DCGM_FR_NVSWITCH_FATAL_ERROR)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_nvswitches()
def test_health_check_nvswitch_fatal_errors_embedded(handle, switchIds):
    """Embedded host-engine variant of the NvSwitch fatal-error health test."""
    helper_health_check_nvswitch_errors(handle, switchIds,
                                        dcgm_fields.DCGM_FI_DEV_NVSWITCH_FATAL_ERRORS,
                                        dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_FATAL,
                                        dcgm_structs.DCGM_HEALTH_RESULT_FAIL,
                                        dcgm_errors.DCGM_FR_NVSWITCH_FATAL_ERROR)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_nvswitches()
def test_health_check_nvswitch_nonfatal_errors_standalone(handle, switchIds):
    """Standalone host-engine variant of the NvSwitch non-fatal-error health test."""
    helper_health_check_nvswitch_errors(handle, switchIds,
                                        dcgm_fields.DCGM_FI_DEV_NVSWITCH_NON_FATAL_ERRORS,
                                        dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_NONFATAL,
                                        dcgm_structs.DCGM_HEALTH_RESULT_WARN,
                                        dcgm_errors.DCGM_FR_NVSWITCH_NON_FATAL_ERROR)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_nvswitches()
def test_health_check_nvswitch_nonfatal_errors_embedded(handle, switchIds):
    """Embedded host-engine variant of the NvSwitch non-fatal-error health test."""
    helper_health_check_nvswitch_errors(handle, switchIds,
                                        dcgm_fields.DCGM_FI_DEV_NVSWITCH_NON_FATAL_ERRORS,
                                        dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_NONFATAL,
                                        dcgm_structs.DCGM_HEALTH_RESULT_WARN,
                                        dcgm_errors.DCGM_FR_NVSWITCH_NON_FATAL_ERROR)
def helper_health_check_nvlink_link_down_gpu(handle, gpuIds):
    """
    Verifies that a check error occurs when a NvLink link is set to broken

    Bug fix: the incident count is now asserted BEFORE indexing incidents[0]
    for logging, so a failing run raises a clear assertion error instead of
    an unrelated IndexError from the log statement.
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]
    #Set all links of our injected GPU to Up
    for linkId in range(dcgm_structs.DCGM_NVLINK_MAX_LINKS_PER_GPU):
        dcgm_agent_internal.dcgmSetEntityNvLinkLinkState(handle, dcgm_fields.DCGM_FE_GPU, gpuId, linkId, dcgm_structs.DcgmNvLinkLinkStateUp)
    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_NVLINK
    groupObj.health.Set(newSystems)
    #By default, the health check should pass
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert responseV4.incidentCount == 0, "Expected no errors. Got %d errors" % responseV4.incidentCount
    #Set a link to Down
    linkId = 3
    dcgm_agent_internal.dcgmSetEntityNvLinkLinkState(handle, dcgm_fields.DCGM_FE_GPU, gpuId, linkId, dcgm_structs.DcgmNvLinkLinkStateDown)
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    # Assert the count first so incidents[0] below is guaranteed to exist
    assert (responseV4.incidentCount == 1)
    logger.info("Health String: " + responseV4.incidents[0].error.msg)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_NVLINK)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_NVLINK_DOWN)
    # The error message must name the link we downed
    assert str(linkId) in (responseV4.incidents[0].error.msg), "Didn't find linkId %d in %s" % (linkId, responseV4.incidents[0].error.msg)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_health_check_nvlink_link_down_gpu_standalone(handle, gpuIds):
    """Standalone host-engine variant of the GPU NvLink-down health test."""
    helper_health_check_nvlink_link_down_gpu(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_health_check_nvlink_link_down_gpu_embedded(handle, gpuIds):
    """Embedded host-engine variant of the GPU NvLink-down health test."""
    helper_health_check_nvlink_link_down_gpu(handle, gpuIds)
def helper_health_check_nvlink_link_down_nvswitch(handle, switchIds):
    """Force one NvSwitch NvLink down and verify the fatal NvSwitch health
    watch reports a DCGM_FR_NVLINK_DOWN incident naming that link."""
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetEmptyGroup("test1")
    switchId = switchIds[0]
    dcgmGroup.AddEntity(dcgm_fields.DCGM_FE_SWITCH, switchId)
    linkId = 17
    dcgmGroup.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_FATAL)
    # With every link up, the health check should report nothing
    response = dcgmGroup.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert response.incidentCount == 0, "Expected no errors. Got %d entities with errors: %s" % (response.incidentCount, response.incidents[0].error.msg)
    # Mark one link down and re-run the check
    dcgm_agent_internal.dcgmSetEntityNvLinkLinkState(handle, dcgm_fields.DCGM_FE_SWITCH, switchId, linkId, dcgm_structs.DcgmNvLinkLinkStateDown)
    response = dcgmGroup.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (response.incidentCount == 1)
    incident = response.incidents[0]
    assert (incident.entityInfo.entityId == switchId)
    assert (incident.entityInfo.entityGroupId == dcgm_fields.DCGM_FE_SWITCH)
    assert (incident.system == dcgm_structs.DCGM_HEALTH_WATCH_NVSWITCH_FATAL)
    assert (incident.error.code == dcgm_errors.DCGM_FR_NVLINK_DOWN)
    assert str(linkId) in incident.error.msg, "Didn't find linkId %d in %s" % (linkId, incident.error.msg)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_nvswitches()
def test_health_check_nvlink_link_down_nvswitch_standalone(handle, switchIds):
    """Standalone host-engine variant of the NvSwitch NvLink-down health test."""
    helper_health_check_nvlink_link_down_nvswitch(handle, switchIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_nvswitches()
def test_health_check_nvlink_link_down_nvswitch_embedded(handle, switchIds):
    """Embedded host-engine variant of the NvSwitch NvLink-down health test."""
    helper_health_check_nvlink_link_down_nvswitch(handle, switchIds)
def helper_health_check_multiple_failures(handle, gpuIds):
    """Inject a PCIe replay burst and a volatile DBE at the same time and
    verify the health check reports both incidents (order not guaranteed)."""
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]
    # We are going to trigger two failures at the same time
    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_PCIE | dcgm_structs.DCGM_HEALTH_WATCH_MEM
    groupObj.health.Set(newSystems)
    skip_test_if_unhealthy(groupObj)
    # Baseline PCIe replay counter of 0 (offset -50)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
                                                              0, -50)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # inject a PCI error and a memory error, and make sure we report both
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
                                                              4, 100)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER,
                                                              100, 100)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.incidentCount == 2)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[1].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[1].entityInfo.entityId == gpuId)
    # Incident ordering is not guaranteed, so branch on which system came first
    if responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_MEM:
        # The memory error is in position 0 here
        assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_VOLATILE_DBE_DETECTED)
        # PCIE error is in position 1 here
        assert (responseV4.incidents[1].system == dcgm_structs.DCGM_HEALTH_WATCH_PCIE)
        assert (responseV4.incidents[1].error.code == dcgm_errors.DCGM_FR_PCI_REPLAY_RATE)
    else:
        assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_PCIE)
        assert (responseV4.incidents[1].system == dcgm_structs.DCGM_HEALTH_WATCH_MEM)
        # Mem is in position 1 now
        assert (responseV4.incidents[1].error.code == dcgm_errors.DCGM_FR_VOLATILE_DBE_DETECTED)
        assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_PCI_REPLAY_RATE)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_health_check_standalone_multiple_failures(handle, gpuIds):
    """Standalone host-engine variant of the multiple-failure health test."""
    helper_health_check_multiple_failures(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_health_check_embedded_multiple_failures(handle, gpuIds):
    """Embedded host-engine variant of the multiple-failure health test."""
    helper_health_check_multiple_failures(handle, gpuIds)
def helper_health_check_unreadable_power_usage(handle, gpuIds):
    """Inject a blank power-usage reading and verify the power health watch
    reports a WARN incident with DCGM_FR_POWER_UNREADABLE."""
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetEmptyGroup("test1")
    dcgmGroup.AddGpu(gpuIds[0])
    gpuIds = dcgmGroup.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]
    dcgmGroup.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_POWER)
    # Inject a blank (unreadable) power sample (offset 50)
    injectRet = dcgm_field_injection_helpers.inject_field_value_fp64(
        handle, gpuId, dcgm_fields.DCGM_FI_DEV_POWER_USAGE, dcgmvalue.DCGM_FP64_BLANK, 50)
    assert (injectRet == dcgm_structs.DCGM_ST_OK)
    response = dcgmGroup.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (response.incidentCount == 1)
    incident = response.incidents[0]
    assert (incident.entityInfo.entityId == gpuId)
    assert (incident.entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (incident.system == dcgm_structs.DCGM_HEALTH_WATCH_POWER)
    assert (incident.health == dcgm_structs.DCGM_HEALTH_RESULT_WARN)
    assert (incident.error.code == dcgm_errors.DCGM_FR_POWER_UNREADABLE)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_health_check_standalone_unreadable_power_usage(handle, gpuIds):
    """Standalone host-engine variant of the unreadable-power health test."""
    helper_health_check_unreadable_power_usage(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_health_check_embedded_unreadable_power_usage(handle, gpuIds):
    """Embedded host-engine variant of the unreadable-power health test."""
    helper_health_check_unreadable_power_usage(handle, gpuIds)
def helper_health_set_version2(handle, gpuIds):
    """Verify that the watch interval and max-keep-age passed to health.Set
    are applied to the underlying cache-manager watch for the PCIe field."""
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetGroupWithGpuIds("test1", gpuIds)
    watchInterval = 999999
    maxKeepAge = 1234.5
    # NOTE(review): int(maxKeepAge) truncates the fractional half-second BEFORE
    # converting to usec (1234000000, not 1234500000) — confirm this matches how
    # the host engine rounds maxKeepAge when storing the watch.
    maxKeepAgeUsec = int(maxKeepAge) * 1000000
    fieldId = dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER
    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_PCIE
    groupObj.health.Set(newSystems, watchInterval, maxKeepAge)
    # Every GPU in the group must carry the watch with the requested parameters
    for gpuId in gpuIds:
        cmfi = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
        assert cmfi.flags & dcgm_structs_internal.DCGM_CMI_F_WATCHED, "x%X" % cmfi.flags
        assert cmfi.monitorIntervalUsec == watchInterval, "%d != %d" % (cmfi.monitorIntervalUsec, watchInterval)
        assert cmfi.maxAgeUsec == maxKeepAgeUsec, "%d != %d" % (cmfi.maxAgeUsec, maxKeepAgeUsec)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus(2)
def test_health_set_version2_standalone(handle, gpuIds):
    """Standalone host-engine variant of the health Set v2 parameter test."""
    helper_health_set_version2(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(2)
def test_health_set_version2_embedded(handle, gpuIds):
    """Embedded host-engine variant of the health Set v2 parameter test."""
    helper_health_set_version2(handle, gpuIds)
def helper_test_dcgm_health_check_uncontained_errors(handle, gpuIds):
    """Inject XID 95 (uncontained error) and verify the memory health watch
    reports a FAIL incident with DCGM_FR_UNCONTAINED_ERROR."""
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetEmptyGroup("test1")
    dcgmGroup.AddGpu(gpuIds[0])
    gpuIds = dcgmGroup.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]
    dcgmGroup.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_MEM)
    skip_test_if_unhealthy(dcgmGroup)
    injectRet = dcgm_field_injection_helpers.inject_field_value_i64(
        handle, gpuId, dcgm_fields.DCGM_FI_DEV_XID_ERRORS, 95, 0) # set the injected data to now
    assert (injectRet == dcgm_structs.DCGM_ST_OK)
    response = dcgmGroup.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (response.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (response.incidentCount == 1)
    incident = response.incidents[0]
    assert (incident.entityInfo.entityId == gpuId)
    assert (incident.entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (incident.system == dcgm_structs.DCGM_HEALTH_WATCH_MEM)
    assert (incident.health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (incident.error.code == dcgm_errors.DCGM_FR_UNCONTAINED_ERROR)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_health_check_uncontained_errors(handle, gpuIds):
    """Embedded host-engine run of the uncontained-error health check test."""
    helper_test_dcgm_health_check_uncontained_errors(handle, gpuIds)
def helper_test_dcgm_health_check_row_remap_failure(handle, gpuIds):
    """
    Verifies that the health check will fail if we inject a row remap failure
    (docstring previously said "uncontained error" — copy-paste from the
    helper above; this test injects DCGM_FI_DEV_ROW_REMAP_FAILURE)
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Limit gpuIds to GPUs in our group
    gpuId = gpuIds[0]
    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_MEM
    groupObj.health.Set(newSystems)
    skip_test_if_unhealthy(groupObj)
    ret = dcgm_field_injection_helpers.inject_field_value_i64(handle, gpuId, dcgm_fields.DCGM_FI_DEV_ROW_REMAP_FAILURE,
                                                              1, 0) # set the injected data to now
    assert (ret == dcgm_structs.DCGM_ST_OK)
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.overallHealth == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (responseV4.incidentCount == 1)
    assert (responseV4.incidents[0].entityInfo.entityId == gpuId)
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_MEM)
    assert (responseV4.incidents[0].health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_ROW_REMAP_FAILURE)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(2)
def test_dcgm_health_check_row_remap_failure(handle, gpuIds):
    """Embedded host-engine run of the row-remap-failure health check test."""
    helper_test_dcgm_health_check_row_remap_failure(handle, gpuIds)
# ---- end of file: testing/python3/tests/test_health.py (repo: DCGM-master) ----
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sample script to test python bindings for DCGM
import os
import re
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_agent
import logger
import test_utils
import option_parser
import utils
import dcgm_fields
import apps
import inspect
import dcgmvalue
import pydcgm
import time
import shlex
from dcgm_structs import dcgmExceptionClass
from subprocess import check_output, check_call, Popen, CalledProcessError
# Helper function to find out if DCGM is actually installed
def is_dcgm_package_installed():
    """Find out if the DCGM package is already installed.

    Returns True if the nv-hostengine binary is on $PATH, False otherwise.
    Uses shutil.which instead of spawning a `which` subprocess — same result,
    no process overhead, and no dependency on the external `which` binary.
    """
    import shutil  # local import keeps the module import block untouched
    return shutil.which("nv-hostengine") is not None
# Helper function to get the path to libdcgm.so.3
# Returns the path to libdcgm.so.3 as a string on success
# Returns None on failure
def get_libdcgm_path():
    """
    Returns relative path to libdcgm.so.3
    """
    relative_lib_path = "../../lib/libdcgm.so.3"
    return relative_lib_path
@test_utils.run_only_as_root()
@test_utils.run_only_on_bare_metal()
@test_utils.run_with_embedded_host_engine()
def test_dcgm_stub_library(handle):
    """
    Verifies that DCGM fails gracefully using the stub library
    if a proper DCGM installation is not present or shared
    libraries are not included in the library search path
    """
    if utils.is_esx_hypervisor_system():
        test_utils.skip_test("Compute Mode tests are not supported in VMware ESX Environments")
    if is_dcgm_package_installed():
        test_utils.skip_test("A DCGM package is already installed on this machine")
    # Checks if libdcgm.so.3 is set within LD_LIBRARY_PATH
    libdcgm_path = get_libdcgm_path()
    assert libdcgm_path is not None
    # NOTE(review): the assert above guarantees libdcgm_path is not None, so the
    # else branch below (the ldconfig lookup) is unreachable dead code.
    if libdcgm_path is not None:
        # Verify is stub library is present
        # NOTE(review): libdcgm_path looks like a path to the .so file itself,
        # yet it is used here as a directory prefix — confirm intended layout.
        if not (os.path.isfile(libdcgm_path + "/libdcgm_stub.a")):
            test_utils.skip_test("Unable to find \"libdcgm_stub.a\" in %s" % libdcgm_path)
        else:
            dcgm_lib_original = libdcgm_path + "/libdcgm.so.3"
            dcgm_lib_modified = dcgm_lib_original + "_modified"
    else:
        # Tear down the environment by finding and renaming "libdcgm.so.3" to "libdcgm.so.3_orig"
        # gets the path to libdcgm.so.3, like: /usr/lib/libdcgm.so.3
        try:
            ldconfig_out_buf = check_output(["ldconfig","-p"])
            ldconfig_out = ldconfig_out_buf.decode('utf-8')
            dcgm_lib = [x for x in ldconfig_out.split("\n") if "libdcgm.so.3" in x]
            # NOTE(review): these comprehensions iterate characters of a string
            # and would produce a list of chars, not a path — broken if ever
            # reached (currently unreachable, see note above).
            dcgm_lib_original = [x for x in dcgm_lib[0].split("=>")[-1] if x[0]!=" "]
            dcgm_lib_modified = [x for x in dcgm_lib_original + "_modified" if x[0]!=" "]
        except:
            test_utils.skip_test("Unable to find libdcgm.so.3 library")
    # Renaming the file
    try:
        os.rename(dcgm_lib_original,dcgm_lib_modified)
    except:
        test_utils.skip_test("Unable to rename libdcgm.so.3 library")
    try:
        stub_app = apps.DcgmStubRunnerApp()
        stub_app.start()
        pid = stub_app.getpid()
        stub_app.wait()
    finally:
        # Restore environment
        os.rename(dcgm_lib_modified,dcgm_lib_original)
    # NOTE(review): pid is unbound here if start()/getpid() raised above
    logger.info("stub_library_tet PID was %d" % pid)
    assert "!!!!!!!!" in stub_app.stdout_lines[1], "Failed to collect stub library output"
    assert "WARNING:" in stub_app.stdout_lines[2], "Failed to collect stub library output"
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.skip_denylisted_gpus(["GeForce GT 640"])
def test_dcgm_agent_get_values_for_fields(handle, gpuIds):
    """
    Verifies that the latest value of a watched field can be retrieved
    (docstring previously said "DCGM Engine can be initialized" — copy-paste)
    """
    # Watch field so we can fetch it
    fieldId = dcgm_fields.DCGM_FI_DEV_NAME
    gpuId = gpuIds[0]
    ret = dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, fieldId, 10000000, 86400.0, 0)
    assert(ret == dcgm_structs.DCGM_ST_OK)
    # wait for at least one update of the field before trying to read it
    ret = dcgm_agent.dcgmUpdateAllFields(handle, True)
    assert(ret == dcgm_structs.DCGM_ST_OK)
    values = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, gpuId, [fieldId,])
    assert values[0].status == dcgm_structs.DCGM_ST_OK
    # DCGM_FI_DEV_NAME is a string field; the value must be non-empty
    assert chr(values[0].fieldType) == dcgm_fields.DCGM_FT_STRING, "Wrong field type: %s" % values[0].fieldType
    assert len(values[0].value.str) > 0
    logger.debug("Brand of GPU %u is %s" % (gpuId, values[0].value.str))
@test_utils.run_with_embedded_host_engine()
def test_dcgm_engine_watch_field_values(handle):
    """
    Verifies that cache manager can watch a field value
    """
    # Watch field so we can fetch it
    fieldId = dcgm_fields.DCGM_FI_DEV_NAME
    gpuId = 0
    # Record the pre-existing watcher count; NOT_WATCHED means zero watchers
    try:
        fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
        numWatchersBefore = fieldInfo.numWatchers
    except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_WATCHED) as e:
        numWatchersBefore = 0
    ret = dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, fieldId, 10000000, 86400.0, 0)
    assert(ret == dcgm_structs.DCGM_ST_OK)
    # The field must now be flagged as watched with exactly one extra watcher
    fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
    assert fieldInfo.flags & dcgm_structs_internal.DCGM_CMI_F_WATCHED, "Expected watch. got flags %08X" % fieldInfo.flags
    numWatchersAfter = fieldInfo.numWatchers
    assert numWatchersAfter == numWatchersBefore + 1, "Expected 1 extra watcher. Before %d. After %d" % (numWatchersBefore, numWatchersAfter)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_engine_unwatch_field_value(handle):
    """
    Verifies that the cache manager can unwatch a field value
    """
    # Watch field so we can fetch it
    fieldId = dcgm_fields.DCGM_FI_DEV_NAME
    gpuId = 0
    ret = dcgm_agent_internal.dcgmWatchFieldValue(handle, gpuId, fieldId, 10000000, 86400.0, 0)
    assert(ret == dcgm_structs.DCGM_ST_OK)
    fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
    numWatchersBefore = fieldInfo.numWatchers
    # Unwatch field
    clearCache = 1
    ret = dcgm_agent_internal.dcgmUnwatchFieldValue(handle, gpuId, fieldId, clearCache)
    assert(ret == dcgm_structs.DCGM_ST_OK)
    # Watcher count must drop by one; if it reached zero, the WATCHED flag must be clear
    fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
    numWatchersAfter = fieldInfo.numWatchers
    assert numWatchersAfter == numWatchersBefore - 1, "Expected 1 fewer watcher. Before %d. After %d" % (numWatchersBefore, numWatchersAfter)
    assert (numWatchersAfter > 0) or (0 == (fieldInfo.flags & dcgm_structs_internal.DCGM_CMI_F_WATCHED)), "Expected no watch. got flags %08X" % fieldInfo.flags
def helper_unwatch_field_values_public(handle, gpuIds):
    """
    Verifies that dcgm can unwatch a field value

    Watch one field on every GPU in the group, unwatch it, and confirm the
    cache manager's per-GPU watcher counts return to their pre-test values.
    """
    fieldId = dcgm_fields.DCGM_FI_DEV_NAME
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetGroupWithGpuIds('mygroup', gpuIds)
    fieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "myfieldgroup", [fieldId, ])
    updateFreq = 10000000
    maxKeepAge = 86400
    maxKeepSamples = 0
    # gpuId -> watcher count, captured at three points in time
    numWatchersBefore = {}
    numWatchersWithWatch = {}
    numWatchersAfter = {}
    # Snapshot watcher counts before the test; NOT_WATCHED means zero
    for gpuId in gpuIds:
        try:
            fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(dcgmHandle.handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
            numWatchersBefore[gpuId] = fieldInfo.numWatchers
        except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_WATCHED):
            numWatchersBefore[gpuId] = 0
    # Watch the field group
    dcgmGroup.samples.WatchFields(fieldGroup, updateFreq, maxKeepAge, maxKeepSamples)
    # Each GPU should now have exactly one additional watcher
    for gpuId in gpuIds:
        fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(dcgmHandle.handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
        numWatchersWithWatch[gpuId] = fieldInfo.numWatchers
        assert numWatchersWithWatch[gpuId] == numWatchersBefore[gpuId] + 1,\
            "Watcher mismatch at gpuId %d, numWatchersWithWatch[gpuId] %d != numWatchersBefore[gpuId] %d + 1" %\
            (gpuId, numWatchersWithWatch[gpuId], numWatchersBefore[gpuId])
    # Unwatch and verify the counts fall back to the initial snapshot
    dcgmGroup.samples.UnwatchFields(fieldGroup)
    for gpuId in gpuIds:
        fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(dcgmHandle.handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
        numWatchersAfter[gpuId] = fieldInfo.numWatchers
    assert numWatchersBefore == numWatchersAfter, "Expected numWatchersBefore (%s) to match numWatchersAfter %s" %\
        (str(numWatchersBefore), str(numWatchersAfter))
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_unwatch_field_values_public_embedded(handle, gpuIds):
    """Embedded host-engine variant of the public unwatch test."""
    helper_unwatch_field_values_public(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_unwatch_field_values_public_remote(handle, gpuIds):
    """Remote (standalone host-engine) variant of the public unwatch test."""
    helper_unwatch_field_values_public(handle, gpuIds)
def helper_promote_field_values_watch_public(handle, gpuIds):
    """
    Verifies that dcgm can update a field value watch

    Re-watching the same field group with a faster update frequency should
    update the existing cache-manager watch in place rather than add a new
    sub-watch.
    """
    fieldId = dcgm_fields.DCGM_FI_DEV_NAME
    fieldIds = [fieldId, ]
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetGroupWithGpuIds('mygroup', gpuIds)
    fieldGroup = pydcgm.DcgmFieldGroup(handleObj, "myfieldgroup", fieldIds)
    updateFreq = 100000 #100 msec
    maxKeepAge = 3600
    maxKeepSamples = 0
    #Track the number of watchers to make sure our watch promotion doesn't create another sub-watch
    #but rather updates the existing one
    numWatchersWithWatch = {}
    numWatchersAfter = {}
    #Watch the fields
    groupObj.samples.WatchFields(fieldGroup, updateFreq, maxKeepAge, maxKeepSamples)
    #Get watcher info after our watch and verify that the updateFrequency matches
    for gpuId in gpuIds:
        fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handleObj.handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
        numWatchersWithWatch[gpuId] = fieldInfo.numWatchers
        assert fieldInfo.monitorIntervalUsec == updateFreq, "after watch: fieldInfo.monitorIntervalUsec %d != updateFreq %d" % \
            (fieldInfo.monitorIntervalUsec, updateFreq)
    #Update the watch with a faster update frequency
    updateFreq = 50000 #50 msec
    groupObj.samples.WatchFields(fieldGroup, updateFreq, maxKeepAge, maxKeepSamples)
    #Get watcher info after our second watch and verify that the updateFrequency matches
    for gpuId in gpuIds:
        fieldInfo = dcgm_agent_internal.dcgmGetCacheManagerFieldInfo(handleObj.handle, gpuId, dcgm_fields.DCGM_FE_GPU, fieldId)
        numWatchersAfter[gpuId] = fieldInfo.numWatchers
        assert fieldInfo.monitorIntervalUsec == updateFreq, "after watch: fieldInfo.monitorIntervalUsec %d != updateFreq %d" % \
            (fieldInfo.monitorIntervalUsec, updateFreq)
    # Watcher counts must be unchanged by the promotion
    assert numWatchersWithWatch == numWatchersAfter, "numWatchersWithWatch (%s) != numWatchersAfter (%s)" % \
        (str(numWatchersWithWatch), str(numWatchersAfter))
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_promote_field_values_watch_public_embedded(handle, gpuIds):
    """Embedded host-engine variant of the watch-promotion test."""
    helper_promote_field_values_watch_public(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_promote_field_values_watch_public_remote(handle, gpuIds):
    """Remote (standalone host-engine) variant of the watch-promotion test."""
    helper_promote_field_values_watch_public(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_engine_update_all_fields(handle):
    """
    Verifies that the cache manager can update all fields
    """
    # waitForUpdate=True blocks until the update cycle completes
    waitForUpdate = True
    ret = dcgm_agent.dcgmUpdateAllFields(handle, waitForUpdate)
    assert(ret == dcgm_structs.DCGM_ST_OK)
@test_utils.run_only_on_linux()
@test_utils.run_only_as_root()
def test_dcgm_cgroups_device_block():
    """
    Test whether the correct device uuid is found when a
    device is blocked by cgroups.

    Mounts a temporary cgroup-v1 "devices" hierarchy, denies and then
    re-allows access to each supported GPU's device node, and verifies the
    UUIDs DCGM reports afterwards are unchanged.
    """
    # check_output/CalledProcessError are presumably imported from subprocess
    # earlier in this file -- TODO confirm
    try:
        cgsetPath = check_output(["which", "cgset"])
        cgclearPath = check_output(["which", "cgclear"])
    except CalledProcessError as e:
        logger.debug("Exception was: %s" % e)
        test_utils.skip_test("Unable to find cgset or gclear, skipping test.")
    if (not os.path.exists(cgsetPath.strip())) or (not os.path.exists(cgclearPath.strip())):
        test_utils.skip_test("Unable to find cgset or gclear, skipping test.")
    dcgmHandle = pydcgm.DcgmHandle()
    dcgmSystem = dcgmHandle.GetSystem()
    allDcgmGpuIds = dcgmSystem.discovery.GetAllSupportedGpuIds()
    if len(allDcgmGpuIds) > 0:
        # Mounting a new cgroups hierarchy
        # NOTE(review): os.system() does not raise when the command fails, so
        # this except branch can only catch errors raised by Python itself
        try:
            os.system("mkdir devices")
            os.system("mount -t cgroup -o devices dcgm devices")
            os.system("cgcreate -g devices:/cgroup/dcgm_group1")
        except Exception as msg:
            logger.debug("Failed to mount cgroup with: %s" % msg)
            test_utils.skip_test("Unable to create cgroups mount point, skipping test.")
        try:
            PrevGpuUuid = []
            for gpuId in allDcgmGpuIds:
                # Recording first GPU UUID seen
                PrevGpuUuid.append(dcgmSystem.discovery.GetGpuAttributes(gpuId).identifiers.uuid)
                logger.info("Blocking access to device %s using cgroups..." % dcgmSystem.discovery.GetGpuAttributes(gpuId).identifiers.deviceName)
                # 195 is the NVIDIA character-device major number; the DCGM
                # gpuId is used as the minor number here -- assumes the two
                # match, TODO confirm
                os.system("%s -r devices.deny='c 195:%d rwm' /" % (cgsetPath.strip(), gpuId))
            GpuUuid = []
            for gpuId in allDcgmGpuIds:
                # Release the cgroups restriction
                logger.info("Freeing access device %s using cgroups..." % dcgmSystem.discovery.GetGpuAttributes(gpuId).identifiers.deviceName)
                os.system("%s -r devices.allow='c 195:%d rwm' /" % (cgsetPath.strip(), gpuId))
                # Getting current GPU UUID
                GpuUuid.append(dcgmSystem.discovery.GetGpuAttributes(gpuId).identifiers.uuid)
            assert PrevGpuUuid == GpuUuid, "Previous UUIDs %s should have been the same as current GPU UUID %s" % (PrevGpuUuid, GpuUuid)
        finally:
            #This will always bring GPUs back out of cgroups
            os.system("umount dcgm")
            os.system("cgclear")
            os.system("rm -rf devices")
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_entity_api_sanity(handle, gpuIds):
    '''
    Basic sanity checks of the entity-group discovery APIs.
    '''
    systemObj = pydcgm.DcgmHandle(handle=handle).GetSystem()
    #Supported GPUs only: must match the gpuIds the framework handed us
    entityList = systemObj.discovery.GetEntityGroupEntities(dcgm_fields.DCGM_FE_GPU, True)
    assert entityList == gpuIds, "entityList %s != gpuIds %s" % (str(entityList), str(gpuIds))
    #Including unsupported GPUs: must match the full GPU id list. This only
    #differs from the list above when an old, unsupported GPU is present
    gpuIds = systemObj.discovery.GetAllGpuIds()
    entityList = systemObj.discovery.GetEntityGroupEntities(dcgm_fields.DCGM_FE_GPU, False)
    assert entityList == gpuIds, "entityList %s != gpuIds %s" % (str(entityList), str(gpuIds))
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_all_supported_gpus()
@test_utils.skip_denylisted_gpus(["GeForce GT 640"])
@test_utils.run_with_injection_nvswitches(2)
def test_dcgm_nvlink_link_state(handle, gpuIds, switchIds):
    """
    Sanity-check GetNvLinkLinkStatus() for both GPUs and NvSwitches.

    Verifies the returned struct version, the entity counts (only when every
    GPU in the system is supported), uniqueness of entity ids, and that each
    per-link state falls within the valid [NotSupported, Up] range.
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    #Will throw an exception on API error
    linkStatus = systemObj.discovery.GetNvLinkLinkStatus()
    assert linkStatus.version == dcgm_structs.dcgmNvLinkStatus_version3, "Version mismatch %d != %d" % (linkStatus.version, dcgm_structs.dcgmNvLinkStatus_version3)
    #Fetch the live switch ids unconditionally: the per-switch loop below needs
    #them even when some GPUs are unsupported. Previously this was only
    #assigned inside the following if-block, causing a NameError in that case.
    allSwitchIds = test_utils.get_live_nvswitch_ids(handle)
    if len(systemObj.discovery.GetAllGpuIds()) == len(gpuIds):
        assert linkStatus.numGpus == len(gpuIds), "Gpu count mismatch: %d != %d" % (linkStatus.numGpus, len(gpuIds))
        totalSwitchCount = len(allSwitchIds)
        assert linkStatus.numNvSwitches == totalSwitchCount, "NvSwitch count mismatch: %d != %d" % (linkStatus.numNvSwitches, totalSwitchCount)
    #Check for unset/duplicate GPU IDs
    if len(gpuIds) > 1:
        assert linkStatus.gpus[0].entityId != linkStatus.gpus[1].entityId, "Got same GPU entity ID"
    if len(switchIds) > 1:
        assert linkStatus.nvSwitches[0].entityId != linkStatus.nvSwitches[1].entityId, "Got same switch entity ID"
    #Further sanity checks
    for i in range(len(gpuIds)):
        assert linkStatus.gpus[i].entityId in gpuIds, "GPU index %d id %d missing from %s" % (i, linkStatus.gpus[i].entityId, str(gpuIds))
        for j in range(dcgm_structs.DCGM_NVLINK_MAX_LINKS_PER_GPU):
            ls = linkStatus.gpus[i].linkState[j]
            assert ls >= dcgm_structs.DcgmNvLinkLinkStateNotSupported and ls <= dcgm_structs.DcgmNvLinkLinkStateUp, "Invalid GPU linkState %d at i %d, j %d" % (ls, i, j)
    for i in range(len(switchIds)):
        #Membership is checked against the live switch ids, so report that same
        #list on failure (the message previously printed switchIds instead)
        assert linkStatus.nvSwitches[i].entityId in allSwitchIds, "Switch index %d id %d missing from %s" % (i, linkStatus.nvSwitches[i].entityId, str(allSwitchIds))
        for j in range(dcgm_structs.DCGM_NVLINK_MAX_LINKS_PER_NVSWITCH):
            ls = linkStatus.nvSwitches[i].linkState[j]
            assert ls >= dcgm_structs.DcgmNvLinkLinkStateNotSupported and ls <= dcgm_structs.DcgmNvLinkLinkStateUp, "Invalid NvSwitch linkState %d at i %d, j %d" % (ls, i, j)
| DCGM-master | testing/python3/tests/test_starter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sample script to test python bindings for DCGM
import dcgm_structs
import dcgm_agent_internal
import dcgm_agent
import logger
import test_utils
import dcgm_fields
import apps
import time
@test_utils.run_only_on_linux()
@test_utils.run_only_on_bare_metal()
def test_nv_hostengine_app():
    """Verify nv-hostengine can be launched and run for a given timeout in seconds."""
    engine = apps.NvHostEngineApp()
    engine.start(timeout=15)  # launch the daemon with a 15 second timeout
    pid = engine.getpid()
    # Let it run briefly, then shut it down and confirm a clean exit
    time.sleep(5)
    engine.terminate()
    engine.validate()
    logger.debug("nv-hostengine PID was %d" % pid)
@test_utils.run_only_on_linux()
@test_utils.run_only_on_bare_metal()
def test_dcgmi_app():
    """Verify dcgmi can be launched with at least two parameters."""
    # Point dcgmi at a local nv-hostengine and start collecting data
    dcgmi = apps.DcgmiApp(["127.0.0.1", "0"])
    dcgmi.start()
    pid = dcgmi.getpid()
    # Give it a moment to run, then tear it down and validate the exit
    time.sleep(3)
    dcgmi.terminate()
    dcgmi.validate()
    logger.debug("dcgmi PID was %d" % pid)
@test_utils.run_only_on_linux()
@test_utils.run_only_on_bare_metal()
@test_utils.run_only_with_all_supported_gpus()
@test_utils.skip_denylisted_gpus(["GeForce GT 640"])
def test_dcgm_unittests_app(*args, **kwargs):
    """Run the testdcgmunittests binary and fail if any of its tests fail."""
    app = apps.TestDcgmUnittestsApp()
    app.run(1000)
    logger.debug("The PID of testdcgmunittests is %d" % app.getpid())
    # Wait for completion, then check the process exit status
    app.wait()
    app.validate()
    assert app._retvalue == 0, "Unittest failed with return code %s" % app._retvalue
| DCGM-master | testing/python3/tests/test_dcgm_apprunners.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dcgm_json import DcgmJson
import test_utils
import json
def helper_test_dcgm_json(handle):
    """Create JSON via DcgmJson and verify every returned element parses as valid JSON."""
    obj = DcgmJson()
    obj.SetHandle(handle)
    return_list = obj.CreateJson()
    obj.Shutdown()
    assert return_list is not None, "Nothing was returned from CreateJson()"
    # Every element must be present and parse cleanly; stop at the first failure
    valid_json = False
    for x in return_list:
        if x is None:
            valid_json = False
            break
        try:
            json.loads(x)  # parsing without error means the json is valid
            valid_json = True
        except ValueError as ex:
            valid_json = False
            print(('Invalid json: %s' % ex))
            break
    assert valid_json == True, "Json parsing error. Received incorrect json."
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_json_standalone(handle, gpuIds):
    """Run the JSON validation helper against a standalone host engine."""
    helper_test_dcgm_json(handle)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_dcgm_json_embedded(handle, gpuIds):
    """Run the JSON validation helper against an embedded host engine."""
    helper_test_dcgm_json(handle)
| DCGM-master | testing/python3/tests/test_other.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import test_utils
import utils
import dcgm_agent
import os
@test_utils.run_with_embedded_host_engine()
def test_dcgm_modules_get_statuses(handle):
    '''
    Basic sanity check of the DCGM module statuses returned by the host engine.
    '''
    dcgmSystem = pydcgm.DcgmHandle(handle=handle).GetSystem()
    ms = dcgmSystem.modules.GetStatuses()
    assert ms.numStatuses == dcgm_structs.DcgmModuleIdCount, "%d != %d" % (ms.numStatuses, dcgm_structs.DcgmModuleIdCount)
    #The core module is always present and loaded
    assert ms.statuses[0].id == dcgm_structs.DcgmModuleIdCore, "%d != %d" % (ms.statuses[0].id, dcgm_structs.DcgmModuleIdCore)
    assert ms.statuses[0].status == dcgm_structs.DcgmModuleStatusLoaded, "%d != %d" % (ms.statuses[0].status, dcgm_structs.DcgmModuleStatusLoaded)
    for i in range(1, ms.numStatuses):
        #Each status entry's id equals its index in the array
        assert ms.statuses[i].id == i, "%d != %d" % (ms.statuses[i].id, i)
        #No non-core module should be loaded yet except NvSwitch, which may be
        #loaded because creating default groups causes a RPC to the NvSwitch manager
        if ms.statuses[i].id != dcgm_structs.DcgmModuleIdNvSwitch:
            assert ms.statuses[i].status == dcgm_structs.DcgmModuleStatusNotLoaded, "%d != %d" % (ms.statuses[i].status, dcgm_structs.DcgmModuleStatusNotLoaded)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_modules_in_use_introspection(handle):
    '''
    Verify the introspection module cannot be denylisted once it is loaded.
    '''
    dcgmSystem = pydcgm.DcgmHandle(handle=handle).GetSystem()
    moduleId = dcgm_structs.DcgmModuleIdIntrospect
    #Touching the introspection API lazy-loads the module
    bytesUsed = dcgmSystem.introspect.memory.GetForHostengine().bytesUsed
    #Confirm it is now loaded
    ms = dcgmSystem.modules.GetStatuses()
    assert ms.statuses[moduleId].status == dcgm_structs.DcgmModuleStatusLoaded, "%d != %d" % (ms.statuses[moduleId].status, dcgm_structs.DcgmModuleStatusLoaded)
    #Denylisting a loaded module must fail with DCGM_ST_IN_USE
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_IN_USE)):
        dcgmSystem.modules.Denylist(moduleId)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_modules_denylist_introspection(handle):
    '''
    Verify the introspection module can be denylisted before it is loaded.
    '''
    dcgmSystem = pydcgm.DcgmHandle(handle=handle).GetSystem()
    moduleId = dcgm_structs.DcgmModuleIdIntrospect
    dcgmSystem.modules.Denylist(moduleId)
    #A lazy-load attempt of the denylisted module must now fail
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_MODULE_NOT_LOADED)):
        bytesUsed = dcgmSystem.introspect.memory.GetForHostengine().bytesUsed
@test_utils.run_with_embedded_host_engine()
def test_dcgm_modules_in_use_health(handle):
    '''
    Verify the health module cannot be denylisted once it is loaded.
    '''
    dcgmSystem = pydcgm.DcgmHandle(handle=handle).GetSystem()
    dcgmGroup = dcgmSystem.GetDefaultGroup()
    moduleId = dcgm_structs.DcgmModuleIdHealth
    #Setting health watches lazy-loads the health module
    dcgmGroup.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_ALL)
    #Confirm it is now loaded
    ms = dcgmSystem.modules.GetStatuses()
    assert ms.statuses[moduleId].status == dcgm_structs.DcgmModuleStatusLoaded, "%d != %d" % (ms.statuses[moduleId].status, dcgm_structs.DcgmModuleStatusLoaded)
    #Denylisting a loaded module must fail with DCGM_ST_IN_USE
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_IN_USE)):
        dcgmSystem.modules.Denylist(moduleId)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_modules_denylist_health(handle):
    '''
    Make sure that the health module can be added to the denylist
    '''
    dcgmHandle = pydcgm.DcgmHandle(handle=handle)
    dcgmSystem = dcgmHandle.GetSystem()
    dcgmGroup = dcgmSystem.GetDefaultGroup()
    moduleId = dcgm_structs.DcgmModuleIdHealth
    dcgmSystem.modules.Denylist(moduleId)
    #Try to lazy load the health module (by setting watches); since it is on
    #the denylist, this must fail with DCGM_ST_MODULE_NOT_LOADED
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_MODULE_NOT_LOADED)):
        dcgmGroup.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_ALL)
@test_utils.run_with_embedded_host_engine()
def test_dcgm_modules_paused(handle):
    """
    Verify a module loaded while DCGM is paused starts in the paused state
    and transitions to loaded once DCGM is resumed.
    """
    dcgmSystem = pydcgm.DcgmHandle(handle=handle).GetSystem()
    dcgmGroup = dcgmSystem.GetDefaultGroup()
    moduleId = dcgm_structs.DcgmModuleIdHealth

    def assertModuleStatus(expected):
        #Re-query the module statuses and compare the health module's state
        status = dcgmSystem.modules.GetStatuses().statuses[moduleId].status
        assert status == expected, "{} != {}".format(status, expected)

    #The health module must start out unloaded
    assertModuleStatus(dcgm_structs.DcgmModuleStatusNotLoaded)
    dcgmSystem.PauseTelemetryForDiag()
    #Setting health watches lazy-loads the health module while DCGM is paused
    dcgmGroup.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_ALL)
    assertModuleStatus(dcgm_structs.DcgmModuleStatusPaused)
    dcgmSystem.ResumeTelemetryForDiag()
    #Resuming DCGM must resume the module as well
    assertModuleStatus(dcgm_structs.DcgmModuleStatusLoaded)
@test_utils.run_only_if_checking_libraries()
def test_dcgm_library_existence():
    """Verify every required DCGM shared library ships in the testing framework's library path."""
    # Required library names; each is mapped to a found/not-found flag below
    libraries = [
        'libdcgmmoduleconfig.so',
        'libdcgmmodulehealth.so',
        'libdcgmmodulenvswitch.so',
        'libdcgmmoduleprofiling.so',
        'libdcgm_cublas_proxy11.so',
        'libdcgmmodulediag.so',
        'libdcgmmoduleintrospect.so',
        'libdcgmmodulepolicy.so',
        'libdcgmmodulevgpu.so',
        'libnvperf_dcgm_host.so',
    ]
    name_to_found = {}
    for library in libraries:
        name_to_found[library] = False
    lib_path = utils.get_testing_framework_library_path()
    # Only check for the older proxy libraries if we aren't on aarch64
    if lib_path[-8:] != 'aarch64/':
        name_to_found['libdcgm_cublas_proxy10.so'] = False
    # Mark every required library that actually exists in the directory
    file_list = os.listdir(lib_path)
    for filename in file_list:
        if filename in name_to_found:
            name_to_found[filename] = True
    for name in name_to_found:
        assert name_to_found[name] == True, "Didn't find required library '%s' in library path '%s'" % (name, lib_path) | DCGM-master | testing/python3/tests/test_modules.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test the policy manager for DCGM
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent_internal
import dcgmvalue
import pydcgm
import logger
import test_utils
import dcgm_fields
from dcgm_structs import dcgmExceptionClass
import time
from ctypes import *
import queue
import sys
import os
POLICY_CALLBACK_TIMEOUT_SECS = 15 #How long to wait for policy callbacks to occur. The loop runs every 10 seconds in the host engine, so this should be 10 seconds + some fuzz time
# creates a callback function for dcgmPolicyRegister calls which will push its args
# into "queue" when it is called. This allows synchronization on the callback by
# retrieving the args from the queue as well as checks of the args via asserts.
# NOTE: callers must keep a reference to the returned function object alive for
# as long as it is registered, or the ctypes trampoline may be garbage
# collected before the C API invokes it.
def create_c_callback(queue=None):
    @CFUNCTYPE(None, c_void_p)
    def c_callback(data):
        if queue:
            # copy data into a python struct so that it is the right format and is not lost when "data" var is lost
            callbackData = dcgm_structs.c_dcgmPolicyCallbackResponse_v1()
            memmove(addressof(callbackData), data, callbackData.FieldsSizeof())
            queue.put(callbackData)
    return c_callback
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_reg_unreg_for_policy_update_standalone(handle, gpuIds):
    """
    Exercise the policy manager register/unregister path end to end.
    """
    group = pydcgm.DcgmHandle(handle).GetSystem().GetGroupWithGpuIds('test', gpuIds)
    #Hold a reference so the ctypes callback is not GC'ed while the C API uses it
    cb = create_c_callback()
    #Both calls raise an exception on failure, which fails the test
    group.policy.Register(dcgm_structs.DCGM_POLICY_COND_DBE, None, cb)
    group.policy.Unregister(dcgm_structs.DCGM_POLICY_COND_DBE)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_policy_negative_register_standalone(handle):
    """
    Registering against a nonexistent groupId must raise DCGM_ST_NOT_CONFIGURED.
    """
    badGroupPolicy = pydcgm.DcgmGroupPolicy(pydcgm.DcgmHandle(handle), 9999, None)
    #Keep a reference so the ctypes callback outlives the C call
    cb = create_c_callback()
    with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
        badGroupPolicy.Register(dcgm_structs.DCGM_POLICY_COND_DBE, cb)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
def test_dcgm_policy_negative_unregister_standalone(handle):
    """
    Unregistering against a nonexistent groupId must raise DCGM_ST_NOT_CONFIGURED.
    """
    badGroupPolicy = pydcgm.DcgmGroupPolicy(pydcgm.DcgmHandle(handle), 9999, None)
    with test_utils.assert_raises(dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_CONFIGURED)):
        badGroupPolicy.Unregister(dcgm_structs.DCGM_POLICY_COND_DBE)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_set_get_violation_policy_standalone(handle, gpuIds):
    """
    Verify a violation policy can be set and read back unchanged.
    """
    group = pydcgm.DcgmHandle(handle).GetSystem().GetGroupWithGpuIds("test1", gpuIds)
    #Build a DBE-condition policy to push down
    newPolicy = dcgm_structs.c_dcgmPolicy_v1()
    newPolicy.version = dcgm_structs.dcgmPolicy_version1
    newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_DBE
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_DBE].tag = 0
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_DBE].val.boolean = True
    group.policy.Set(newPolicy)
    #Read the policy back and confirm it round-tripped
    _assert_policies_equal(group.policy.Get()[0], newPolicy)
def _assert_policies_equal(policy1, policy2):
    """Assert two c_dcgmPolicy_v1 structs agree on version, condition, and the DBE parameter."""
    assert(policy1)  # neither may be None
    assert(policy2)
    assert(policy1.version == policy2.version)
    assert(policy1.condition == policy2.condition)
    dbeIdx = dcgm_structs.DCGM_POLICY_COND_IDX_DBE
    assert(policy1.parms[dbeIdx].tag == policy2.parms[dbeIdx].tag)
    assert(policy1.parms[dbeIdx].val.boolean == policy2.parms[dbeIdx].val.boolean)
def helper_dcgm_policy_inject_eccerror(handle, gpuIds):
    """
    Verifies that we can inject an error into the ECC counters and receive a callback

    Sets a DBE-condition policy on a group containing gpuIds, registers a
    callback, injects one volatile device-scope DBE for the first GPU, and
    asserts the callback fires with the injected error.
    """
    newPolicy = dcgm_structs.c_dcgmPolicy_v1()
    newPolicy.version = dcgm_structs.dcgmPolicy_version1
    newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_DBE
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_DBE].tag = 0
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_DBE].val.boolean = True
    dcgmHandle = pydcgm.DcgmHandle(handle)
    dcgmSystem = dcgmHandle.GetSystem()
    group = dcgmSystem.GetGroupWithGpuIds("test1", gpuIds)
    group.policy.Set(newPolicy)
    # the order of the callbacks will change once implementation is complete
    callbackQueue = queue.Queue()
    c_callback = create_c_callback(callbackQueue)
    group.policy.Register(dcgm_structs.DCGM_POLICY_COND_DBE, c_callback, None)
    # inject an error into ECC
    field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
    field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
    field.fieldId = dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_DEV
    field.status = 0
    field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
    field.ts = int((time.time()+60) * 1000000.0) # set the injected data into the future
    field.value.i64 = 1
    logger.debug("injecting %s for gpuId %d" % (str(field), gpuIds[0]))
    ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuIds[0], field)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # wait for the policy manager to call back
    try:
        callbackData = callbackQueue.get(timeout=POLICY_CALLBACK_TIMEOUT_SECS)
    except queue.Empty:
        assert False, "Callback never happened"
    # check that the callback occurred with the correct arguments
    assert(dcgm_structs.DCGM_POLICY_COND_DBE == callbackData.condition), \
        ("error callback was not for a DBE error, got: %s" % callbackData.condition)
    assert(1 == callbackData.val.dbe.numerrors), 'Expected 1 DBE error but got %s' % callbackData.val.dbe.numerrors
    assert(dcgm_structs.c_dcgmPolicyConditionDbe_t.LOCATIONS['DEVICE'] == callbackData.val.dbe.location), \
        'got: %s' % callbackData.val.dbe.location
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_eccerror_embedded(handle, gpuIds):
    """Run the ECC DBE injection/callback check against an embedded host engine."""
    helper_dcgm_policy_inject_eccerror(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(40)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_eccerror_standalone(handle, gpuIds):
    """Run the ECC DBE injection/callback check against a standalone host engine."""
    helper_dcgm_policy_inject_eccerror(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(40)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_nvlinkerror_standalone(handle, gpuIds):
    """
    Verifies that we can inject an NVLINK error and receive a callback

    Sets an NVLINK-condition policy, registers a callback, injects one CRC
    flit error for the first GPU, and asserts the callback fires with the
    injected field id and count.
    """
    newPolicy = dcgm_structs.c_dcgmPolicy_v1()
    newPolicy.version = dcgm_structs.dcgmPolicy_version1
    newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_NVLINK
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_NVLINK].tag = 0
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_NVLINK].val.boolean = True
    dcgmHandle = pydcgm.DcgmHandle(handle)
    dcgmSystem = dcgmHandle.GetSystem()
    group = dcgmSystem.GetGroupWithGpuIds('test1', gpuIds)
    group.policy.Set(newPolicy)
    callbackQueue = queue.Queue()
    c_callback = create_c_callback(callbackQueue)
    group.policy.Register(dcgm_structs.DCGM_POLICY_COND_NVLINK, finishCallback=c_callback)
    # inject an NVLINK CRC flit error for the first GPU
    field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
    field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
    field.fieldId = dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL
    field.status = 0
    field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
    field.ts = int((time.time()+60) * 1000000.0) # set the injected data into the future
    field.value.i64 = 1
    ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuIds[0], field)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # wait for the policy manager to call back
    try:
        callbackData = callbackQueue.get(timeout=POLICY_CALLBACK_TIMEOUT_SECS)
    except queue.Empty:
        assert False, "Callback never happened"
    # check that the callback occurred with the correct arguments
    assert(dcgm_structs.DCGM_POLICY_COND_NVLINK == callbackData.condition), \
        ("NVLINK error callback was not for a NVLINK error, got: %s" % callbackData.condition)
    # Interpolate the field-id constant instead of hardcoding "130" so the
    # message stays accurate if the field id ever changes
    assert(dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL == callbackData.val.nvlink.fieldId), \
        ("Expected fieldId %d but got %s" % (dcgm_fields.DCGM_FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL, callbackData.val.nvlink.fieldId))
    # This is an NVLINK counter; the old message incorrectly said "PCI error"
    assert(1 == callbackData.val.nvlink.counter), 'Expected 1 NVLINK error but got %s' % callbackData.val.nvlink.counter
def helper_test_dcgm_policy_inject_xiderror(handle, gpuIds):
    """
    Verifies that we can inject an XID error and receive a callback

    Skips the test unless at least one GPU supports the XID error field.
    """
    newPolicy = dcgm_structs.c_dcgmPolicy_v1()
    newPolicy.version = dcgm_structs.dcgmPolicy_version1
    newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_XID
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_XID].tag = 0
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_XID].val.boolean = True
    dcgmHandle = pydcgm.DcgmHandle(handle)
    # Find a GPU whose XID errors field is supported; skip if there is none
    validDeviceId = -1
    devices = gpuIds
    for x in devices:
        fvSupported = dcgm_agent_internal.dcgmGetLatestValuesForFields(handle, x, [dcgm_fields.DCGM_FI_DEV_XID_ERRORS, ])
        if (fvSupported[0].value.i64 != dcgmvalue.DCGM_INT64_NOT_SUPPORTED):
            validDeviceId = x
            break
    if (validDeviceId == -1):
        test_utils.skip_test("Can only run if at least one GPU that supports XID errors is present")
    group = pydcgm.DcgmGroup(dcgmHandle, groupName="test1", groupType=dcgm_structs.DCGM_GROUP_EMPTY)
    group.AddGpu(validDeviceId)
    group.policy.Set(newPolicy)
    # Register a callback that pushes its data onto callbackQueue when fired
    callbackQueue = queue.Queue()
    c_callback = create_c_callback(callbackQueue)
    group.policy.Register(dcgm_structs.DCGM_POLICY_COND_XID, finishCallback=c_callback)
    # Inject XID error number 16 for the chosen GPU
    field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
    field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
    field.fieldId = dcgm_fields.DCGM_FI_DEV_XID_ERRORS
    field.status = 0
    field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
    field.ts = int((time.time()+60) * 1000000.0) # set the injected data into the future
    field.value.i64 = 16
    ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, validDeviceId, field)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # wait for the policy manager to call back
    try:
        callbackData = callbackQueue.get(timeout=POLICY_CALLBACK_TIMEOUT_SECS)
    except queue.Empty:
        assert False, "Callback never happened"
    # check that the callback occurred with the correct arguments
    assert(dcgm_structs.DCGM_POLICY_COND_XID == callbackData.condition), \
        ("XID error callback was not for a XID error, got: %s" % callbackData.condition)
    assert(16 == callbackData.val.xid.errnum), ('Expected XID error 16 but got %s' % callbackData.val.xid.errnum)
@test_utils.run_with_standalone_host_engine(40)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_xiderror_standalone(handle, gpuIds):
    """Run the XID injection/callback check against a standalone host engine."""
    helper_test_dcgm_policy_inject_xiderror(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_xiderror_embedded(handle, gpuIds):
    """Run the XID injection/callback check against an embedded host engine."""
    helper_test_dcgm_policy_inject_xiderror(handle, gpuIds)
def helper_dcgm_policy_inject_pcierror(handle, gpuIds):
    """
    Verifies that we can inject an error into the PCI counters and receive a callback

    Sets a PCI-condition policy with a replay-counter threshold of 0, injects
    one PCIe replay for the first GPU, and asserts the callback fires.
    """
    newPolicy = dcgm_structs.c_dcgmPolicy_v1()
    newPolicy.version = dcgm_structs.dcgmPolicy_version1
    newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_PCI
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_PCI].tag = 1
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_PCI].val.llval = 0
    gpuId = gpuIds[0]
    group = pydcgm.DcgmGroup(pydcgm.DcgmHandle(handle), groupName="test1", groupType=dcgm_structs.DCGM_GROUP_EMPTY)
    group.AddGpu(gpuId)
    group.policy.Set(newPolicy)
    # Register a callback that pushes its data onto callbackQueue when fired
    callbackQueue = queue.Queue()
    c_callback = create_c_callback(callbackQueue)
    group.policy.Register(dcgm_structs.DCGM_POLICY_COND_PCI, finishCallback=c_callback)
    # Inject one PCIe replay event for the GPU
    field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
    field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
    field.fieldId = dcgm_fields.DCGM_FI_DEV_PCIE_REPLAY_COUNTER
    field.status = 0
    field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
    field.ts = int((time.time()+60) * 1000000.0) # set the injected data into the future
    field.value.i64 = 1
    ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, field)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # wait for the policy manager to call back
    try:
        callbackData = callbackQueue.get(timeout=POLICY_CALLBACK_TIMEOUT_SECS)
    except queue.Empty:
        assert False, "Callback never happened"
    # check that the callback occurred with the correct arguments
    assert(dcgm_structs.DCGM_POLICY_COND_PCI == callbackData.condition), \
        ("PCI error callback was not for a PCI error, got: %s" % callbackData.condition)
    assert(1 == callbackData.val.pci.counter), 'Expected 1 PCI error but got %s' % callbackData.val.pci.counter
@test_utils.run_with_standalone_host_engine(40)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_pcierror_standalone(handle, gpuIds):
    """Run the PCIe replay injection/callback check against a standalone host engine."""
    helper_dcgm_policy_inject_pcierror(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_pcierror_embedded(handle, gpuIds):
    """Run the PCIe replay injection/callback check against an embedded host engine."""
    helper_dcgm_policy_inject_pcierror(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(40)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_retiredpages_standalone(handle, gpuIds):
    """
    Verifies that we can inject an error into the retired pages counters and receive a callback

    Sets a max-pages-retired policy with a threshold of 5, injects 10 DBE
    retired pages (plus an SBE count), and asserts the callback fires with
    the injected DBE page count.
    """
    newPolicy = dcgm_structs.c_dcgmPolicy_v1()
    newPolicy.version = dcgm_structs.dcgmPolicy_version1
    newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_MAX_PAGES_RETIRED
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_MAX_PAGES_RETIRED].tag = 1
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_MAX_PAGES_RETIRED].val.llval = 5
    # find a GPU that supports ECC and retired pages (otherwise internal test will ignore it)
    dcgmHandle = pydcgm.DcgmHandle(handle)
    dcgmSystem = dcgmHandle.GetSystem()
    group = dcgmSystem.GetGroupWithGpuIds("test1", gpuIds)
    group.policy.Set(newPolicy)
    # Register a callback that pushes its data onto callbackQueue when fired
    callbackQueue = queue.Queue()
    c_callback = create_c_callback(callbackQueue)
    group.policy.Register(dcgm_structs.DCGM_POLICY_COND_MAX_PAGES_RETIRED, finishCallback=c_callback)
    # inject an error into ECC
    numPages = 10
    field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
    field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
    field.fieldId = dcgm_fields.DCGM_FI_DEV_RETIRED_DBE
    field.status = 0
    field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
    field.ts = int((time.time()+60) * 1000000.0) # set the injected data into the future
    field.value.i64 = numPages
    ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuIds[0], field)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    #inject a SBE too so that the health check code gets past its internal checks
    field.fieldId = dcgm_fields.DCGM_FI_DEV_RETIRED_SBE
    ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuIds[0], field)
    assert (ret == dcgm_structs.DCGM_ST_OK)
    # wait for the policy manager to call back
    try:
        callbackData = callbackQueue.get(timeout=POLICY_CALLBACK_TIMEOUT_SECS)
    except queue.Empty:
        assert False, "Callback never happened"
    # check that the callback occurred with the correct arguments
    assert(dcgm_structs.DCGM_POLICY_COND_MAX_PAGES_RETIRED == callbackData.condition), \
        ("error callback was not for a retired pages, got: %s" % callbackData.condition)
    assert(numPages == callbackData.val.mpr.dbepages), \
        'Expected %s errors but got %s' % (numPages, callbackData.val.mpr.dbepages)
@test_utils.run_with_standalone_host_engine(40)
@test_utils.run_with_initialized_client()
def test_dcgm_policy_get_with_no_gpus_standalone(handle):
    '''
    Test that getting the policies when no GPUs are in the group raises an exception
    '''
    emptyGroup = pydcgm.DcgmGroup(pydcgm.DcgmHandle(handle),
                                  groupType=dcgm_structs.DCGM_GROUP_EMPTY,
                                  groupName="test")

    # An empty group has no per-GPU policies to return, so Get() must raise
    with test_utils.assert_raises(pydcgm.DcgmException):
        emptyGroup.policy.Get()
@test_utils.run_with_standalone_host_engine(40)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_dcgm_policy_get_with_some_gpus_standalone(handle, gpuIds):
    '''
    Test that getting the policies returns the correct number of policies as GPUs in the system
    when "count" is not specified for policy.Get
    '''
    singleGpuGroup = pydcgm.DcgmGroup(pydcgm.DcgmHandle(handle),
                                      groupType=dcgm_structs.DCGM_GROUP_EMPTY,
                                      groupName="test")
    singleGpuGroup.AddGpu(gpuIds[0])

    # One GPU in the group means exactly one policy should come back
    fetchedPolicies = singleGpuGroup.policy.Get()
    assert len(fetchedPolicies) == 1, len(fetchedPolicies)
def helper_dcgm_policy_inject_powererror(handle, gpuIds):
    """
    Verifies that we can inject an error into the Power level and receive a callback
    """
    # Policy that fires when power usage exceeds 200 (llval threshold)
    newPolicy = dcgm_structs.c_dcgmPolicy_v1()
    newPolicy.version = dcgm_structs.dcgmPolicy_version1
    newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_POWER
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_POWER].tag = 1
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_POWER].val.llval = 200

    gpuId = gpuIds[0]
    group = pydcgm.DcgmGroup(pydcgm.DcgmHandle(handle), groupName="test1", groupType=dcgm_structs.DCGM_GROUP_EMPTY)
    group.AddGpu(gpuId)
    group.policy.Set(newPolicy)

    # the order of the callbacks will change once implementation is complete
    callbackQueue = queue.Queue()
    c_callback = create_c_callback(callbackQueue)
    group.policy.Register(dcgm_structs.DCGM_POLICY_COND_POWER, c_callback, None)

    # inject an error into Power Violation
    field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
    field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
    field.fieldId = dcgm_fields.DCGM_FI_DEV_POWER_USAGE
    field.status = 0
    field.fieldType = ord(dcgm_fields.DCGM_FT_DOUBLE)
    field.ts = int((time.time()+60) * 1000000.0) # set the injected data into the future
    field.value.dbl = 210.0
    logger.debug("injecting %s for gpuId %d" % (str(field), gpuIds[0]))
    ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuIds[0], field)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    # wait for the policy manager to call back
    try:
        callbackData = callbackQueue.get(timeout=POLICY_CALLBACK_TIMEOUT_SECS)
    except queue.Empty:
        assert False, "Callback never happened"

    # check that the callback occurred with the correct arguments
    assert(dcgm_structs.DCGM_POLICY_COND_POWER == callbackData.condition), \
        ("error callback was not for a Power Violation error, got: %s" % callbackData.condition)
    assert(210 == callbackData.val.power.powerViolation), \
        'Expected Power Violation at 210 but got %s' % callbackData.val.power.powerViolation
@test_utils.run_with_standalone_host_engine(40)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_powererror_standalone(handle, gpuIds):
    # Standalone-hostengine variant of the power-violation injection test.
    helper_dcgm_policy_inject_powererror(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_powererror_embedded(handle, gpuIds):
    # Embedded-hostengine variant of the power-violation injection test.
    helper_dcgm_policy_inject_powererror(handle, gpuIds)
def helper_dcgm_policy_inject_thermalerror(handle, gpuIds):
    """
    Verifies that we can inject an error into the Thermal level and receive a callback

    A policy with a 90-degree thermal threshold is set on a single-GPU group,
    a 95-degree GPU temperature is injected, and the registered callback must
    fire with a DCGM_POLICY_COND_THERMAL condition carrying the injected value.
    """
    newPolicy = dcgm_structs.c_dcgmPolicy_v1()
    newPolicy.version = dcgm_structs.dcgmPolicy_version1
    newPolicy.condition = dcgm_structs.DCGM_POLICY_COND_THERMAL
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_THERMAL].tag = 1
    newPolicy.parms[dcgm_structs.DCGM_POLICY_COND_IDX_THERMAL].val.llval = 90

    gpuId = gpuIds[0]
    group = pydcgm.DcgmGroup(pydcgm.DcgmHandle(handle), groupName="test1", groupType=dcgm_structs.DCGM_GROUP_EMPTY)
    group.AddGpu(gpuId)
    group.policy.Set(newPolicy)

    # the order of the callbacks will change once implementation is complete
    callbackQueue = queue.Queue()
    c_callback = create_c_callback(callbackQueue)
    group.policy.Register(dcgm_structs.DCGM_POLICY_COND_THERMAL, c_callback, None)

    # inject an over-threshold GPU temperature to trigger a Thermal Violation
    field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
    field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
    field.fieldId = dcgm_fields.DCGM_FI_DEV_GPU_TEMP
    field.status = 0
    field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
    field.ts = int((time.time()+60) * 1000000.0) # set the injected data into the future
    field.value.i64 = 95
    logger.debug("injecting %s for gpuId %d" % (str(field), gpuIds[0]))
    ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuIds[0], field)
    assert (ret == dcgm_structs.DCGM_ST_OK)

    # wait for the policy manager to call back
    try:
        callbackData = callbackQueue.get(timeout=POLICY_CALLBACK_TIMEOUT_SECS)
    except queue.Empty:
        assert False, "Callback never happened"

    # check that the callback occurred with the correct arguments
    assert(dcgm_structs.DCGM_POLICY_COND_THERMAL == callbackData.condition), \
        ("error callback was not for a Thermal Violation error, got: %s" % callbackData.condition)
    # Bug fix: this is a thermal condition, so read the thermal member of the
    # callback union. The original read val.power.powerViolation, which only
    # produced the right number because both members overlay the same bytes
    # in the ctypes union.
    assert(95 == callbackData.val.thermal.thermalViolation), \
        'Expected Thermal Violation at 95 but got %s' % callbackData.val.thermal.thermalViolation
@test_utils.run_with_standalone_host_engine(40)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_thermalerror_standalone(handle, gpuIds):
    # Standalone-hostengine variant of the thermal-violation injection test.
    helper_dcgm_policy_inject_thermalerror(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_dcgm_policy_inject_thermalerror_embedded(handle, gpuIds):
    # Embedded-hostengine variant of the thermal-violation injection test.
    helper_dcgm_policy_inject_thermalerror(handle, gpuIds)
| DCGM-master | testing/python3/tests/test_policy.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from socket import socket, AF_INET, SOCK_DGRAM
from common.Struct import Struct
from dcgm_fluentd import DcgmFluentd
def test_send_to_fluentd():
    # Capture the sendto() arguments on a mutable object which acts as a
    # closure, since the nested function cannot rebind outer locals directly
    captured = Struct(message=None, dest=None)

    def fake_sendto(_message, _dest):
        captured.message = _message
        captured.dest = _dest

    fakeSocket = Struct(sendto=fake_sendto)

    dr = DcgmFluentd('FAKE_HOST', 101010)

    # Assert that we are sending over UDP
    assert dr.m_sock.family == AF_INET
    assert dr.m_sock.type == SOCK_DGRAM

    # Swap in the fake socket and verify the payload and destination
    dr.m_sock = fakeSocket
    dr.SendToFluentd('message')

    assert captured.message == 'message'
    assert captured.dest == ('FAKE_HOST', 101010)
def test_fluentd_custom_json_handler():
    # Capture the argument passed to SendToFluentd on a mutable holder
    captured = Struct(arg=None)

    def fake_send(json):
        captured.arg = json  # pylint: disable=no-member

    dr = DcgmFluentd('FAKE_HOST', 101010)
    dr.SendToFluentd = fake_send

    # CustomJsonHandler should forward its argument to SendToFluentd unchanged
    dr.CustomJsonHandler('value')
    assert captured.arg == 'value'  # pylint: disable=no-member
| DCGM-master | testing/python3/tests/test_dcgm_fluentd.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_agent
from dcgm_structs import dcgmExceptionClass
import test_utils
import logger
import os
import option_parser
import DcgmDiag
# All validation (action) levels, ordered shortest to longest run time.
g_allValidations = [dcgm_structs.DCGM_POLICY_VALID_NONE, dcgm_structs.DCGM_POLICY_VALID_SV_SHORT,
                    dcgm_structs.DCGM_POLICY_VALID_SV_MED, dcgm_structs.DCGM_POLICY_VALID_SV_LONG,
                    dcgm_structs.DCGM_POLICY_VALID_SV_XLONG]
def helper_validate_action(groupObj):
    """
    Run the group's validation action at one or more levels and sanity-check
    the response structure version.

    Non-developer runs only exercise the short validation level to keep test
    time down; developer mode runs every level in g_allValidations.
    """
    if not option_parser.options.developer_mode:
        # Bug fix: [0:0] is an empty slice, so non-developer runs validated
        # nothing at all. Take the first two entries (NONE + SHORT); NONE is
        # skipped by the loop below, leaving the short validation to run.
        validations = g_allValidations[0:2] #Just run short for non-developer
    else:
        validations = g_allValidations

    for validation in validations:
        if validation == dcgm_structs.DCGM_POLICY_VALID_NONE:
            #This returns success unconditionally. Not worth checking
            continue

        response = groupObj.action.Validate(validation)

        #Validate the contents
        assert response.version == dcgm_structs.dcgmDiagResponse_version8, "Version mismatch. Expected %d. got %d" % \
            (dcgm_structs.dcgmDiagResponse_version8, response.version)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
@test_utils.run_with_max_power_limit_set()
def test_dcgm_action_validate_embedded(handle, gpuIds):
    # Run the validation action against a same-SKU GPU group using the
    # embedded host engine. Keep handleObj referenced so the connection
    # stays alive for the duration of the test.
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetGroupWithGpuIds('actiongroup', gpuIds)

    helper_validate_action(groupObj)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgm_action_validate_remote(handle, gpuIds):
    # Run the validation action against a same-SKU GPU group over a remote
    # (standalone host engine) connection.
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetGroupWithGpuIds('actiongroup', gpuIds)

    helper_validate_action(groupObj)
# All diagnostic run levels, ordered shortest to longest run time.
g_allDiagLevels = [dcgm_structs.DCGM_DIAG_LVL_SHORT,
                   dcgm_structs.DCGM_DIAG_LVL_MED,
                   dcgm_structs.DCGM_DIAG_LVL_LONG,
                   dcgm_structs.DCGM_DIAG_LVL_XLONG]
def helper_validate_run_diag(groupObj):
    """
    Run the DCGM diagnostic at one or more levels and sanity-check the
    response structure version.

    Non-developer runs only exercise the short diagnostic level; developer
    mode runs every level in g_allDiagLevels.
    """
    if not option_parser.options.developer_mode:
        # Bug fix: [0:0] is an empty slice, so non-developer runs skipped the
        # diagnostic entirely. Take the first entry (DCGM_DIAG_LVL_SHORT) so
        # the short diagnostic actually runs.
        diagLevels = g_allDiagLevels[0:1] #Just run short for non-developer
    else:
        diagLevels = g_allDiagLevels

    for diagLevel in diagLevels:
        logger.info("Running diag level %d. This may take minutes." % diagLevel)
        response = groupObj.action.RunDiagnostic(diagLevel)

        #Validate the contents
        assert response.version == dcgm_structs.dcgmDiagResponse_version8, "Version mismatch. Expected %d. got %d" % \
            (dcgm_structs.dcgmDiagResponse_version8, response.version)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
@test_utils.run_with_max_power_limit_set()
def test_dcgm_action_run_diag_embedded(handle, gpuIds):
    # Run the DCGM diagnostic against a same-SKU GPU group using the
    # embedded host engine.
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetGroupWithGpuIds('actiongroup', gpuIds)

    helper_validate_run_diag(groupObj)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_as_root()
@test_utils.run_with_max_power_limit_set()
def test_dcgm_action_run_diag_remote(handle, gpuIds):
    # Run the DCGM diagnostic against a same-SKU GPU group over a remote
    # (standalone host engine) connection.
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetGroupWithGpuIds('actiongroup', gpuIds)

    helper_validate_run_diag(groupObj)
def helper_dcgm_action_run_diag_gpu_list(handle, gpuIds):
    '''
    Test that running the DCGM diagnostic works if you provide a GPU ID list rather
    than a groupId.
    '''
    drd = dcgm_structs.c_dcgmRunDiag_t()
    drd.version = dcgm_structs.dcgmRunDiag_version
    drd.validate = dcgm_structs.DCGM_POLICY_VALID_SV_SHORT
    drd.groupId = 0 #Initializing to 0 in case the constructor above doesn't
    # Address the GPUs by a comma-separated ID string instead of a group
    drd.gpuList = ",".join(str(gpuId) for gpuId in gpuIds)

    #this will throw an exception on error
    response = test_utils.action_validate_wrapper(drd, handle, runDiagVersion=dcgm_structs.dcgmRunDiag_version)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_run_diag_gpu_list_embedded(handle, gpuIds):
    # Embedded-hostengine variant of the diag-by-GPU-ID-list test.
    helper_dcgm_action_run_diag_gpu_list(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
@test_utils.run_only_if_mig_is_disabled()
def test_dcgm_action_run_diag_gpu_list_standalone(handle, gpuIds):
    # Standalone-hostengine variant of the diag-by-GPU-ID-list test.
    helper_dcgm_action_run_diag_gpu_list(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.for_all_same_sku_gpus()
def test_dcgm_action_run_diag_bad_validation(handle, gpuIds):
    # Build a comma-separated GPU ID string for the request
    gpuIdStr = ",".join(str(gpuId) for gpuId in gpuIds)

    drd = dcgm_structs.c_dcgmRunDiag_t()
    drd.version = dcgm_structs.dcgmRunDiag_version
    drd.validate = dcgm_structs.DCGM_POLICY_VALID_SV_XLONG + 1 #use an invalid value
    drd.groupId = 0 #Initializing to 0 in case the constructor above doesn't
    drd.gpuList = gpuIdStr

    # An out-of-range validation level must be rejected with DCGM_ST_BADPARAM
    with test_utils.assert_raises(dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_BADPARAM)):
        response = test_utils.action_validate_wrapper(drd, handle, runDiagVersion=dcgm_structs.dcgmRunDiag_version)
| DCGM-master | testing/python3/tests/test_action.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import test_utils
import dcgm_field_injection_helpers
import dcgm_agent_internal
import dcgm_structs_internal
import dcgm_agent
import dcgm_fields
import dcgm_structs
import dcgm_errors
import DcgmHandle
import subprocess
import time
import logger
import dcgmvalue
import pydcgm
import dcgm_field_helpers
'''
Attemps to create fake GPU instances
handle - the handle open to the hostengine
gpuId - the ID of the GPU we want to add instances to
instanceCount - the number of instances requested
Returns a map of fake GPU instances: fake GPU instance ID -> GPU ID
'''
def create_fake_gpu_instances(handle, gpuId, instanceCount):
    """
    Attempt to create instanceCount fake GPU instances on gpuId.

    Returns a map of fake GPU instance ID -> parent GPU ID (empty when
    instanceCount <= 0).
    """
    cfe = dcgm_structs_internal.c_dcgmCreateFakeEntities_v2()
    cfe.numToCreate = 0
    fakeInstanceMap = {}

    if instanceCount > 0:
        # Queue up instanceCount GPU-instance entries parented to gpuId
        for i in range(0, instanceCount):
            cfe.entityList[cfe.numToCreate].parent.entityGroupId = dcgm_fields.DCGM_FE_GPU
            cfe.entityList[cfe.numToCreate].parent.entityId = gpuId
            cfe.entityList[cfe.numToCreate].entity.entityGroupId = dcgm_fields.DCGM_FE_GPU_I
            cfe.numToCreate += 1

        # Create the instances first so we can control which GPU the compute instances are placed on
        updated = dcgm_agent_internal.dcgmCreateFakeEntities(handle, cfe)
        for i in range(0, updated.numToCreate):
            if updated.entityList[i].entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_I:
                fakeInstanceMap[updated.entityList[i].entity.entityId] = updated.entityList[i].parent.entityId

    return fakeInstanceMap
'''
Attemps to create fake compute instances
handle - the handle open to the hostengine
parentIds - a list of GPU instance IDs on which to place the fake compute instances
ciCount - the number of compute instances requested
Returns a map of fake compute instances: fake compute instance ID -> GPU instance ID
'''
def create_fake_compute_instances(handle, parentIds, ciCount):
    """
    Attempt to create ciCount fake compute instances, distributed round-robin
    across the GPU instance IDs in parentIds.

    Returns a map of fake compute instance ID -> parent GPU instance ID
    (empty when ciCount <= 0).
    """
    fakeCIMap = {}
    if ciCount > 0:
        cfe = dcgm_structs_internal.c_dcgmCreateFakeEntities_v2()
        instanceIndex = 0
        for i in range(0, ciCount):
            cfe.entityList[cfe.numToCreate].parent.entityGroupId = dcgm_fields.DCGM_FE_GPU_I
            # Bug fix: wrap with >= instead of >. With '>' the index could
            # reach len(parentIds), so parentIds[instanceIndex] below raised
            # IndexError whenever ciCount > len(parentIds).
            if instanceIndex >= len(parentIds):
                instanceIndex = 0
            cfe.entityList[cfe.numToCreate].parent.entityId = parentIds[instanceIndex]
            instanceIndex = instanceIndex + 1
            cfe.entityList[cfe.numToCreate].entity.entityGroupId = dcgm_fields.DCGM_FE_GPU_CI
            cfe.numToCreate += 1

        updated = dcgm_agent_internal.dcgmCreateFakeEntities(handle, cfe)
        for i in range(0, updated.numToCreate):
            if updated.entityList[i].entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_CI:
                fakeCIMap[updated.entityList[i].entity.entityId] = updated.entityList[i].parent.entityId

    return fakeCIMap
'''
Creates fake GPU instances and compute instances if needed to ensure we have the specified number of each
on a specific GPU. It checks the amount of MIG devices that currently exist on that GPU and creates fake
MIG devices to make up the difference, if needed.
handle - the handle open to the hostengine
gpuId - the GPU that needs to have the specified numbers of GPU instances and compute instances
minInstances - the minimum number of GPU instances that need to be on the specified GPU
minCIs - the minimum number of compute instances that need to be on the specified GPU
Returns a tuple that contains a map of GPU instances to their parent GPU IDs and a map of compute instances
to their parent GPU instance IDs.
'''
def ensure_instance_ids(handle, gpuId, minInstances, minCIs):
    """
    Ensure gpuId has at least minInstances GPU instances and minCIs compute
    instances, creating fake MIG entities to make up any shortfall.

    Returns (instanceMap, ciMap): GPU instance ID -> parent GPU ID, and
    compute instance ID -> parent GPU instance ID (for gpuId only).
    """
    instanceMap = {}
    ciMap = {}
    legalInstances = []

    # Walk the MIG hierarchy. Compute instance entries follow the GPU
    # instance they belong to, so legalGpu tracks whether the most recently
    # seen GPU instance belongs to our GPU.
    legalGpu = False
    hierarchy = dcgm_agent.dcgmGetGpuInstanceHierarchy(handle)
    for i in range(0, hierarchy.count):
        entity = hierarchy.entityList[i]
        if entity.entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_I:
            if entity.parent.entityId == gpuId:
                legalGpu = True
                instanceMap[entity.entity.entityId] = entity.parent.entityId
            else:
                legalGpu = False
        elif entity.entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_CI and legalGpu:
            ciMap[entity.entity.entityId] = entity.parent.entityId
            legalInstances.append(entity.parent.entityId)

    if hierarchy.count == 0:
        logger.info("There were no MIG instances configured on this host")

    # Create fake entities to cover any shortfall
    instancesNeeded = minInstances - len(instanceMap)
    cisNeeded = minCIs - len(ciMap)

    fakeInstanceMap = create_fake_gpu_instances(handle, gpuId, instancesNeeded)
    for fakeInstance in fakeInstanceMap:
        legalInstances.append(fakeInstance)
    instanceMap.update(fakeInstanceMap)

    fakeCIMap = create_fake_compute_instances(handle, legalInstances, cisNeeded)
    ciMap.update(fakeCIMap)

    return instanceMap, ciMap
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(gpuCount=8)
def test_instances_large_mig_topology_getlatestvalues_v2(handle, gpuIds):
    """
    Inject a distinct DCGM_FI_PROF_GR_ENGINE_ACTIVE value for every GPU, GPU
    instance, and compute instance, then verify GetLatestValues_v2 returns
    exactly the injected value for every entity in a maximally-sized group.
    """
    dcgmHandle = pydcgm.DcgmHandle(handle)
    dcgmSystem = dcgmHandle.GetSystem()

    # Give every injected GPU 8 GPU instances and 8 compute instances
    instanceIds = []
    computeInstanceIds = []
    for gpuId in gpuIds:
        gpuInstances, gpuCis = ensure_instance_ids(handle, gpuId, 8, 8)
        instanceIds.extend(gpuInstances.keys())
        computeInstanceIds.extend(gpuCis.keys())

    logger.debug("Got gpuInstances: " + str(instanceIds))
    logger.debug("Got computeInstanceIds: " + str(computeInstanceIds))

    fieldId = dcgm_fields.DCGM_FI_PROF_GR_ENGINE_ACTIVE

    #Build up a list of up the max entity group size
    entities = []
    expectedValues = {dcgm_fields.DCGM_FE_GPU : {},
                      dcgm_fields.DCGM_FE_GPU_I : {},
                      dcgm_fields.DCGM_FE_GPU_CI : {}}
    # Each entity gets a unique value so cross-entity mix-ups are detectable
    value = 0.0
    for gpuId in gpuIds:
        entityPair = dcgm_structs.c_dcgmGroupEntityPair_t()
        entityPair.entityGroupId = dcgm_fields.DCGM_FE_GPU
        entityPair.entityId = gpuId
        entities.append(entityPair)
        expectedValues[entityPair.entityGroupId][entityPair.entityId] = {fieldId : value}
        value += 0.01
    for instanceId in instanceIds:
        entityPair = dcgm_structs.c_dcgmGroupEntityPair_t()
        entityPair.entityGroupId = dcgm_fields.DCGM_FE_GPU_I
        entityPair.entityId = instanceId
        entities.append(entityPair)
        expectedValues[entityPair.entityGroupId][entityPair.entityId] = {fieldId : value}
        value += 0.01
    for ciId in computeInstanceIds:
        entityPair = dcgm_structs.c_dcgmGroupEntityPair_t()
        entityPair.entityGroupId = dcgm_fields.DCGM_FE_GPU_CI
        entityPair.entityId = ciId
        entities.append(entityPair)
        expectedValues[entityPair.entityGroupId][entityPair.entityId] = {fieldId : value}
        value += 0.01

    #Truncate the group to the max size
    if len(entities) > dcgm_structs.DCGM_GROUP_MAX_ENTITIES:
        entities = entities[:dcgm_structs.DCGM_GROUP_MAX_ENTITIES]

    dcgmGroup = dcgmSystem.GetGroupWithEntities("biggroup", entities)
    dcgmFieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "myfields", [fieldId, ])

    #inject a known value for every entity
    offset = 5
    for entityPair in entities:
        value = expectedValues[entityPair.entityGroupId][entityPair.entityId][fieldId]
        dcgm_field_injection_helpers.inject_value(handle, entityPair.entityId, fieldId,
                                                  value, offset, verifyInsertion=True,
                                                  entityType=entityPair.entityGroupId)

    dfvc = dcgm_field_helpers.DcgmFieldValueCollection(handle, dcgmGroup._groupId)
    dfvc.GetLatestValues_v2(dcgmFieldGroup)
    assert dfvc._numValuesSeen == len(entities), "%d != %d" % (dfvc._numValuesSeen, len(entities))

    # Every entity should report exactly one sample: its injected value
    for entityPair in entities:
        expectedValue = expectedValues[entityPair.entityGroupId][entityPair.entityId][fieldId]
        timeSeries = dfvc.entityValues[entityPair.entityGroupId][entityPair.entityId][fieldId]
        assert len(timeSeries) == 1, "%d != 1" % len(timeSeries)
        readValue = timeSeries.values[0].value
        assert expectedValue == readValue, "%s != %s" % (str(expectedValue), str(readValue))
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus(gpuCount=1)
def test_instances_fetch_global_fields(handle, gpuIds):
    """
    Inject a value for a global field (DCGM_FI_DEV_COUNT) and verify that
    every entity in a group of GPUs, GPU instances, and compute instances
    reports that same global value.
    """
    dcgmHandle = pydcgm.DcgmHandle(handle)
    dcgmSystem = dcgmHandle.GetSystem()

    # Make sure each GPU has at least one instance and one compute instance
    instanceIds = []
    computeInstanceIds = []
    for gpuId in gpuIds:
        gpuInstances, gpuCis = ensure_instance_ids(handle, gpuId, 1, 1)
        instanceIds.extend(gpuInstances.keys())
        computeInstanceIds.extend(gpuCis.keys())

    logger.debug("Got gpuInstances: " + str(instanceIds))
    logger.debug("Got computeInstanceIds: " + str(computeInstanceIds))

    fieldId = dcgm_fields.DCGM_FI_DEV_COUNT

    #Build up a list of up the max entity group size
    entities = []
    value = 0.0
    for gpuId in gpuIds:
        entityPair = dcgm_structs.c_dcgmGroupEntityPair_t()
        entityPair.entityGroupId = dcgm_fields.DCGM_FE_GPU
        entityPair.entityId = gpuId
        entities.append(entityPair)
    for instanceId in instanceIds:
        entityPair = dcgm_structs.c_dcgmGroupEntityPair_t()
        entityPair.entityGroupId = dcgm_fields.DCGM_FE_GPU_I
        entityPair.entityId = instanceId
        entities.append(entityPair)
    for ciId in computeInstanceIds:
        entityPair = dcgm_structs.c_dcgmGroupEntityPair_t()
        entityPair.entityGroupId = dcgm_fields.DCGM_FE_GPU_CI
        entityPair.entityId = ciId
        entities.append(entityPair)

    #Truncate the group to the max size
    if len(entities) > dcgm_structs.DCGM_GROUP_MAX_ENTITIES:
        entities = entities[:dcgm_structs.DCGM_GROUP_MAX_ENTITIES]

    logger.debug("entities: %s" % str(entities))

    dcgmGroup = dcgmSystem.GetGroupWithEntities("biggroup", entities)
    dcgmFieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, "myfields", [fieldId, ])

    #inject the global value
    injectedValue = len(gpuIds)
    entityId = 0 # Globals ignore entityId
    offset = 0
    entityGroupId = dcgm_fields.DCGM_FE_NONE
    dcgm_field_injection_helpers.inject_value(handle, entityId, fieldId,
                                              injectedValue, offset, verifyInsertion=True,
                                              entityType=entityGroupId)

    dfvc = dcgm_field_helpers.DcgmFieldValueCollection(handle, dcgmGroup._groupId)
    dfvc.GetLatestValues_v2(dcgmFieldGroup)
    assert dfvc._numValuesSeen == len(entities), "%d != %d" % (dfvc._numValuesSeen, len(entities))

    #Make sure we don't get any unexpected entities
    for entityGroupId, entityGroupList in dfvc.entityValues.items():
        entityPair = dcgm_structs.c_dcgmGroupEntityPair_t()
        entityPair.entityGroupId = entityGroupId
        for entityId, entityTs in entityGroupList.items():
            entityPair.entityId = entityId
            assert entityPair in entities, "Unexpected eg %d, eid %d in returned data" % (entityGroupId, entityId)

    #Make sure we get expected entities
    for entityPair in entities:
        assert entityPair.entityGroupId in dfvc.entityValues, "dfvc.entityValues missing entity group %d. has %s" % (entityPair.entityGroupId, str(dfvc.entityValues.keys()))
        assert entityPair.entityId in dfvc.entityValues[entityPair.entityGroupId], "dfvc.entityValues[%d] missing entityId %d. has %s" % (entityPair.entityGroupId, entityPair.entityId, str(dfvc.entityValues[entityPair.entityGroupId].keys()))
        timeSeries = dfvc.entityValues[entityPair.entityGroupId][entityPair.entityId][fieldId]
        assert len(timeSeries) == 1, "%d != 1" % len(timeSeries)
        readValue = timeSeries.values[0].value
        assert injectedValue == readValue, "injectedValue %d != readValue %d" % (injectedValue, readValue)
def helper_test_inject_instance_fields(handle, gpuIds):
    """
    Verify that a value injected for a GPU is reported for that GPU only, and
    that the GPU's child GPU instance / compute instance report no data.
    """
    instances, cis = ensure_instance_ids(handle, gpuIds[0], 1, 1)
    firstInstanceId = list(instances.keys())[0]
    lastCIId = list(cis.keys())[0]

    # Set up the watches on these groups
    groupId = dcgm_agent.dcgmGroupCreate(handle, dcgm_structs.DCGM_GROUP_EMPTY, 'tien')
    fieldGroupId = dcgm_agent.dcgmFieldGroupCreate(handle, [dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL], 'kal')
    dcgm_agent.dcgmGroupAddEntity(handle, groupId, dcgm_fields.DCGM_FE_GPU, gpuIds[0])
    dcgm_agent.dcgmGroupAddEntity(handle, groupId, dcgm_fields.DCGM_FE_GPU_I, firstInstanceId)
    dcgm_agent.dcgmGroupAddEntity(handle, groupId, dcgm_fields.DCGM_FE_GPU_CI, lastCIId)
    dcgm_agent.dcgmWatchFields(handle, groupId, fieldGroupId, 1, 100, 100)

    # Inject a DBE count of 2 for the GPU entity only
    dcgm_field_injection_helpers.inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
                                              2, 5, verifyInsertion=True,
                                              entityType=dcgm_fields.DCGM_FE_GPU, repeatCount=5)

    # Read the values to make sure they were stored properly
    entities = [dcgm_structs.c_dcgmGroupEntityPair_t(), dcgm_structs.c_dcgmGroupEntityPair_t(),
                dcgm_structs.c_dcgmGroupEntityPair_t()]
    entities[0].entityGroupId = dcgm_fields.DCGM_FE_GPU_I
    entities[0].entityId = firstInstanceId
    entities[1].entityGroupId = dcgm_fields.DCGM_FE_GPU_CI
    entities[1].entityId = lastCIId
    entities[2].entityGroupId = dcgm_fields.DCGM_FE_GPU
    entities[2].entityId = gpuIds[0]

    fieldIds = [dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL]
    values = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entities, fieldIds, 0)
    for v in values:
        if v.entityGroupId == dcgm_fields.DCGM_FE_GPU:
            assert v.value.i64 == 2, "Failed to inject value 2 for entity %u from group %u" % (
                v.entityId, v.entityGroupId)
        else:
            # Idiom fix: use the module imported at file scope instead of a
            # per-iteration 'from dcgm_structs import DCGM_ST_NO_DATA'.
            assert (v.status == dcgm_structs.DCGM_ST_NO_DATA), "Injected meaningless value %u for entity %u from group %u" % (
                v.value.i64, v.entityId, v.entityGroupId)
@test_utils.run_with_standalone_host_engine(240)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_inject_instance_fields_standalone(handle, gpuIds):
    # Standalone-hostengine variant of the instance field-injection test.
    helper_test_inject_instance_fields(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_inject_instance_fields_embedded(handle, gpuIds):
    # Embedded-hostengine variant of the instance field-injection test.
    helper_test_inject_instance_fields(handle, gpuIds)
def verify_fake_profile_names(handle, fakeEntities, isGpuInstance):
    """
    Read DCGM_FI_DEV_NAME for each fake MIG entity and assert it matches the
    canned profile name reported for fake entities.
    """
    entityGroup = dcgm_fields.DCGM_FE_GPU_I if isGpuInstance else dcgm_fields.DCGM_FE_GPU_CI
    expectedFakeName = "1fg.4gb" if isGpuInstance else "1fc.1g.4gb"

    entities = []
    for entityId in fakeEntities:
        pair = dcgm_structs.c_dcgmGroupEntityPair_t()
        pair.entityGroupId = entityGroup
        pair.entityId = entityId
        entities.append(pair)

    fieldIds = [dcgm_fields.DCGM_FI_DEV_NAME]
    values = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entities, fieldIds, dcgm_structs.DCGM_FV_FLAG_LIVE_DATA)

    for v in values:
        assert v.value.str == expectedFakeName, "Fake profile name appears to be wrong. Expected '%s', found '%s'" % (
            expectedFakeName, v.value.str)
def verify_profile_names_exist(handle, migEntityList, isGpuInstance):
    """
    Read DCGM_FI_DEV_NAME for each MIG entity and assert that a non-blank
    profile name is returned for every one of them.
    """
    entityGroup = dcgm_fields.DCGM_FE_GPU_I if isGpuInstance else dcgm_fields.DCGM_FE_GPU_CI

    entities = []
    for entityId in migEntityList:
        pair = dcgm_structs.c_dcgmGroupEntityPair_t()
        pair.entityGroupId = entityGroup
        pair.entityId = entityId
        entities.append(pair)

    fieldIds = [dcgm_fields.DCGM_FI_DEV_NAME]
    values = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entities, fieldIds, dcgm_structs.DCGM_FV_FLAG_LIVE_DATA)

    for v in values:
        assert len(v.value.str) and v.value.str != dcgmvalue.DCGM_STR_BLANK, \
            "Expected a non-empty profile name, but found '%s'" % (v.value.str)
def helper_test_fake_mig_device_profile_names(handle, gpuIds):
    """
    Create one fake GPU instance per GPU plus matching fake compute
    instances, then verify both report the canned fake profile names.
    """
    fakeInstanceMap = {}
    for gpuId in gpuIds:
        fakeInstanceMap.update(create_fake_gpu_instances(handle, gpuId, 1))

    fakeCIMap = create_fake_compute_instances(handle, list(fakeInstanceMap.keys()), len(fakeInstanceMap))

    verify_fake_profile_names(handle, list(fakeInstanceMap.keys()), True)
    verify_fake_profile_names(handle, list(fakeCIMap.keys()), False)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_fake_mig_device_profile_names_standalone(handle, gpuIds):
    # Standalone-hostengine variant of the fake MIG profile-name test.
    helper_test_fake_mig_device_profile_names(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_fake_mig_device_profile_names_embedded(handle, gpuIds):
    # Embedded-hostengine variant of the fake MIG profile-name test.
    helper_test_fake_mig_device_profile_names(handle, gpuIds)
def helper_test_health_check_instances(handle, gpuIds):
    """
    Verify that a DBE health incident injected for a GPU is attributed to the
    GPU entity itself when the health-check group also contains the GPU's
    instance and compute instance.
    """
    instances, cis = ensure_instance_ids(handle, gpuIds[0], 1, 1)
    instanceId = list(instances.keys())[0]
    ciId = list(cis.keys())[0]

    # Group containing the GPU plus one of its instances and compute instances
    handleObj = DcgmHandle.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddEntity(dcgm_fields.DCGM_FE_GPU, gpuIds[0])
    groupObj.AddEntity(dcgm_fields.DCGM_FE_GPU_I, instanceId)
    groupObj.AddEntity(dcgm_fields.DCGM_FE_GPU_CI, ciId)

    newSystems = dcgm_structs.DCGM_HEALTH_WATCH_MEM
    groupObj.health.Set(newSystems)

    # Verify health prior to testing
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    if responseV4.incidentCount != 0:
        test_utils.skip_test("Cannot test on unhealthy systems.")

    # Inject one error per system
    dcgm_field_injection_helpers.inject_value(handle, gpuIds[0], dcgm_fields.DCGM_FI_DEV_ECC_DBE_VOL_TOTAL,
                                              2, 5, verifyInsertion=True,
                                              entityType=dcgm_fields.DCGM_FE_GPU, repeatCount=5)

    # The incident should be reported exactly once, against the GPU entity
    responseV4 = groupObj.health.Check(dcgm_structs.dcgmHealthResponse_version4)
    assert (responseV4.incidentCount == 1), "Should have 1 total incidents but found %d" % responseV4.incidentCount
    assert (responseV4.incidents[0].entityInfo.entityId == gpuIds[0])
    assert (responseV4.incidents[0].entityInfo.entityGroupId == dcgm_fields.DCGM_FE_GPU)
    assert (responseV4.incidents[0].error.code == dcgm_errors.DCGM_FR_VOLATILE_DBE_DETECTED)
    assert (responseV4.incidents[0].system == dcgm_structs.DCGM_HEALTH_WATCH_MEM)
    assert (responseV4.incidents[0].health == dcgm_structs.DCGM_HEALTH_RESULT_FAIL)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_with_injection_gpus()
def test_health_check_instances_standalone(handle, gpuIds):
    # Standalone-hostengine variant of the MIG health-check test.
    helper_test_health_check_instances(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_with_injection_gpus()
def test_health_check_instances_embedded(handle, gpuIds):
    # Embedded-hostengine variant of the MIG health-check test.
    helper_test_health_check_instances(handle, gpuIds)
def populate_counts_per_gpu(hierarchy):
    """Walk a MIG hierarchy and bucket entity IDs per GPU.

    Returns a tuple (gpus, gpuInstances, gpuCIIds):
      gpuInstances - GPU entity ID -> list of GPU-instance entity IDs
      gpuCIIds     - GPU entity ID -> list of compute-instance entity IDs
      gpus         - NVML GPU index -> list of compute-instance entity IDs
                     (populated while processing compute instances)
    """
    gpuInstances = {}
    gpuCIIds = {}
    gpus = {}
    # Get counts for each GPU
    for i in range(0, hierarchy.count):
        entity = hierarchy.entityList[i]
        if entity.entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_I:
            # GPU instance: its parent is the GPU itself
            if entity.parent.entityId not in gpuInstances:
                gpuInstances[entity.parent.entityId] = []
            gpuInstances[entity.parent.entityId].append(entity.entity.entityId)
        elif entity.entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_CI:
            # Compute instance: its parent is a GPU instance, so map it back
            # to the owning GPU via the instance lists built above
            for key in gpuInstances:
                if entity.parent.entityId in gpuInstances[key]:
                    if key not in gpuCIIds:
                        gpuCIIds[key] = []
                    gpuCIIds[key].append(entity.entity.entityId)
            # NOTE(review): also index the compute instance by NVML GPU index —
            # confirm this is intended to live in the CI branch only
            if entity.info.nvmlGpuIndex not in gpus:
                gpus[entity.info.nvmlGpuIndex] = []
            gpus[entity.info.nvmlGpuIndex].append(entity.entity.entityId)
    return gpus, gpuInstances, gpuCIIds
class ExpectedValues(object):
    """Expected MIG entity counts for a GPU, plus a verification marker."""

    def __init__(self, instanceCount=0, ciCount=0):
        self.instanceCount = instanceCount  # expected number of GPU instances
        self.ciCount = ciCount              # expected number of compute instances
        self.verified = False               # flipped once the expectation is checked
def create_small_mig_objects(handle, gpuIds, numToCreate):
    """Try to create up to numToCreate single-slice GPU instances.

    Attempts a 1-slice GPU instance on each GPU in turn, skipping GPUs with
    no room. Returns the number of instances actually created, which may be
    anywhere from 0 to numToCreate.
    """
    numInstancesCreated = 0
    for gpuId in gpuIds:
        try:
            dcgm_agent.dcgmCreateMigEntity(handle, gpuId, dcgm_structs.DcgmMigProfileGpuInstanceSlice1, dcgm_structs.DcgmMigCreateGpuInstance, 0)
            numInstancesCreated = numInstancesCreated + 1
            if numInstancesCreated >= numToCreate:
                break
        except Exception:
            # There may not be space; ignore this GPU and try the next.
            # (Was a bare 'except:', which also swallowed KeyboardInterrupt.)
            continue
    return numInstancesCreated
def verifyMigUpdates(handle, oGpuInstances, oGpuCIIds, numInstancesCreated, numCIsCreated, retries=19):
    """Poll the MIG hierarchy until the expected new entities appear.

    Compares the current hierarchy against the previous per-GPU maps
    (oGpuInstances / oGpuCIIds) and collects entity IDs not present before.
    Retries about once per second until at least numInstancesCreated new GPU
    instances and numCIsCreated new compute instances are seen, or retries
    are exhausted.

    Returns (newGpuInstances, newComputeInstances, errMsg); errMsg is ''
    on success and describes the shortfall on timeout.
    """
    newGpuInstances = []
    newComputeInstances = []
    # Nothing was created, so there is nothing to wait for
    if numInstancesCreated == 0 and numCIsCreated == 0:
        return newGpuInstances, newComputeInstances, ''
    errMsg = ''
    while retries >= 0:
        newGpuInstances = []
        newComputeInstances = []
        hierarchy = dcgm_agent.dcgmGetGpuInstanceHierarchy(handle)
        _, gpuInstances, gpuCIIds = populate_counts_per_gpu(hierarchy)
        # Add any new instances to the map
        for key in gpuInstances:
            if key in oGpuInstances:
                # Compare lists
                for instanceId in gpuInstances[key]:
                    if instanceId not in oGpuInstances[key]:
                        newGpuInstances.append(instanceId)
            else:
                # Add the entire list to the new instances
                for instanceId in gpuInstances[key]:
                    newGpuInstances.append(instanceId)
        # Add any new compute instances to the map
        for key in gpuCIIds:
            if key in oGpuCIIds:
                # Compare lists
                for ciId in gpuCIIds[key]:
                    if ciId not in oGpuCIIds[key]:
                        newComputeInstances.append(ciId)
            else:
                # Add the entire list to the new compute instances
                for ciId in gpuCIIds[key]:
                    newComputeInstances.append(ciId)
        if len(newGpuInstances) >= numInstancesCreated and len(newComputeInstances) >= numCIsCreated:
            errMsg = ''
            break
        elif len(newGpuInstances) < numInstancesCreated and len(newComputeInstances) < numCIsCreated:
            errMsg = 'Expected %d new GPU instances and %d new compute instances but only found %d and %d' % \
                     (numInstancesCreated, numCIsCreated, len(newGpuInstances), len(newComputeInstances))
        elif len(newGpuInstances) < numInstancesCreated:
            errMsg = "Expected %d new GPU instances but only found %d" % (numInstancesCreated, len(newGpuInstances))
        else:
            errMsg = "Expected %d new compute instances but only found %d" % (numCIsCreated, len(newComputeInstances))
        retries = retries - 1
        time.sleep(1)
    return newGpuInstances, newComputeInstances, errMsg
def verify_entries_are_deleted(deletedMap, detectedMap):
    """Return keys of deletedMap that still appear in any list of detectedMap.

    A key is reported once per detected list that still contains it, so the
    result may hold duplicates; an empty list means everything is gone.
    """
    return [key
            for key in deletedMap
            for gpu in detectedMap
            if key in detectedMap[gpu]]
def delete_gpu_instances(handle, newGpuInstances, flags):
    """Delete the given GPU instances.

    All but the last deletion use the caller's flags (typically
    DCGM_MIG_RECONFIG_DELAY_PROCESSING); the final deletion is sent with
    flags=0 so the hostengine processes the whole reconfigure at once.
    """
    # Guard against an empty list: the original indexed [-1] unconditionally,
    # which raised IndexError when there was nothing to delete.
    if not newGpuInstances:
        return
    for instanceId in newGpuInstances[:-1]:
        dcgm_agent.dcgmDeleteMigEntity(handle, dcgm_fields.DCGM_FE_GPU_I, instanceId, flags)
    dcgm_agent.dcgmDeleteMigEntity(handle, dcgm_fields.DCGM_FE_GPU_I, newGpuInstances[-1], 0)
def delete_gpu_instances_no_fail(handle, newGpuInstances, flags):
    """Best-effort wrapper around delete_gpu_instances that ignores errors.

    Used on failure/cleanup paths where deletion problems should not mask
    the original test failure.
    """
    try:
        delete_gpu_instances(handle, newGpuInstances, flags)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed; anything else is deliberately ignored.
        pass
def create_mig_entities_and_verify(handle, gpuIds, instanceCreateCount, minInstanceCreateCount):
    """Create GPU instances plus one compute instance per new GPU instance.

    Skips the test if fewer than minInstanceCreateCount GPU instances could
    be created, or if MIG resources are insufficient for the compute
    instances. Returns (gpus, newGpuInstances, newComputeInstances) from the
    hierarchy after creation.

    NOTE(review): instanceCreateCount is not used; creation is capped at a
    hard-coded 3 — confirm intent.
    """
    # get mig hierarchy
    hierarchy = dcgm_agent.dcgmGetGpuInstanceHierarchy(handle)
    oGpus, oGpuInstances, oGpuCIIds = populate_counts_per_gpu(hierarchy)
    numInstancesCreated = create_small_mig_objects(handle, gpuIds, 3)
    if numInstancesCreated < minInstanceCreateCount:
        test_utils.skip_test("Cannot create any GPU instances, skipping test.")
    # Make sure the new instances appear
    newGpuInstances, newComputeInstances, errMsg = verifyMigUpdates(handle, oGpuInstances, oGpuCIIds, numInstancesCreated, 0)
    assert errMsg == '', errMsg
    # Create new compute instances
    flags = dcgm_structs.DCGM_MIG_RECONFIG_DELAY_PROCESSING
    numCIsCreated = 0
    try:
        for instanceId in newGpuInstances[:-1]:
            dcgm_agent.dcgmCreateMigEntity(handle, instanceId, dcgm_structs.DcgmMigProfileComputeInstanceSlice1, \
                                           dcgm_structs.DcgmMigCreateComputeInstance, flags)
            numCIsCreated = numCIsCreated + 1
        # For the last one, send a flag to ask hostengine to process the reconfiguring
        dcgm_agent.dcgmCreateMigEntity(handle, newGpuInstances[-1], dcgm_structs.DcgmMigProfileComputeInstanceSlice1, \
                                       dcgm_structs.DcgmMigCreateComputeInstance, 0)
        numCIsCreated = numCIsCreated + 1
    except dcgm_structs.dcgmExceptionClass(dcgm_structs.DCGM_ST_INSUFFICIENT_RESOURCES) as e:
        # Clean up whatever we managed to create before skipping
        delete_gpu_instances_no_fail(handle, newGpuInstances, flags)
        test_utils.skip_test("Insufficient resources to run this test")
    # Verify the new compute instances have appeared
    newGpuInstances, newComputeInstances, errMsg = verifyMigUpdates(handle, oGpuInstances, oGpuCIIds, numInstancesCreated, numCIsCreated)
    if errMsg != '':
        delete_gpu_instances_no_fail(handle, newGpuInstances, flags)
    assert errMsg == '', errMsg
    return oGpus, newGpuInstances, newComputeInstances
def delete_compute_instances_and_verify(handle, newComputeInstances):
    """Delete compute instances and poll until they leave the hierarchy.

    Returns '' on success, or a message naming the compute instances still
    present after ~20 one-second retries.

    NOTE(review): assumes newComputeInstances is non-empty — indexing [-1]
    raises IndexError otherwise; confirm callers guarantee this.
    """
    errMsg = ''
    flags = dcgm_structs.DCGM_MIG_RECONFIG_DELAY_PROCESSING
    # Delete the new instances
    for ciId in newComputeInstances[:-1]:
        dcgm_agent.dcgmDeleteMigEntity(handle, dcgm_fields.DCGM_FE_GPU_CI, ciId, flags)
    # don't block processing the reconfigure with the last one
    dcgm_agent.dcgmDeleteMigEntity(handle, dcgm_fields.DCGM_FE_GPU_CI, newComputeInstances[-1], 0)
    # verify that the compute instances disappear
    retries = 20
    cisStillHere = newComputeInstances
    while retries > 0:
        hierarchy = dcgm_agent.dcgmGetGpuInstanceHierarchy(handle)
        _, gpuInstances, gpuCIIds = populate_counts_per_gpu(hierarchy)
        retries = retries - 1
        updated = verify_entries_are_deleted(cisStillHere, gpuCIIds)
        if len(updated) == 0:
            errMsg = ''
            break
        else:
            # Build a message naming the stragglers; retry with just them
            errMsg = "Compute instances '"
            for item in updated:
                errMsg = "%s %s" % (errMsg, item)
            errMsg = "%s' were not deleted successfully" % errMsg
            cisStillHere = updated
        time.sleep(1)
    return errMsg
def delete_gpu_instances_and_verify(handle, newGpuInstances):
    """Delete GPU instances and poll until they leave the MIG hierarchy.

    Returns '' on success, or a message naming the GPU instances still
    present after ~20 one-second retries.
    """
    errMsg = ''
    flags = dcgm_structs.DCGM_MIG_RECONFIG_DELAY_PROCESSING
    delete_gpu_instances(handle, newGpuInstances, flags)
    retries = 20
    gpuInstancesStillHere = newGpuInstances
    while retries > 0:
        hierarchy = dcgm_agent.dcgmGetGpuInstanceHierarchy(handle)
        _, gpuInstances, gpuCIIds = populate_counts_per_gpu(hierarchy)
        retries = retries - 1
        updated = verify_entries_are_deleted(gpuInstancesStillHere, gpuInstances)
        if len(updated) == 0:
            errMsg = ''
            break
        else:
            # Build a message naming the stragglers; retry with just them
            errMsg = "GPU instances '"
            for item in updated:
                errMsg = "%s %s" % (errMsg, item)
            errMsg = "%s' were not deleted successfully" % errMsg
            gpuInstancesStillHere = updated
        time.sleep(1)
    return errMsg
def helper_test_mig_reconfigure(handle, gpuIds):
    """Exercise MIG reconfiguration end to end: create instances and compute
    instances, check their profile names, then delete and verify removal."""
    _, newGpuInstances, newComputeInstances = create_mig_entities_and_verify(handle, gpuIds, 3, 1)
    verify_profile_names_exist(handle, newComputeInstances, False)
    ciFailMsg = delete_compute_instances_and_verify(handle, newComputeInstances)
    # Save this and attempt to cleanup the rest even though we failed here
    if ciFailMsg != '':
        logger.warning("The compute instances didn't clean up correctly, but we'll attempt to clean up the GPU instances anyway")
    instanceFailMsg = delete_gpu_instances_and_verify(handle, newGpuInstances)
    assert ciFailMsg == '', ciFailMsg
    assert instanceFailMsg == '', instanceFailMsg
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_enabled()
@test_utils.run_only_as_root()
def test_mig_reconfigure_standalone(handle, gpuIds):
    """MIG reconfigure test against a standalone host engine."""
    helper_test_mig_reconfigure(handle, gpuIds)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_enabled()
@test_utils.run_only_as_root()
def test_mig_reconfigure_embedded(handle, gpuIds):
    """MIG reconfigure test against an embedded host engine."""
    helper_test_mig_reconfigure(handle, gpuIds)
def helper_test_mig_cuda_visible_devices_string(handle, gpuIds):
    """Validate the CUDA_VISIBLE_DEVICES string for MIG entities.

    For every GPU instance belonging to a GPU under test — and the compute
    instances that follow it in the hierarchy listing — the string must
    start with 'MIG-' and contain one '/' for instances and two for compute
    instances.
    """
    hierarchy = dcgm_agent.dcgmGetGpuInstanceHierarchy(handle)
    gpuPartOfTest = False
    for i in range(0, hierarchy.count):
        entity = hierarchy.entityList[i]
        isInstance = False
        if entity.entity.entityGroupId == dcgm_fields.DCGM_FE_GPU_I:
            # Children (compute instances) follow their GPU instance in the
            # list, so this flag also covers them until the next instance.
            gpuPartOfTest = entity.parent.entityId in gpuIds
            isInstance = True
        if gpuPartOfTest:
            cuda_vis = test_utils.get_cuda_visible_devices_str(handle, entity.entity.entityGroupId, entity.entity.entityId)
            # Fixed: the original message was missing its closing quote ('%s)
            assert cuda_vis[:4] == 'MIG-', "Expected the CUDA_VISIBLE_DEVICES string to start with 'MIG-', but found '%s'" % (cuda_vis)
            firstSlashIndex = cuda_vis.find('/')
            assert firstSlashIndex != -1, "Expected to find '/' in CUDA_VISIBLE_DEVICES, but didn't: '%s'" % (cuda_vis)
            if not isInstance:
                # Compute instances carry a second '/' (GPU/instance/CI)
                secondSlashIndex = cuda_vis.find('/', firstSlashIndex+1)
                assert secondSlashIndex != -1, "Expected to find two '/' marks in CUDA_VISIBLE_DEVICES, but didn't: '%s'" % (cuda_vis)
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_enabled()
@test_utils.run_only_as_root()
def test_mig_cuda_visible_devices_string_embedded(handle, gpuIds):
    """CUDA_VISIBLE_DEVICES string test against an embedded host engine."""
    helper_test_mig_cuda_visible_devices_string(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_enabled()
@test_utils.run_only_as_root()
def test_mig_cuda_visible_devices_string_standalone(handle, gpuIds):
    """CUDA_VISIBLE_DEVICES string test against a standalone host engine."""
    helper_test_mig_cuda_visible_devices_string(handle, gpuIds)
def helper_test_mig_value_reporting(handle, gpuIds):
    """Verify field-value relationships between GPUs and their MIG children.

    Fields in sameValueFieldIds must report identical values on the GPU, its
    instances, and its compute instances; fields in differentValueFieldIds
    (e.g. framebuffer total) must differ. MIG entities created for the test
    are cleaned up before the final assertion.
    """
    # These fields should report the same value for GPUs, instances, and compute instances
    sameValueFieldIds = [
        dcgm_fields.DCGM_FI_DEV_COMPUTE_MODE,
        dcgm_fields.DCGM_FI_DEV_MIG_MODE,
        dcgm_fields.DCGM_FI_DEV_SHUTDOWN_TEMP,
    ]
    differentValueFieldIds = [
        dcgm_fields.DCGM_FI_DEV_FB_TOTAL,
    ]
    gpus, newGpuInstances, newComputeInstances = create_mig_entities_and_verify(handle, gpuIds, 1, 1)
    # Make sure we get the same values for these fields on the GPU, instances, and compute instances
    # Build the entity list
    entities = []
    for gpuId in gpuIds:
        entities.append(dcgm_structs.c_dcgmGroupEntityPair_t(dcgm_fields.DCGM_FE_GPU, gpuId))
    for instanceId in newGpuInstances:
        entities.append(dcgm_structs.c_dcgmGroupEntityPair_t(dcgm_fields.DCGM_FE_GPU_I, instanceId))
    for ciId in newComputeInstances:
        entities.append(dcgm_structs.c_dcgmGroupEntityPair_t(dcgm_fields.DCGM_FE_GPU_CI, ciId))
    fieldIds = []
    fieldIds.extend(sameValueFieldIds)
    fieldIds.extend(differentValueFieldIds)
    values = dcgm_agent.dcgmEntitiesGetLatestValues(handle, entities, fieldIds, dcgm_structs.DCGM_FV_FLAG_LIVE_DATA)
    gpuValues = {}
    # Make a map of a map the values reported by the GPUs: gpuId -> fieldId -> value
    for value in values:
        if value.entityGroupId == dcgm_fields.DCGM_FE_GPU:
            if value.entityId not in gpuValues:
                gpuValues[value.entityId] = {}
                gpuValues[value.entityId][value.fieldId] = value.value.i64
            elif value.fieldId not in gpuValues[value.entityId]:
                gpuValues[value.entityId][value.fieldId] = value.value.i64
    errMsg = ''
    for value in values:
        for gpuId in gpus:
            # NOTE(review): this tests membership of the entity ID in the
            # *keys* of 'gpus' (NVML indices) — confirm it shouldn't be
            # 'value.entityId not in gpus[gpuId]'
            if value.entityId not in gpus:
                continue
            if value.entityGroupId == dcgm_fields.DCGM_FE_GPU_I:
                same = gpuValues[gpuId][value.fieldId] == value.value.i64
                if not same and value.fieldId in sameValueFieldIds:
                    errMsg = errMsg + "\nExpected %d but found %d for field %d GPU instance %d on GPU %d" \
                             % (gpuValues[gpuId][value.fieldId], value.value.i64, value.fieldId, value.entityId, gpuId)
                elif same and value.fieldId in differentValueFieldIds:
                    errMsg = errMsg + "\nExpected different values but found %d for field %d for GPU instance %d on GPU %d" \
                             % (value.value.i64, value.fieldId, value.entityId, gpuId)
            if value.entityGroupId == dcgm_fields.DCGM_FE_GPU_CI:
                same = gpuValues[gpuId][value.fieldId] == value.value.i64
                if not same and value.fieldId in sameValueFieldIds:
                    errMsg = errMsg + "\nExpected %d but found %d for field %d compute instance %d on GPU %d" \
                             % (gpuValues[gpuId][value.fieldId], value.value.i64, value.fieldId, value.entityId, gpuId)
                elif same and value.fieldId in differentValueFieldIds:
                    errMsg = errMsg + "\nExpected different values but found %d for field %d for compute instance %d on GPU %d" \
                             % (value.value.i64, value.fieldId, value.entityId, gpuId)
    # Clean up before asserting so failures don't leak MIG entities
    ciFailMsg = delete_compute_instances_and_verify(handle, newComputeInstances)
    instanceFailMsg = delete_gpu_instances_and_verify(handle, newGpuInstances)
    if ciFailMsg != '':
        logger.warning("The compute instances didn't clean up correctly: %s" % ciFailMsg)
    if instanceFailMsg != '':
        logger.warning("The GPU instances didn't clean up correctly: %s" % instanceFailMsg)
    assert errMsg == '', errMsg
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_enabled()
@test_utils.run_only_as_root()
def test_mig_value_reporting_embedded(handle, gpuIds):
    """MIG value-reporting test against an embedded host engine."""
    helper_test_mig_value_reporting(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(120)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
@test_utils.run_only_if_mig_is_enabled()
@test_utils.run_only_as_root()
def test_mig_value_reporting_standalone(handle, gpuIds):
    """MIG value-reporting test against a standalone host engine."""
    helper_test_mig_value_reporting(handle, gpuIds)
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent_internal
import dcgm_fields
from dcgm_structs import dcgmExceptionClass
import test_utils
import time
import os
import sys
# How far (seconds) into the future injected samples are timestamped,
# and how long we wait before scraping them back.
FUTURE_INSERT_TIME = 2
# Set up the environment for the DcgmPrometheus class before importing
os.environ['DCGM_TESTING_FRAMEWORK'] = 'True'
if 'LD_LIBRARY_PATH' in os.environ:
    os.environ['DCGMLIBPATH'] = os.environ['LD_LIBRARY_PATH']
# Put the stubs directory first on sys.path so the stub prometheus client
# shadows any real installation when dcgm_prometheus imports it.
stubspath = os.path.dirname(os.path.realpath(__file__)) + '/stubs/'
if stubspath not in sys.path:
    sys.path.insert(0, stubspath)
import dcgm_prometheus
import prometheus_tester_globals
@test_utils.run_with_standalone_host_engine(90)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_gpus()
def test_prometheus_basic_integration(handle, gpuIds):
    """
    Verifies that we can inject specific data and get that same data back
    through the prometheus publishing path (using the stub client).
    """
    dcgmHandle = pydcgm.DcgmHandle(handle)
    dcgmSystem = dcgmHandle.GetSystem()
    specificFieldIds = [dcgm_fields.DCGM_FI_DEV_RETIRED_DBE,
                        dcgm_fields.DCGM_FI_DEV_RETIRED_SBE,
                        dcgm_fields.DCGM_FI_DEV_POWER_VIOLATION,
                        dcgm_fields.DCGM_FI_DEV_THERMAL_VIOLATION]
    fieldValues = [1,
                   5,
                   1000,
                   9000]
    # Configure the prometheus module to publish exactly these fields
    dcgm_prometheus.initialize_globals()
    dcgm_prometheus.g_settings['publishFieldIds'] = specificFieldIds
    dcgm_prometheus.g_settings['prometheusPublishInterval'] = 10
    dcgm_prometheus.g_settings['sendUuid'] = False
    dcgm_prometheus.g_settings['dcgmHostName'] = "localhost"
    dcgmPrometheus = dcgm_prometheus.DcgmPrometheus()
    dcgmPrometheus.Init()
    dcgmPrometheus.LogBasicInformation()
    # Inject a known value for each watched field on every GPU, timestamped
    # slightly in the future so it is the latest sample when we scrape
    for gpuId in gpuIds:
        for i in range(0, len(specificFieldIds)):
            field = dcgm_structs_internal.c_dcgmInjectFieldValue_v1()
            field.version = dcgm_structs_internal.dcgmInjectFieldValue_version1
            field.fieldId = specificFieldIds[i]
            field.status = 0
            field.fieldType = ord(dcgm_fields.DCGM_FT_INT64)
            field.ts = int((time.time()+FUTURE_INSERT_TIME) * 1000000.0) # set the injected data into the future
            field.value.i64 = fieldValues[i]
            ret = dcgm_agent_internal.dcgmInjectFieldValue(handle, gpuId, field)
            assert (ret == dcgm_structs.DCGM_ST_OK)
    # Verify that we can read back the fields we watch.
    time.sleep(FUTURE_INSERT_TIME)
    dcgmPrometheus.Scrape(dcgmPrometheus)
    # Every injected (gpu, field) pair must be present in the stub registry
    for gpuId in gpuIds:
        for i in range(0, len(specificFieldIds)):
            fieldTag = dcgmSystem.fields.GetFieldById(specificFieldIds[i]).tag
            label = prometheus_tester_globals.gvars['fields']["dcgm_" + fieldTag]
            foundGpuId = False
            for uniqueGpuId, value in label.values.items():
                if gpuId == value.id:
                    foundGpuId = True
                    assert (fieldValues[i] == value.get())
            assert(foundGpuId == True)
| DCGM-master | testing/python3/tests/test_dcgm_prometheus.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# test the policy manager for DCGM
import pydcgm
import dcgm_structs
import dcgm_structs_internal
import dcgm_agent
import dcgm_agent_internal
import dcgmvalue
import logger
import test_utils
import dcgm_fields
import time
from ctypes import *
import sys
import os
import pprint
import DcgmSystem
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_all_supported_gpus()
def test_dcgm_topology_device_standalone(handle, gpuIds):
    """
    Verifies that the topology get for the default group works
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetDefaultGroup()
    gpuIds = groupObj.GetGpuIds() #Use just the GPUs in our group
    if len(gpuIds) < 2:
        test_utils.skip_test("Needs >= 2 GPUs")
    topologyInfo = systemObj.discovery.GetGpuTopology(gpuIds[0])
    # GPU 0's topology should list every other GPU as a peer
    assert (topologyInfo.numGpus == len(gpuIds) - 1), "Expected %d, received numGpus = %d" % (len(gpuIds) - 1, topologyInfo.numGpus)
    assert (topologyInfo.cpuAffinityMask[0] != 0), "GPU 0 should have *some* affinity"
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_all_supported_gpus()
def test_dcgm_topology_group_single_gpu_standalone(handle, gpuIds):
    """
    Verifies that the topology get for a group works for a single GPU
    """
    #Topology will work for a one-GPU group if there are > 1 GPUs on the system
    if len(gpuIds) < 2:
        test_utils.skip_test("Needs >= 2 GPUs")
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetEmptyGroup("test1")
    groupObj.AddGpu(gpuIds[0])
    gpuIds = groupObj.GetGpuIds() #Use just the GPUs in our group
    topologyInfo = groupObj.discovery.GetTopology()
    assert (topologyInfo.numaOptimalFlag > 0), "with a single GPU, numa is by default optimal"
    assert (topologyInfo.slowestPath == 0), "with a single GPU, slowest path shouldn't be set"
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_all_supported_gpus()
def test_dcgm_topology_device_nvlink_standalone(handle, gpuIds):
    """
    Verifies that the topology get for the default group returns valid NVLINK info
    """
    handleObj = pydcgm.DcgmHandle(handle=handle)
    systemObj = handleObj.GetSystem()
    groupObj = systemObj.GetDefaultGroup()
    gpuIds = groupObj.GetGpuIds() #Use just the GPUs in our group
    if len(gpuIds) < 2:
        test_utils.skip_test("Needs >= 2 GPUs")
    topologyInfo = systemObj.discovery.GetGpuTopology(gpuIds[0])
    if topologyInfo.gpuPaths[0].localNvLinkIds == 0:
        test_utils.skip_test("Needs NVLINK support")
    # Upper bits of the path carry the NVLINK connectivity state
    assert ((topologyInfo.gpuPaths[0].path & 0xFFFFFF00) > 0), "No NVLINK state set when localNvLinkIds is > 0"
def helper_test_select_gpus_by_topology(handle, gpuIds):
    '''
    Verifies basic selection of GPUs by topology.

    First asks for every GPU (must get the full input mask back), then — with
    more than two GPUs — removes each GPU's bit in turn and verifies the
    selection never includes a GPU outside the reduced input list.
    '''
    handleObj = pydcgm.DcgmHandle(handle=handle)
    discover = DcgmSystem.DcgmSystemDiscovery(handleObj)
    inputList = 0
    gpuBits = {}
    # Create the initial input list: one bit per GPU ID
    for gpuId in gpuIds:
        mask = (0x1 << gpuId)
        inputList = inputList | mask
        gpuBits[gpuId] = mask
    # If we ask for all the GPUs then we should get all the GPUs
    numGpus = len(gpuIds)
    # Ignore the health since we don't know if this system is healthy or not
    hints = dcgm_structs.DCGM_TOPO_HINT_F_IGNOREHEALTH
    selectedMask = dcgm_agent.dcgmSelectGpusByTopology(handle, inputList, numGpus, hints)
    sysSelectedMask = discover.SelectGpusByTopology(inputList, numGpus, hints)
    assert (selectedMask.value == inputList), "Expected %s but got %s" % (str(inputList), str(selectedMask))
    assert (sysSelectedMask.value == selectedMask.value)
    if len(gpuIds) > 2:
        numGpus = len(gpuIds) - 1
        # Make sure we don't select a gpu that isn't in the parameters
        for gpuId in gpuIds:
            # Fixed: 'intputList' typo meant the GPU's bit was never actually
            # removed from (or restored to) the input list, so the loop was
            # repeatedly testing the unmodified full mask.
            inputList = inputList & (~gpuBits[gpuId])
            selectedMask = dcgm_agent.dcgmSelectGpusByTopology(handle, inputList, numGpus, hints)
            sysSelectedMask = discover.SelectGpusByTopology(inputList, numGpus, hints)
            assert ((selectedMask.value & inputList) == selectedMask.value), "Selected a GPU outside of the input list"
            assert (sysSelectedMask.value == selectedMask.value)
            inputList = inputList | (gpuBits[gpuId])
@test_utils.run_with_embedded_host_engine()
@test_utils.run_only_with_live_gpus()
def test_select_gpus_by_topology_embedded(handle, gpuIds):
    """GPU selection-by-topology test against an embedded host engine."""
    helper_test_select_gpus_by_topology(handle, gpuIds)
@test_utils.run_with_standalone_host_engine(20)
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_all_supported_gpus()
def test_select_gpus_by_topology_standalone(handle, gpuIds):
    """GPU selection-by-topology test against a standalone host engine."""
    helper_test_select_gpus_by_topology(handle, gpuIds)
| DCGM-master | testing/python3/tests/test_topology.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
##### Spoof collectd for our testing framework
################################################################################
import collectd_tester_globals
import dcgm_collectd
################################################################################
def register_init(func_ptr):
    """Stub for collectd.register_init: record the plugin's init callback."""
    collectd_tester_globals.gvars['init'] = func_ptr
################################################################################
def register_read(func_ptr):
    """Stub for collectd.register_read: record the plugin's read callback."""
    collectd_tester_globals.gvars['read'] = func_ptr
################################################################################
def register_shutdown(func_ptr):
    """Stub for collectd.register_shutdown: record the shutdown callback."""
    collectd_tester_globals.gvars['shutdown'] = func_ptr
################################################################################
def info(msg):
    """Stub for collectd.info: echo the message to stdout."""
    print(msg)
################################################################################
def debug(msg):
    """Stub for collectd.debug: discard the message."""
    pass
################################################################################
class Values:
    """Stub of collectd.Values that records dispatched samples in a global map."""

    def __init__(self, **kwargs):
        # dcgm_collectd references these, so we'll reference them as well
        self.plugin = ''
        self.plugin_instance = ''

    def dispatch(self, **kwargs):
        """Record a dispatched sample as gvars['out'][gpuId][fieldTag] = value."""
        if 'out' not in collectd_tester_globals.gvars:
            collectd_tester_globals.gvars['out'] = {}
        if 'type_instance' in kwargs and 'type' in kwargs and 'values' in kwargs:
            gpuId = kwargs['type_instance']
            fieldTag = kwargs['type']
            oneVal = kwargs['values'][0]
            if gpuId not in collectd_tester_globals.gvars['out']:
                collectd_tester_globals.gvars['out'][gpuId] = {}
            # Put this in a global dictionary for later inspection
            collectd_tester_globals.gvars['out'][gpuId][fieldTag] = oneVal
| DCGM-master | testing/python3/tests/stubs/collectd_tester_api_cosmos.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
##### Spoof prometheus for our testing framework
################################################################################
import prometheus_tester_globals
################################################################################
def info(msg):
    """Stub logger: echo the informational message to stdout."""
    print(msg)
################################################################################
def error(msg):
    """Stub logger: echo the error message to stdout."""
    print(msg)
################################################################################
def debug(msg):
    """Stub logger: discard debug messages."""
    pass
################################################################################
class Value:
    """Stub prometheus metric value: an entity id plus a settable payload."""

    def __init__(self, id, value=None):
        self.id = id        # entity (GPU) id this value belongs to
        self.value = value  # last payload stored via set()

    def set(self, value):
        """Store the latest payload."""
        self.value = value

    def get(self):
        """Return the last payload stored (None if never set)."""
        return self.value
################################################################################
class Labels:
    """Stub label container mapping a unique entity id to its Value object."""

    def __init__(self):
        self.values = {}  # uniqueId -> Value
################################################################################
class Gauge:
    """Stub of prometheus_client.Gauge that registers itself in the test globals."""

    def __init__(self, name, documentation, fields=None):
        # 'fields' is accepted for signature compatibility but unused; its
        # default was a mutable list ([]), a classic shared-state pitfall.
        self.name = name
        self.documentation = documentation
        if not 'fields' in prometheus_tester_globals.gvars:
            prometheus_tester_globals.gvars['fields'] = {}
        prometheus_tester_globals.gvars['fields'][name] = Labels()

    def labels(self, id, uniqueId):
        """Create (overwriting any previous) and return the Value for uniqueId."""
        prometheus_tester_globals.gvars['fields'][self.name].values[uniqueId] = Value(id)
        return prometheus_tester_globals.gvars['fields'][self.name].values[uniqueId]
def start_http_server(port):
    """Stub: the test framework never opens a real HTTP endpoint."""
    pass
| DCGM-master | testing/python3/tests/stubs/prometheus_tester_api.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Declare the global variable map here to avoid multiple initializations
gvars = {}  # shared stub state: 'fields' -> {metric name -> Labels}
| DCGM-master | testing/python3/tests/stubs/prometheus_tester_globals.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Declare the global variable map here to avoid multiple initializations
gvars = {}     # shared stub state: registered callbacks plus dispatched output under 'out'
interval = 10  # read interval (seconds) most recently passed to register_read
| DCGM-master | testing/python3/tests/stubs/collectd_tester_globals.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
##### Spoof collectd for our testing framework
################################################################################
import collectd_tester_globals
import dcgm_collectd_plugin
################################################################################
def register_config(func_ptr, name=None):
    """Stub for collectd.register_config: record the config callback (name ignored)."""
    collectd_tester_globals.gvars['config'] = func_ptr
################################################################################
def register_init(func_ptr):
    """Stub for collectd.register_init: record the plugin's init callback."""
    collectd_tester_globals.gvars['init'] = func_ptr
################################################################################
def register_read(func_ptr, interval=10):
    """Stub for collectd.register_read: record the read callback and its interval."""
    collectd_tester_globals.gvars['read'] = func_ptr
    collectd_tester_globals.interval = interval
################################################################################
def register_shutdown(func_ptr):
    """Stub for collectd.register_shutdown: record the shutdown callback."""
    collectd_tester_globals.gvars['shutdown'] = func_ptr
################################################################################
def info(msg):
    """Stub for collectd.info: echo the message to stdout."""
    print(msg)
################################################################################
def error(msg):
    """Stub for collectd.error: echo the message to stdout."""
    print(msg)
################################################################################
def debug(msg):
    """Stub for collectd.debug: discard the message."""
    pass
################################################################################
class Values:
    """Stub of collectd.Values that records dispatched samples in a global map."""

    def __init__(self, **kwargs):
        # dcgm_collectd references these, so we'll reference them as well
        self.plugin = ''
        self.plugin_instance = ''

    def dispatch(self, **kwargs):
        """Record a dispatched sample as gvars['out'][gpuId][fieldTag] = value."""
        if 'out' not in collectd_tester_globals.gvars:
            collectd_tester_globals.gvars['out'] = {}
        if 'type_instance' in kwargs and 'type' in kwargs and 'values' in kwargs:
            gpuId = kwargs['type_instance']
            fieldTag = kwargs['type']
            oneVal = kwargs['values'][0]
            if gpuId not in collectd_tester_globals.gvars['out']:
                collectd_tester_globals.gvars['out'][gpuId] = {}
            # Put this in a global dictionary for later inspection
            collectd_tester_globals.gvars['out'][gpuId][fieldTag] = oneVal
| DCGM-master | testing/python3/tests/stubs/collectd_tester_api.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from _test_helpers import maybemock, skip_test_if_no_mock
from common.Struct import Struct
import logging
from common import dcgm_client_cli_parser as cli
def get_mock_call_name(call):
    """Return the method-name element of a mock call record (name, args, kwargs)."""
    return call[0]
def get_mock_call_args(call):
    """Return the positional-args element of a mock call record (name, args, kwargs)."""
    return call[1]
def get_mock_call_kwargs(call):
    """Return the keyword-args element of a mock call record (name, args, kwargs)."""
    return call[2]
def helper_check_argument_added(call_list, short_param=None, long_param=None, dest=None, type=None):
    """Return True iff *call_list* contains exactly one matching add_argument call.

    When short_param is given, looks for add_argument(short, long, dest=...);
    otherwise looks for the long-option-only form add_argument(long, dest=...).
    When *type* is given, the first match must also carry that type= keyword.
    """
    def _is_add_argument_with_arity(call, arity):
        # An add_argument mock call with the expected number of option strings.
        return (get_mock_call_name(call) == 'add_argument'
                and len(get_mock_call_args(call)) == arity)

    if short_param:
        matches = [c for c in call_list
                   if _is_add_argument_with_arity(c, 2)
                   and get_mock_call_args(c)[0] == short_param
                   and get_mock_call_args(c)[1] == long_param
                   and get_mock_call_kwargs(c)['dest'] == dest]
    else:
        matches = [c for c in call_list
                   if _is_add_argument_with_arity(c, 1)
                   and get_mock_call_args(c)[0] == long_param
                   and get_mock_call_kwargs(c)['dest'] == dest]
    # Check we have found at least one match
    if not matches:
        return False
    # Check the type is correct if it has been provided
    if type and type != get_mock_call_kwargs(matches[0])['type']:
        return False
    # Check we have found exactly one match
    return len(matches) == 1
def helper_check_mutually_exclusive_group_added():
    """Placeholder: mutually-exclusive-group verification is not implemented yet."""
    pass
# autospec tells mock to return objects that have the same interface
@maybemock.patch('argparse.ArgumentParser', autospec=True)
def test_create_parser(MockArgumentParser):
    """create_parser must register each expected option with the right dest/type."""
    result = cli.create_parser()
    mock_calls = result.mock_calls  # pylint: disable=no-member
    # NOTE(review): the first check passes dest positionally; the others use
    # the dest= keyword — both hit helper_check_argument_added's third param.
    assert helper_check_argument_added(mock_calls, '-p', '--publish-port',
                                       'publish_port', type=int)
    assert helper_check_argument_added(mock_calls, '-i', '--interval',
                                       dest='interval', type=int)
    assert helper_check_argument_added(mock_calls, '-f', '--field-ids',
                                       dest='field_ids', type=str)
    assert helper_check_argument_added(mock_calls, long_param='--log-file',
                                       dest='logfile', type=str)
    assert helper_check_argument_added(mock_calls, long_param='--log-level',
                                       dest='loglevel', type=str)
    # TODO mutually-exclusive group tests
@maybemock.patch('argparse.ArgumentParser', autospec=True)
def test_add_target_host_argument(MockArgumentParser):
    """add_target_host_argument must register -t/--publish-hostname on the parser."""
    parser = MockArgumentParser()
    cli.add_target_host_argument('name', parser)
    mock_calls = parser.mock_calls  # pylint: disable=no-member
    assert helper_check_argument_added(mock_calls, '-t', '--publish-hostname',
                                       dest='publish_hostname', type=str)
@skip_test_if_no_mock()
def test_run_parser():
    """run_parser must delegate to the parser's parse_args()."""
    parser = maybemock.Mock()
    cli.run_parser(parser)
    parser.parse_args.assert_called()
def test_get_field_ids():
    """get_field_ids must parse a comma-separated string and pass lists through."""
    assert cli.get_field_ids(Struct(field_ids="1,2,3")) == [1, 2, 3]
    assert cli.get_field_ids(Struct(field_ids=[1, 2, 3])) == [1, 2, 3]
@maybemock.patch('sys.exit')
def test_get_log_level(mock_exit):
    """get_log_level must accept numeric and named levels and exit on bad input."""
    mock_help = maybemock.Mock()
    assert cli.get_log_level(Struct(loglevel='0')) == logging.CRITICAL
    assert cli.get_log_level(Struct(loglevel='1')) == logging.ERROR
    assert cli.get_log_level(Struct(loglevel='2')) == logging.WARNING
    assert cli.get_log_level(Struct(loglevel='3')) == logging.INFO
    assert cli.get_log_level(Struct(loglevel='4')) == logging.DEBUG
    assert cli.get_log_level(Struct(loglevel='critical')) == logging.CRITICAL
    assert cli.get_log_level(Struct(loglevel='error')) == logging.ERROR
    assert cli.get_log_level(Struct(loglevel='warning')) == logging.WARNING
    assert cli.get_log_level(Struct(loglevel='info')) == logging.INFO
    assert cli.get_log_level(Struct(loglevel='debug')) == logging.DEBUG
    mock_exit.assert_not_called()
    # With sys.exit mocked out, execution continues past the error path.
    try: # It raises an exception because it tries to return an undeclared var
        cli.get_log_level(Struct(loglevel='wrong', print_help=mock_help))
    except:
        pass
    mock_exit.assert_called()
    mock_help.assert_called()
def test_parse_command_line():
    """Placeholder: parse_command_line reads sys.argv, making it hard to unit-test."""
    # TODO maybe add a test here. This function will be a pain to test
    pass
| DCGM-master | testing/python3/tests/common_tests/test_dcgm_client_cli_parser.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import signal
from _test_helpers import maybemock
from common import dcgm_client_main as m
@maybemock.patch('builtins.exit')
def test_exit_handler(mock_exit):
    """exit_handler must call the builtin exit() regardless of signal arguments."""
    m.exit_handler(None, None)
    mock_exit.assert_called()
@maybemock.patch('signal.signal')
def test_initialize_signal_handlers(mock_signal):
    """exit_handler must be installed for SIGINT then SIGTERM, in that order."""
    m.initialize_signal_handlers()
    assert mock_signal.mock_calls[0][1] == (signal.SIGINT, m.exit_handler)
    assert mock_signal.mock_calls[1][1] == (signal.SIGTERM, m.exit_handler)
| DCGM-master | testing/python3/tests/common_tests/test_dcgm_client_main.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DCGM-master | testing/python3/tests/common_tests/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common.Struct import Struct
def test_struct():
    """Struct must expose constructor kwargs as attributes and nothing else."""
    s = Struct(field='field')
    assert s.field == 'field' # pylint: disable=no-member
    try:
        i = s.notfield # pylint: disable=no-member
        assert False # notfield should not exist
    except:
        pass
| DCGM-master | testing/python3/tests/common_tests/test_struct.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DCGM-master | testing/python3/tests/nvswitch_tests/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pydcgm
import dcgm_field_helpers
import dcgm_fields
import dcgm_structs
import shlex
import time
import logger
import subprocess
import test_utils
from . import test_nvswitch_utils
@test_utils.run_with_standalone_host_engine()
@test_utils.run_with_initialized_client()
@test_utils.run_only_with_live_nvswitches()
def test_nvswitch_traffic_p2p(handle, switchIds):
    """
    Verifies that fabric can pass p2p read and write traffic successfully
    """
    # NOTE(review): this unconditional skip makes everything below dead code
    # until the NvSwitch bandwidth fields are actually published.
    test_utils.skip_test("Bandwidth field not being updated yet")
    # TX_0 and RX_0 on port 0
    nvSwitchBandwidth0FieldIds = []
    for i in range(dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_0_P00,
                   dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_RX_0_P00 + 1, 1):
        nvSwitchBandwidth0FieldIds.append(i)
    # TX_1 and RX_1 on port 0
    nvSwitchBandwidth1FieldIds = []
    for i in range(dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_1_P00,
                   dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_RX_1_P00 + 1, 1):
        nvSwitchBandwidth1FieldIds.append(i)
    dcgmHandle = pydcgm.DcgmHandle(ipAddress="127.0.0.1")
    groupName = "test_nvswitches"
    # Group containing every NvSwitch in the system.
    allNvSwitchesGroup = pydcgm.DcgmGroup(dcgmHandle, groupName=groupName,
                                          groupType=dcgm_structs.DCGM_GROUP_DEFAULT_NVSWITCHES)
    fgName = "test_nvswitches_bandwidth0"
    nvSwitchBandwidth0FieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, name=fgName,
                                                         fieldIds=nvSwitchBandwidth0FieldIds)
    fgName = "test_nvswitches_bandwidth1"
    nvSwitchBandwidth1FieldGroup = pydcgm.DcgmFieldGroup(dcgmHandle, name=fgName,
                                                         fieldIds=nvSwitchBandwidth1FieldIds)
    # 10-second watch interval, expressed in microseconds.
    updateFreq = int(20 / 2.0) * 1000000
    maxKeepAge = 600.0
    maxKeepSamples = 0
    nvSwitchBandwidth0Watcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(
        dcgmHandle.handle, allNvSwitchesGroup.GetId(),
        nvSwitchBandwidth0FieldGroup, dcgm_structs.DCGM_OPERATION_MODE_AUTO,
        updateFreq, maxKeepAge, maxKeepSamples, 0)
    nvSwitchBandwidth1Watcher = dcgm_field_helpers.DcgmFieldGroupEntityWatcher(
        dcgmHandle.handle, allNvSwitchesGroup.GetId(),
        nvSwitchBandwidth1FieldGroup, dcgm_structs.DCGM_OPERATION_MODE_AUTO,
        updateFreq, maxKeepAge, maxKeepSamples, 0)
    # wait for FM reports and populates stats
    time.sleep(30)
    # read the counters before sending traffic
    nvSwitchBandwidth0Watcher.GetMore()
    nvSwitchBandwidth1Watcher.GetMore()
    # NOTE(review): the counter*Before variables are overwritten on every
    # entity iteration, so only the LAST entity's "before" values survive
    # into the comparison loop below — TODO confirm this is intended (it is
    # only correct for a single-switch pass or uniformly-increasing counters).
    for entityGroupId in list(nvSwitchBandwidth0Watcher.values.keys()):
        for entityId in nvSwitchBandwidth0Watcher.values[entityGroupId]:
            bandwidth0FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_0_P00
            bandwidth1FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_1_P00
            counter0TxBefore = nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[
                -1].value
            bandwidth0FieldId += 1
            counter0RxBefore = nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[
                -1].value
            counter1TxBefore = nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[
                -1].value
            bandwidth1FieldId += 1
            counter1RxBefore = nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[
                -1].value
    # Generate write traffic for the nvswitches
    test_utils.run_p2p_bandwidth_app(test_nvswitch_utils.MEMCPY_DTOD_WRITE_CE_BANDWIDTH)
    # Generate read traffic for the nvswitches
    test_utils.run_p2p_bandwidth_app(test_nvswitch_utils.MEMCPY_DTOD_READ_CE_BANDWIDTH)
    # read the counters again after sending traffic
    nvSwitchBandwidth0Watcher.GetMore()
    nvSwitchBandwidth1Watcher.GetMore()
    for entityGroupId in list(nvSwitchBandwidth0Watcher.values.keys()):
        for entityId in nvSwitchBandwidth0Watcher.values[entityGroupId]:
            bandwidth0FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_0_P00
            bandwidth1FieldId = dcgm_fields.DCGM_FI_DEV_NVSWITCH_BANDWIDTH_TX_1_P00
            counter0TxAfter = nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[
                -1].value
            bandwidth0FieldId += 1
            counter0RxAfter = nvSwitchBandwidth0Watcher.values[entityGroupId][entityId][bandwidth0FieldId].values[
                -1].value
            counter1TxAfter = nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[
                -1].value
            bandwidth1FieldId += 1
            counter1RxAfter = nvSwitchBandwidth1Watcher.values[entityGroupId][entityId][bandwidth1FieldId].values[
                -1].value
            # Traffic must have incremented every port-0 bandwidth counter.
            assert counter0TxAfter > counter0TxBefore, "Counter0Tx did not increase"
            assert counter0RxAfter > counter0RxBefore, "counter0Rx did not increase"
            assert counter1TxAfter > counter1TxBefore, "Counter1Tx did not increase"
            assert counter1RxAfter > counter1RxBefore, "counter1Rx did not increase"
| DCGM-master | testing/python3/tests/nvswitch_tests/test_nvswitch_with_running_fm.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import apps
import logger
import subprocess
import test_utils
import time
# p2p_bandwidth app argument lists
P2P_TEST_LIST = ["-l"]
MEMCPY_DTOD_WRITE_CE_BANDWIDTH = ["-t", "Memcpy_DtoD_Write_CE_Bandwidth"]
MEMCPY_DTOD_READ_CE_BANDWIDTH = ["-t", "Memcpy_DtoD_Read_CE_Bandwidth"]
def is_nvidia_docker_running():
    """
    Return True if nvidia-docker service is running on the system
    """
    cmd = 'systemctl status nvidia-docker'
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out_buf, _err_buf = p.communicate()
    out = out_buf.decode('utf-8')
    # 'active (running)' appears in the status output for a running unit.
    # (Dropped the unused stderr decode and the redundant True/False branch.)
    return "running" in out
def get_nvswitch_pci_bdf():
    """Return the PCI BDF strings of all NvSwitch (NVIDIA bridge) devices.

    Returns an empty list when lspci fails or finds no matching devices.
    """
    bdf = []
    try:
        lsPciOutputBuf = subprocess.check_output("lspci | grep -i nvidia | grep -i bridge", shell=True)
        lsPciOutput = lsPciOutputBuf.decode('utf-8')
    except subprocess.CalledProcessError as e:
        # BUGFIX: CalledProcessError has no .message attribute in Python 3;
        # logger.error(e.message) raised AttributeError and masked the real
        # failure. Log the exception text instead.
        logger.error(str(e))
        return bdf
    # The first whitespace-separated token of each non-empty line is the BDF.
    for line in lsPciOutput.split('\n'):
        dev = line.split()
        if len(dev) > 0:
            bdf.append(dev[0])
    return bdf
def get_gpu_pci_bdf():
    """Return the PCI BDF strings of all NVIDIA 3D-controller (GPU) devices.

    Returns an empty list when lspci fails or finds no matching devices.
    """
    bdf = []
    try:
        lsPciOutputBuf = subprocess.check_output("lspci | grep -i nvidia | grep -i '3d controller'", shell=True)
        lsPciOutput = lsPciOutputBuf.decode('utf-8')
    except subprocess.CalledProcessError as e:
        # BUGFIX: CalledProcessError has no .message attribute in Python 3;
        # logger.error(e.message) raised AttributeError and masked the real
        # failure. Log the exception text instead.
        logger.error(str(e))
        return bdf
    # The first whitespace-separated token of each non-empty line is the BDF.
    for line in lsPciOutput.split('\n'):
        dev = line.split()
        if len(dev) > 0:
            bdf.append(dev[0])
    return bdf
def is_dgx_2_full_topology():
    """
    Return true if detect all nvswitches and GPUs on two base boards or one base board
    """
    # Full topology is 12 switches + 16 GPUs (two boards) or 6 + 8 (one board).
    switch_count = len(get_nvswitch_pci_bdf())
    gpu_count = len(get_gpu_pci_bdf())
    return (switch_count, gpu_count) in ((12, 16), (6, 8))
| DCGM-master | testing/python3/tests/nvswitch_tests/test_nvswitch_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DCGM-master | testing/python3/common/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import environ
import argparse
import logging
import sys
###############################################################################
def create_parser(
    publish_port = 8000,
    interval = 10,
    name = 'the monitoring tool', # Replace with 'prometheus', 'telegraf', etc.
    field_ids = None,
    log_file = None,
    log_level = 'INFO',
    dcgm_hostname = environ.get('DCGM_HOSTNAME') or 'localhost',
):
    '''
    Create a parser that defaults to sane parameters.

    The default parameters can be overridden through keyword arguments.

    Note: if DCGM_HOSTNAME is set as an environment variable, it is used as
    the default instead of localhost

    Registered dests: publish_port (int), interval (int), field_ids (str),
    logfile (str), loglevel (str), and a mutually exclusive pair
    hostname (str) / embedded (flag).
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--publish-port', dest='publish_port', type=int, default=publish_port,
                        help='TCP port that the client should publish to. Default={}.'.format(publish_port))
    parser.add_argument('-i', '--interval', dest='interval', type=int, default=interval,
                        help='How often the client should retrieve new values from DCGM in seconds. Default={}.'.format(interval))
    parser.add_argument('-f', '--field-ids', dest='field_ids', type=str, default=field_ids,
                        help='Comma-separated list of field IDs that should be retrieved from DCGM. '+
                        'The full list of available field IDs can be obtained from dcgm_fields.h, dcgm_fields.py, '+
                        'or running \'dcgmi dmon -l\'.')
    parser.add_argument('--log-file', dest='logfile', type=str, default=log_file,
                        help='A path to a log file for recording what information is being sent to {}'.format(name))
    parser.add_argument('--log-level', dest='loglevel', type=str, default=log_level,
                        help='Specify a log level to use for logging.\n\tCRITICAL (0) - log only critical errors that drastically affect execution' +
                        '\n\tERROR (1) - Log any error in execution\n\tWARNING (2) - Log all warnings and errors that occur' +
                        '\n\tINFO (3) - Log informational messages about program execution in addition to warnings and errors' +
                        '\n\tDEBUG (4) - Log debugging information in addition to all information about execution' +
                        '\nDefault: {}'.format(log_level))
    # Connecting to a remote/local nv-hostengine and running embedded are
    # mutually exclusive modes.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-n', '--hostname', dest='hostname', type=str, default=dcgm_hostname,
                       help='IP/hostname where the client should query DCGM for values. Default={} (all interfaces).' .format(dcgm_hostname))
    group.add_argument('-e', '--embedded', dest='embedded', action='store_true',
                       help='Launch DCGM from within this process instead of connecting to nv-hostengine.')
    return parser
def add_custom_argument(parser, *args, **kwargs):
    """Pass-through wrapper so clients can add tool-specific options to *parser*."""
    parser.add_argument(*args, **kwargs)
###############################################################################
def add_target_host_argument(name, parser, default_target='localhost'):
    """Register the -t/--publish-hostname option (dest publish_hostname) on *parser*."""
    help_text = 'The hostname at which the client will publish the readings to {}'.format(name)
    parser.add_argument(
        '-t',
        '--publish-hostname',
        dest='publish_hostname',
        type=str,
        default=default_target,
        help=help_text,
    )
###############################################################################
def run_parser(parser):
    '''
    Run a parser created using create_parser

    Parses sys.argv and returns the resulting Namespace.
    '''
    return parser.parse_args()
###############################################################################
def get_field_ids(args):
    """Return args.field_ids as a list of ints.

    A user-supplied comma-separated string is parsed; any non-string value
    (the default, already a list of ints or None) is returned unchanged.
    """
    raw = args.field_ids
    if not isinstance(raw, str):
        return raw
    return [int(token) for token in raw.split(",")]
###############################################################################
def get_log_level(args):
    """Map args.loglevel (digit '0'-'4' or level name, case-insensitive) to a
    logging module level constant.

    On an unrecognized value: prints an error, calls args.print_help(), and
    exits with status 2.
    """
    levels = {
        '0': logging.CRITICAL, 'CRITICAL': logging.CRITICAL,
        '1': logging.ERROR,    'ERROR':    logging.ERROR,
        '2': logging.WARNING,  'WARNING':  logging.WARNING,
        '3': logging.INFO,     'INFO':     logging.INFO,
        '4': logging.DEBUG,    'DEBUG':    logging.DEBUG,
    }
    levelStr = args.loglevel.upper()
    if levelStr in levels:
        return levels[levelStr]
    print("Could not understand the specified --log-level '%s'" % (args.loglevel))
    args.print_help()
    # BUGFIX: the old if/elif chain fell through to `return numeric_log_level`
    # with the name unbound whenever sys.exit was mocked/intercepted, raising
    # NameError instead of terminating cleanly.
    sys.exit(2)
###############################################################################
def parse_command_line(name, default_port, add_target_host=False):
    """Parse CLI arguments for a DCGM client and return a settings dict.

    Keys: publish_port, interval, logfile, dcgm_hostname, field_ids,
    log_level, and publish_hostname (only when add_target_host is True).
    """
    # Fields we accept raw from the CLI
    FIELDS_AS_IS = ['publish_port', 'interval', 'logfile', 'publish_hostname']
    parser = create_parser(
        name=name,
        publish_port=default_port,
    )
    if add_target_host:
        add_target_host_argument(name, parser)
    args = run_parser(parser)
    field_ids = get_field_ids(args)
    log_level = get_log_level(args)
    args_as_dict = vars(args)
    # BUGFIX: 'publish_hostname' only exists on the namespace when
    # add_target_host was requested; indexing it unconditionally raised
    # KeyError for clients that do not publish to a remote host.
    settings = {i: args_as_dict[i] for i in FIELDS_AS_IS if i in args_as_dict}
    settings['dcgm_hostname'] = None if args.embedded else args.hostname
    settings['field_ids'] = field_ids
    settings['log_level'] = log_level
    return settings
| DCGM-master | testing/python3/common/dcgm_client_cli_parser.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Allows for easily creating "anonymous" objects
# From http://norvig.com/python-iaq.html
class Struct:
    """Anonymous record type: Struct(a=1).a == 1 (see http://norvig.com/python-iaq.html)."""

    def __init__(self, **entries):
        # Expose each keyword argument as an instance attribute.
        self.__dict__.update(entries)
| DCGM-master | testing/python3/common/Struct.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import sleep
from . import dcgm_client_cli_parser as cli
import signal
###############################################################################
def exit_handler(signum, frame):
    """SIGINT/SIGTERM handler: terminate the process immediately."""
    # The Prometheus client does something smarter but more complex
    # Here we just exit
    exit()
###############################################################################
def initialize_signal_handlers():
    """Install exit_handler so SIGINT and SIGTERM shut the client down cleanly."""
    # Registration order (SIGINT first) is relied upon by the unit tests.
    for signum in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signum, exit_handler)
###############################################################################
def main(DRConstructor, name, default_port, add_target_host=False):
    '''
    Generic entry point for DCGM clients: build a DcgmReader and poll DCGM
    in an endless loop, publishing each batch of readings.

    Arguments
    ---------
    DRConstructor: A constructor for a DcgmReader. It must accept the
                   keyword arguments hostname and publish_port; when
                   applicable it also receives publish_hostname and/or
                   fieldIds.
    name:          The name of the client, displayed to the user.
    default_port:  Default port to publish to.

    Keyword arguments
    -----------------
    add_target_host: Whether this client accepts a publish hostname.
    '''
    initialize_signal_handlers()
    settings = cli.parse_command_line(
        name,
        default_port,
        add_target_host=add_target_host,
    )
    # Assemble the constructor kwargs dynamically: field_ids may be absent
    # (None), in which case DcgmReader's own default list must be used.
    reader_kwargs = {
        'hostname': settings['dcgm_hostname'],
        'publish_port': settings['publish_port'],
    }
    if add_target_host:
        reader_kwargs['publish_hostname'] = settings['publish_hostname']
    if settings['field_ids']:
        reader_kwargs['fieldIds'] = settings['field_ids']
    reader = DRConstructor(**reader_kwargs)
    try:
        while True:
            reader.Process()
            sleep(settings['interval'])
    except KeyboardInterrupt:
        print('Caught CTRL-C. Exiting')
| DCGM-master | testing/python3/common/dcgm_client_main.py |
DCGM-master | testing/python3/libs_3rdparty/__init__.py |
|
from .initialise import init, deinit, reinit
from .ansi import Fore, Back, Style
from .ansitowin32 import AnsiToWin32
VERSION = '0.2.4'
| DCGM-master | testing/python3/libs_3rdparty/colorama/__init__.py |
# from winbase.h
STDOUT = -11
STDERR = -12

try:
    from ctypes import windll
except ImportError:
    # Not on Windows (no ctypes.windll): expose inert stand-ins so importers
    # can still reference these names.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
else:
    from ctypes import (
        byref, Structure, c_char, c_short, c_uint32, c_ushort
    )

    # Cache the Win32 console handles for stdout/stderr once at import time.
    handles = {
        STDOUT: windll.kernel32.GetStdHandle(STDOUT),
        STDERR: windll.kernel32.GetStdHandle(STDERR),
    }

    # ctypes aliases matching the Windows API typedef names.
    SHORT = c_short
    WORD = c_ushort
    DWORD = c_uint32
    TCHAR = c_char
    class COORD(Structure):
        """struct in wincon.h: an (X, Y) character-cell coordinate."""
        _fields_ = [
            ('X', SHORT),
            ('Y', SHORT),
        ]
    class SMALL_RECT(Structure):
        """struct in wincon.h: edges of a console window rectangle."""
        _fields_ = [
            ("Left", SHORT),
            ("Top", SHORT),
            ("Right", SHORT),
            ("Bottom", SHORT),
        ]
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h: buffer size, cursor, attributes and window rect."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", WORD),
            ("srWindow", SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Compact debug rendering: size, cursor, attributes, window rect,
            # then max window size.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        """Return the CONSOLE_SCREEN_BUFFER_INFO for the given console handle id."""
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = windll.kernel32.GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        # NOTE(review): `success` is not checked; a failed API call returns a
        # zeroed struct.
        return csbi
    def SetConsoleTextAttribute(stream_id, attrs):
        """Set the console's current text attributes (fore/back color, intensity)."""
        handle = handles[stream_id]
        return windll.kernel32.SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position):
        """Move the console cursor to *position*, given in ANSI 1-based (y, x) form."""
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        # Adjust for viewport's scroll position
        sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
        adjusted_position.Y += sr.Top
        adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return windll.kernel32.SetConsoleCursorPosition(handle, adjusted_position)
    def FillConsoleOutputCharacter(stream_id, char, length, start):
        """Write *char* *length* times starting at COORD *start*; return count written."""
        handle = handles[stream_id]
        char = TCHAR(char)
        length = DWORD(length)
        num_written = DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = windll.kernel32.FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value
    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        handle = handles[stream_id]
        attribute = WORD(attr)
        length = DWORD(length)
        num_written = DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return windll.kernel32.FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))
| DCGM-master | testing/python3/libs_3rdparty/colorama/win32.py |
import re
import sys
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
if windll is not None:
winterm = WinTerm()
def is_a_tty(stream):
    """Return whether *stream* reports itself as an interactive terminal."""
    if not hasattr(stream, 'isatty'):
        return False
    return stream.isatty()
class StreamWrapper(object):
    '''
    Transparent proxy around a stream (such as stdout): every attribute is
    forwarded to the wrapped stream, except write(), which is routed through
    our Converter instance so ANSI sequences can be stripped/translated.
    '''
    def __init__(self, wrapped, converter):
        # Name-mangled (double underscore) so these cannot clash with
        # attribute names on the wrapped stream object.
        self.__stream = wrapped
        self.__converter = converter

    def __getattr__(self, name):
        # Anything we don't define ourselves comes from the real stream.
        return getattr(self.__stream, name)

    def write(self, text):
        self.__converter.write(text)
class AnsiToWin32(object):
'''
Implements a 'write()' method which, on Windows, will strip ANSI character
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_RE = re.compile('\033\[((?:\d|;)*)([a-zA-Z])')
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
self.wrapped = wrapped
# should we reset colors to defaults after every .write()
self.autoreset = autoreset
# create the proxy wrapping our output stream
self.stream = StreamWrapper(wrapped, self)
on_windows = sys.platform.startswith('win')
# should we strip ANSI sequences from our output?
if strip is None:
strip = on_windows
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = on_windows and is_a_tty(wrapped)
self.convert = convert
# dict of ansi codes to win32 functions and parameters
self.win32_calls = self.get_win32_calls()
# are we wrapping stderr?
self.on_stderr = self.wrapped is sys.stderr
def should_wrap(self):
'''
True if this class is actually needed. If false, then the output
stream will not be affected, nor will win32 calls be issued, so
wrapping stdout is not actually required. This will generally be
False on non-Windows platforms, unless optional functionality like
autoreset has been requested using kwargs to init()
'''
return self.convert or self.strip or self.autoreset
def get_win32_calls(self):
if self.convert and winterm:
return {
AnsiStyle.RESET_ALL: (winterm.reset_all, ),
AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
AnsiFore.RED: (winterm.fore, WinColor.RED),
AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
AnsiFore.RESET: (winterm.fore, ),
AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
AnsiBack.RED: (winterm.back, WinColor.RED),
AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
AnsiBack.WHITE: (winterm.back, WinColor.GREY),
AnsiBack.RESET: (winterm.back, ),
}
def write(self, text):
if self.strip or self.convert:
self.write_and_convert(text)
else:
self.wrapped.write(text)
self.wrapped.flush()
if self.autoreset:
self.reset_all()
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif is_a_tty(self.wrapped):
self.wrapped.write(Style.RESET_ALL)
def write_and_convert(self, text):
'''
Write the given text to our wrapped stream, stripping any ANSI
sequences from the text, and optionally converting them into win32
calls.
'''
cursor = 0
for match in self.ANSI_RE.finditer(text):
start, end = match.span()
self.write_plain_text(text, cursor, start)
self.convert_ansi(*match.groups())
cursor = end
self.write_plain_text(text, cursor, len(text))
def write_plain_text(self, text, start, end):
    """Forward text[start:end] to the wrapped stream; no-op on empty spans."""
    if end > start:
        self.wrapped.write(text[start:end])
        self.wrapped.flush()
def convert_ansi(self, paramstring, command):
    """Replay one ANSI escape sequence as win32 calls (only when converting)."""
    if not self.convert:
        return
    self.call_win32(command, self.extract_params(paramstring))
def extract_params(self, paramstring):
    """Parse a 'a;b;c' parameter string into a tuple of ints, skipping empties."""
    return tuple(int(part) for part in paramstring.split(';') if part != '')
def call_win32(self, command, params):
    """
    Dispatch one parsed ANSI command to the win32 console emulation.

    command -- final letter of the escape sequence ('m', 'H', 'f', 'J')
    params  -- sequence of integer parameters from the escape sequence
    """
    # An empty parameter list means "default" (a bare ESC[m equals ESC[0m).
    # BUG FIX: the original compared `params == []`, which is never true for
    # the tuple produced by extract_params(), so ESC[m was silently ignored.
    if not params:
        params = (0,)
    if command == 'm':
        for param in params:
            if param in self.win32_calls:
                func_args = self.win32_calls[param]
                func = func_args[0]
                args = func_args[1:]
                kwargs = dict(on_stderr=self.on_stderr)
                func(*args, **kwargs)
    elif command in ('H', 'f'):  # set cursor position
        winterm.set_cursor_position(params, on_stderr=self.on_stderr)
    elif command == 'J':  # erase data (was `in ('J')` — a string, not a tuple)
        winterm.erase_data(params, on_stderr=self.on_stderr)
| DCGM-master | testing/python3/libs_3rdparty/colorama/ansitowin32.py |
'''
This module generates ANSI character codes to printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
# Control Sequence Introducer: the prefix of every ANSI escape sequence.
CSI = '\033['

def code_to_chars(code):
    """Return the ready-to-print ANSI SGR escape string for *code*."""
    return '%s%sm' % (CSI, code)
class AnsiCodes(object):
    """Mirror every public constant of *codes* as its ANSI escape string."""
    def __init__(self, codes):
        public_names = (n for n in dir(codes) if not n.startswith('_'))
        for name in public_names:
            setattr(self, name, code_to_chars(getattr(codes, name)))
class AnsiFore:
    # ANSI SGR foreground color codes (30-37); RESET (39) restores the
    # terminal's default foreground color.
    BLACK = 30
    RED = 31
    GREEN = 32
    YELLOW = 33
    BLUE = 34
    MAGENTA = 35
    CYAN = 36
    WHITE = 37
    RESET = 39
class AnsiBack:
    # ANSI SGR background color codes (40-47); RESET (49) restores the
    # terminal's default background color.
    BLACK = 40
    RED = 41
    GREEN = 42
    YELLOW = 43
    BLUE = 44
    MAGENTA = 45
    CYAN = 46
    WHITE = 47
    RESET = 49
class AnsiStyle:
    # ANSI SGR text intensity codes: 1=bold/bright, 2=faint/dim,
    # 22=normal intensity, 0=reset all attributes.
    BRIGHT = 1
    DIM = 2
    NORMAL = 22
    RESET_ALL = 0
# Public singletons: attribute access yields ready-to-print escape strings,
# e.g. Fore.RED == '\033[31m'.
Fore = AnsiCodes( AnsiFore )
Back = AnsiCodes( AnsiBack )
Style = AnsiCodes( AnsiStyle )
| DCGM-master | testing/python3/libs_3rdparty/colorama/ansi.py |
from . import win32
# from wincon.h
class WinColor(object):
    """Windows console color indices (character attribute values, wincon.h)."""
    BLACK = 0
    BLUE = 1
    GREEN = 2
    CYAN = 3
    RED = 4
    MAGENTA = 5
    YELLOW = 6
    GREY = 7
# from wincon.h
class WinStyle(object):
    """Windows console intensity bit (FOREGROUND_INTENSITY, wincon.h)."""
    NORMAL = 0x00 # dim text, dim background
    BRIGHT = 0x08 # bright text, dim background
class WinTerm(object):
    """
    Thin stateful wrapper over the win32 console API.

    Tracks the current foreground/background/style attribute fields and
    pushes them to the stdout or stderr console handle on each change.
    """

    def __init__(self):
        # Remember the console's startup attributes so reset_all() can
        # restore them later.
        self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
        self.set_attrs(self._default)
        self._default_fore = self._fore
        self._default_back = self._back
        self._default_style = self._style

    def get_attrs(self):
        """Pack the fore/back/style fields back into a win32 attribute word."""
        return self._fore + self._back * 16 + self._style

    def set_attrs(self, value):
        """Unpack a win32 attribute word into fore/back/style fields."""
        self._fore = value & 7
        self._back = (value >> 4) & 7
        self._style = value & WinStyle.BRIGHT

    def reset_all(self, on_stderr=None):
        # NOTE(review): on_stderr is accepted for dispatch-table compatibility
        # but not forwarded — the reset always targets stdout. Confirm intent.
        self.set_attrs(self._default)
        self.set_console(attrs=self._default)

    def fore(self, fore=None, on_stderr=False):
        """Set the foreground color (None restores the startup foreground)."""
        if fore is None:
            fore = self._default_fore
        self._fore = fore
        self.set_console(on_stderr=on_stderr)

    def back(self, back=None, on_stderr=False):
        """Set the background color (None restores the startup background)."""
        if back is None:
            back = self._default_back
        self._back = back
        self.set_console(on_stderr=on_stderr)

    def style(self, style=None, on_stderr=False):
        """Set the intensity bit (None restores the startup style)."""
        if style is None:
            style = self._default_style
        self._style = style
        self.set_console(on_stderr=on_stderr)

    def set_console(self, attrs=None, on_stderr=False):
        """Apply *attrs* (default: current tracked state) to the console."""
        if attrs is None:
            attrs = self.get_attrs()
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        win32.SetConsoleTextAttribute(handle, attrs)

    def set_cursor_position(self, position=None, on_stderr=False):
        if position is None:
            # I'm not currently tracking the position, so there is no default.
            # position = self.get_position()
            return
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        win32.SetConsoleCursorPosition(handle, position)

    def erase_data(self, mode=0, on_stderr=False):
        # ANSI semantics: 0 (or None) clears from the cursor to the end of the
        # screen, 1 to the beginning, 2 the entire screen. Only mode 2 is
        # implemented here.
        # BUG FIX: callers pass a params sequence (e.g. (2,)), but the default
        # is the bare int 0, which made `mode[0]` raise TypeError when called
        # with no arguments. Accept both a sequence and a plain int.
        if isinstance(mode, (list, tuple)):
            mode = mode[0] if mode else 0
        if mode not in (2,):
            return
        handle = win32.STDOUT
        if on_stderr:
            handle = win32.STDERR
        # Home the cursor here after clearing.
        coord_screen = win32.COORD(0, 0)
        csbi = win32.GetConsoleScreenBufferInfo(handle)
        # Number of character cells in the current buffer.
        dw_con_size = csbi.dwSize.X * csbi.dwSize.Y
        # Fill the entire screen with blanks...
        win32.FillConsoleOutputCharacter(handle, ord(' '), dw_con_size, coord_screen)
        # ...and set the buffer's attributes accordingly.
        win32.FillConsoleOutputAttribute(handle, self.get_attrs(), dw_con_size, coord_screen)
        # Put the cursor at (0, 0).
        win32.SetConsoleCursorPosition(handle, (coord_screen.X, coord_screen.Y))
| DCGM-master | testing/python3/libs_3rdparty/colorama/winterm.py |
import atexit
import sys
from .ansitowin32 import AnsiToWin32
orig_stdout = sys.stdout
orig_stderr = sys.stderr
wrapped_stdout = sys.stdout
wrapped_stderr = sys.stderr
atexit_done = False
def reset_all():
    # Reset via the ORIGINAL stdout so this works even after deinit().
    AnsiToWin32(orig_stdout).reset_all()
def init(autoreset=False, convert=None, strip=None, wrap=True):
    """Wrap sys.stdout/sys.stderr so ANSI sequences work everywhere.

    wrap=False is only legal when no other feature is requested.
    """
    if not wrap and any([autoreset, convert, strip]):
        raise ValueError('wrap=False conflicts with any other arg=True')
    global wrapped_stdout, wrapped_stderr
    wrapped_stdout = wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
    wrapped_stderr = wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
    sys.stdout = wrapped_stdout
    sys.stderr = wrapped_stderr
    global atexit_done
    if not atexit_done:
        # Register exactly once: restore default attrs at interpreter exit.
        atexit.register(reset_all)
        atexit_done = True
def deinit():
    """Undo init(): restore the original stdout/stderr stream objects."""
    sys.stdout, sys.stderr = orig_stdout, orig_stderr
def reinit():
    """Re-apply the wrapped streams after a deinit()."""
    sys.stdout = wrapped_stdout
    # BUG FIX: stderr was previously re-bound to wrapped_stdout, which sent
    # all stderr output to stdout after a deinit()/reinit() cycle.
    sys.stderr = wrapped_stderr
def wrap_stream(stream, convert, strip, autoreset, wrap):
    """Return *stream*, or an AnsiToWin32 proxy around it when wrapping applies."""
    if not wrap:
        return stream
    wrapper = AnsiToWin32(stream,
        convert=convert, strip=strip, autoreset=autoreset)
    # Only substitute the proxy when the wrapper decides it is needed.
    return wrapper.stream if wrapper.should_wrap() else stream
| DCGM-master | testing/python3/libs_3rdparty/colorama/initialise.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import app_runner
import logger
import os
import utils
import test_utils
class RunNVpex2(app_runner.AppRunner):
    """ Runs the nvpex2 app to inject errors during nvswitch testing """
    # Per-platform relative paths to the nvpex2 binary, keyed by
    # utils.platform_identifier.
    paths = {
        "Linux_64bit": "./apps/nvpex2/nvpex2",
        "Linux_ppc64le": "./apps/nvpex2/nvpex2",
        "Linux_aarch64": "./apps/nvpex2/nvpex2",
    }
    def __init__(self, args=None):
        # args: optional list of command-line arguments forwarded to nvpex2.
        path = os.path.join(utils.script_dir, RunNVpex2.paths[utils.platform_identifier])
        super(RunNVpex2, self).__init__(path, args)
    def start(self):
        """
        Runs the nvpex2 command
        """
        # Hard 10-second timeout for the injection run.
        super(RunNVpex2, self).start(timeout=10)
    def __str__(self):
        return "RunNVpex2 on all supported devices " + super(RunNVpex2, self).__str__()
| DCGM-master | testing/python3/apps/nvpex2.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from . import app_runner
import utils
import test_utils
import logger
import option_parser
class DcgmiApp(app_runner.AppRunner):
    """AppRunner wrapper that launches the dcgmi command-line tool."""
    # Per-platform relative paths to the dcgmi binary, including future
    # supported architectures (keyed by utils.platform_identifier).
    paths = {
        "Linux_32bit": "./apps/x86/dcgmi",
        "Linux_64bit": "./apps/amd64/dcgmi",
        "Linux_ppc64le": "./apps/ppc64le/dcgmi",
        "Linux_aarch64": "./apps/aarch64/dcgmi",
        "Windows_64bit": "./apps/amd64/dcgmi.exe"
    }
    forbidden_strings = [
        # None of these error strings should ever be printed by dcgmi
        "Unknown Error",
        "Uninitialized",
        "Invalid Argument",
        "Already Initialized",
        "Insufficient Size",
        "Driver Not Loaded",
        "Timeout",
        "DCGM Shared Library Not Found",
        "Function Not Found",
        "(null)", # e.g. from printing %s from null ptr
    ]
    def __init__(self, args=None):
        # args: optional list of command-line arguments forwarded to dcgmi.
        path = DcgmiApp.paths[utils.platform_identifier]
        self.dcgmi = None
        self.output_filename = None
        super(DcgmiApp, self).__init__(path, args)
        # Route DCGM debug logging to a per-process trace file unless
        # logging was disabled with --no-logging.
        if not test_utils.noLogging:
            self.trace_fname = os.path.join(logger.log_dir, "app_%03d_dcgm_trace.log" % (self.process_nb))
            self.env["__DCGM_DBG_FILE"] = self.trace_fname
            self.env["__DCGM_DBG_LVL"] = test_utils.loggingLevel
        else:
            self.trace_fname = None
    def _process_finish(self, stdout_buf, stderr_buf):
        super(DcgmiApp, self)._process_finish(stdout_buf, stderr_buf)
        # Skip this part if --no-logging option is used
        if logger.log_dir is None:
            return
        # Verify that dcgmi doesn't print any strings that should never be
        # printed on a working system
        stdout = "\n".join(self.stdout_lines)
        for forbidden_text in DcgmiApp.forbidden_strings:
            assert stdout.find(forbidden_text) == -1, "dcgmi printed \"%s\", this should never happen!" % forbidden_text
    def __str__(self):
        return "dcgmi" + super(DcgmiApp, self).__str__()
| DCGM-master | testing/python3/apps/dcgmi_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from . import app_runner
import dcgm_structs
import dcgm_agent_internal
import test_utils
import utils
import logger
class LsofApp(app_runner.AppRunner):
    """
    Allows to query processes that have some file open (e.g. device node)
    """
    # Per-platform command name (lsof is expected on PATH), keyed by
    # utils.platform_identifier.
    paths = {
        "Linux_32bit": "lsof",
        "Linux_64bit": "lsof",
        "Linux_ppc64le": "lsof",
        "Linux_aarch64": "lsof",
    }

    def __init__(self, fname):
        # fname: path whose openers should be listed.
        path = LsofApp.paths[utils.platform_identifier]
        self.processes = None  # filled lazily by _process_finish()
        self.fname = fname
        super(LsofApp, self).__init__(path, ["-F", "-V", fname])

    def start(self, timeout=app_runner.default_timeout):
        # try to run as root, otherwise the list of processes might be incomplete
        # (e.g. it won't report processes running by other users)
        with test_utils.tryRunAsRoot():
            super(LsofApp, self).start(timeout)

    def _process_finish(self, stdout_buf, stderr_buf):
        """Parse lsof output into self.processes as [pid, name] pairs."""
        super(LsofApp, self)._process_finish(stdout_buf, stderr_buf)
        if self.retvalue() == 1 and self.stdout_lines and self.stdout_lines[0].startswith("lsof: no file use located: "):
            # lsof returns with error code 1 and prints message when no processes opened the file
            self.validate()
            self._retvalue = 0  # Fake success
            self.processes = []
            return
        if self.retvalue() == 1 and self.stderr_lines and self.stderr_lines[0].endswith("No such file or directory"):
            # lsof returns with error code 1 and prints message when target file doesn't exist
            self.validate()
            self._retvalue = 0  # Fake success
            self.processes = []  # no file, no processes using it
            return
        if self.retvalue() != 0:
            # Print out stdout and stderr so we can see it in eris
            logger.warning("lsof with args %s had a retval of %s. stderr:" % (self.args, str(self._retvalue)))
            if self.stderr_lines:
                logger.warning(str(self.stderr_lines))
            logger.warning("stdout:")
            if self.stdout_lines:
                logger.warning(str(self.stdout_lines))
        assert self._retvalue == 0, "Failed to read processes that have the file opened. Read process log for more details"
        assert len(self.stdout_lines) > 0, "Behavior of lsof changed. Returned 0 return code but stdout is empty"
        self.processes = []
        if utils.is_esx_hypervisor_system():
            # ESX lsof ignores input args and outputs data in its own format.
            # FIX: raw string — "\d"/"\s" in a plain string are invalid escape
            # sequences and raise warnings on modern Python.
            lsofre = re.compile(r"^(\d+)\s+(\S+)\s+(\S+)\s+(-?\d+)\s+(.*)")
            for line in self.stdout_lines[2:]:
                (pid, pname, fdtype, fd, fpath) = lsofre.match(line).groups()
                if fpath == self.fname:
                    self.processes.append([int(pid), pname])
        else:
            # Field-mode output: "p<pid>" starts a process record, "c<name>"
            # carries its command name.
            last_value = [None, None]
            for line in self.stdout_lines:
                if not line:
                    continue  # skip empty lines
                tag = line[0]
                content = line[1:]
                if tag == "p":
                    last_value = [int(content), None]
                    self.processes.append(last_value)
                elif tag == "c":
                    last_value[1] = content

    def get_processes(self, ignore_pids=None, ignore_names=None):
        """
        Returns list of processes (list of pairs (pid, process name)).
        By default it filters out current process from the list.
        ignore_pids - by default contains current pid
        ignore_names - by default empty (but could be used e.g. to easily filter "Xorg")
        """
        ignore_pids = ignore_pids or [os.getpid()]
        ignore_names = ignore_names or []
        if self.processes is None:
            self.run()
        result = self.processes[:]
        if ignore_pids:
            result = [p for p in result if p[0] not in ignore_pids]
        if ignore_names:
            result = [p for p in result if p[1] not in ignore_names]
        return result
| DCGM-master | testing/python3/apps/lsof_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import app_runner
import os
import utils
class XidApp(app_runner.AppRunner):
    """AppRunner wrapper for the xid test app, run against one GPU (-i busId)."""
    # Per-platform relative paths to the xid binary, keyed by
    # utils.platform_identifier.
    paths = {
        "Linux_32bit": "./apps/xid/xid_32bit",
        "Linux_64bit": "./apps/xid/xid_64bit",
        "Linux_ppc64le": "./apps/xid/xid_ppc64le",
        "Windows_64bit": "./apps/xid/xid_64bit.exe"
    }
    def __init__(self, device):
        # device: GPU object; its PCI busId selects the target device.
        self.device = device
        path = os.path.join(utils.script_dir, XidApp.paths[utils.platform_identifier])
        super(XidApp, self).__init__(path, ["-i", device.busId], cwd=os.path.dirname(path))
    def start(self, timeout=app_runner.default_timeout):
        """
        Blocks till XID has been delivered
        Raises exception with EOFError if XID application cannot start.
        """
        super(XidApp, self).start(timeout)
        # if matching line is not found then EOFError exception is risen
        self.stdout_readtillmatch(lambda x: x == "All done. Finishing.")
    def __str__(self):
        return "XidApp on device " + str(self.device) + " with " + super(XidApp, self).__str__()
| DCGM-master | testing/python3/apps/xid_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Library for executing processes
from apps.app_runner import *
# Libraries that wrap common command line applications
# and provide easier to use python interface
from apps.dcgm_stub_runner_app import *
from apps.nv_hostengine_app import *
from apps.dcgmi_app import *
from apps.dcgm_diag_unittests_app import *
from apps.dcgm_unittests_app import *
from apps.cuda_ctx_create_app import *
from apps.nvidia_smi_app import *
from apps.lsof_app import *
from apps.lspci_app import *
from apps.xid_app import *
from apps.cuda_assert_app import *
from apps.p2p_bandwidth import *
from apps.nvpex2 import *
from apps.dcgmproftester_app import *
| DCGM-master | testing/python3/apps/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import math
import sys
from operator import itemgetter
## Temp added to track down if log length issue happens again, Remove when nightly is fixed
import option_parser
import os
epsilon = sys.float_info.epsilon # add epsilon to all times to prevent division by 0
import test_utils
import logger
__all__ = ['PerformanceStats']
# TODO programmable switching
def verbose_debug(text):
    """Trace hook for this module's parsers; disabled by default (no-op)."""
    # Uncomment one of these to switch the tracing on:
    # print(text)
    # logger.debug(text)
    pass
def average(num_list):
    """Arithmetic mean of *num_list* (must be non-empty)."""
    return float(sum(num_list)) / len(num_list)
def stdev(num_list):
    """Population standard deviation of *num_list* (must be non-empty)."""
    mean = sum(num_list) / float(len(num_list))
    variance = sum((x - mean) ** 2 for x in num_list) / len(num_list)
    return math.sqrt(variance)
def _time_str(num_list):
if len(num_list) > 1:
return "%.3fms\t%.3fms\t%.3fms" % (average(num_list) * 1000, stdev(num_list) * 1000, max(num_list) * 1000)
return "%.3fms" % (num_list[0] * 1000)
class DebugLine:
    """One DEBUG-level line from a decoded DCGM/NVML trace log.

    Exposes timestamp (seconds, float), srcfilename (with '/' separators),
    srcfunctionname (may be None), srcline (int) and the free-form message.
    Construction raises ValueError when the text does not match.
    """
    #                                      tid       timestamp        path       fname      line       content
    # FIX: raw string — the pattern contained invalid escape sequences
    # ("\s", "\d", "\[") when written as a plain string literal.
    regexpDebugLine = re.compile(r"^DEBUG:\s*\[tid \d*\]\s*\[(\d+\.\d+)s - (\w?:?[^:]+):?(\w+)?:([0-9]+)\]\s*(.*)")

    @staticmethod
    def construct(text):
        """Return a DebugLine for *text*, or None when it doesn't parse."""
        try:
            return DebugLine(text)
        except ValueError:
            return None

    def __init__(self, text):
        text = text.strip()
        self.match = DebugLine.regexpDebugLine.match(text)
        if not self.match:
            raise ValueError
        self.timestamp = float(self.match.group(1))
        # Normalize Windows path separators so comparisons are uniform.
        self.srcfilename = self.match.group(2).replace("\\", "/")
        self.srcfunctionname = self.match.group(3)
        self.srcline = int(self.match.group(4))
        self.message = self.match.group(5)

    def __str__(self):
        return "(%s, %s, %s)" % (self.timestamp, self.srcStr(), self.message)

    def srcStr(self):
        """file:function:line, or file:line when the function is unknown."""
        if self.srcfunctionname:
            return "%s:%s:%d" % (self.srcfilename, self.srcfunctionname, self.srcline)
        else:
            return "%s:%d" % (self.srcfilename, self.srcline)

    def isInTheSamePlace(self, line):
        """True when *line* was logged from the same file/function/line."""
        return self.srcfilename == line.srcfilename and self.srcfunctionname == line.srcfunctionname and self.srcline == line.srcline

    def isInTheSameFunction(self, line):
        """True when *line* was logged from the same file/function."""
        return self.srcfilename == line.srcfilename and self.srcfunctionname == line.srcfunctionname
class RmCall:
    """ Class that matches Rm Calls """
    # An RM call is logged as two consecutive lines from the same source
    # location inside dmal/rm/*.c. Release builds log obfuscated hex pairs;
    # debug builds log the full dcgmRmCall(...) form.
    # FIX: raw strings — patterns contained invalid escape sequences
    # ("\(", "\.", "\d") when written as plain string literals.
    regexpRmCallRelease = re.compile(r"^([0-9a-f]*) ([0-9a-f]*)$")
    regexpRmCallReleaseReturn = re.compile(r"^([0-9a-f]*) ([0-9a-f]*) ## 0x([0-9a-f]*)$")
    regexpRmCallDebug = re.compile(r"^dcgmRmCall\(([a-zA-Z0-9_.]* [0-9a-f]*), (\w*), \.\.\.\)$")
    regexpRmCallDebugReturn = re.compile(r"^dcgmRmCall\(([a-zA-Z0-9_.]* [0-9a-f]*), (\w*), \.\.\.\) returned 0x([0-9a-f]*)$")
    regexpFilePath = re.compile(r".*dmal/rm/.*\.c$")
    regexpRmCallSrc = re.compile(r".*dcgmRmCall.*(NV\d{4}_CTRL_CMD_[A-Z0-9_]*).*")

    @staticmethod
    def construct(debugLines, i, dcgmParent):
        """Return an RmCall starting at debugLines[i], or None if no match."""
        try:
            return RmCall(debugLines, i, dcgmParent)
        except ValueError:
            return None

    def __init__(self, debugLines, i, dcgmParent):
        if i + 1 >= len(debugLines):
            raise ValueError
        line1 = debugLines[i]
        line2 = debugLines[i + 1]
        verbose_debug("RmCall: Matching line1 %d %s" % (i, str(line1)))
        verbose_debug("RmCall: Matching line2 %d %s" % (i + 1, str(line2)))
        if not line1.isInTheSamePlace(line2):
            verbose_debug("RmCall: Failed because they are not in the same line")
            raise ValueError
        if not RmCall.regexpFilePath.match(line1.srcfilename):
            verbose_debug("RmCall: Failed because they are not in the correct file in dmal/rm/rm_*")
            raise ValueError
        self.releaseLog = 1
        self.rmCallName = 0
        match1 = RmCall.regexpRmCallRelease.match(line1.message)
        match2 = RmCall.regexpRmCallReleaseReturn.match(line2.message)
        if not match1 or not match2:
            # Not a release-format pair; fall back to the debug format.
            self.releaseLog = 0
            self.rmCallName = 1
            verbose_debug("RmCall: Failed match regexpRmCallRelease* but still in trying other regexps")
            match1 = RmCall.regexpRmCallDebug.match(line1.message)
            match2 = RmCall.regexpRmCallDebugReturn.match(line2.message)
            if not match1 or not match2:
                verbose_debug("RmCall: Failed match regexpRmCallDebug*")
                raise ValueError
        # Entry and return lines must agree on device and function.
        if match1.group(1) != match2.group(1) or match1.group(2) != match2.group(2):
            verbose_debug("RmCall: Failed check where device and function strings should be the same*")
            raise ValueError
        self.src = line1
        self.device = match1.group(1)
        self.function = match1.group(2)
        self.returnCode = match2.group(3)
        self.time = line2.timestamp - line1.timestamp + epsilon
        self.times = [self.time]  # so that stats from multiple runs could be appended
        # Only keep the parent NVML call if this RM call happened inside it.
        self.dcgmParent = dcgmParent if dcgmParent and dcgmParent.isParentOfRmCall(self) else None

    def is_simillar(self, b):
        """True when *b* is the same RM call (place, function, return code)."""
        return isinstance(b, RmCall) and self.src.isInTheSamePlace(b.src) and self.function == b.function and self.returnCode == b.returnCode

    def __str__(self):
        dcgmParentStr = "(%.2f%% of %s)" % (self.time / self.dcgmParent.time * 100, self.dcgmParent.shortString()) if self.dcgmParent else ""
        name = self.function if self.rmCallName else "RM CALL " + self.src.srcStr()
        return "%s\t %s\t%s" % (_time_str(self.times), name, dcgmParentStr)
class NvmlCall:
    """ Class that matches Nvml Calls """
    # TODO doesn't handle apiEnter failures!
    # FIX: raw strings — patterns contained invalid escape sequences
    # ("\(", "\d") when written as plain string literals.
    regexpNvmlCall = re.compile(r"^Entering (dcgm[a-zA-Z0-9_]*)(\(.*\)) *(\(.*\))$")
    regexpNvmlIntRelCall = re.compile(r"^()()(\(.*\))$")
    regexpNvmlCallReturn = re.compile(r"^Returning (\d*) \(([a-zA-Z ]*)\)$")
    regexpNvmlIntRelCallReturn = re.compile(r"^(\d*) ([a-zA-Z ]*)$")
    regexpFilePath = re.compile(r".*entry_points.h$")
    regexpFilePathNonTsapi = re.compile(r".*dcgm.c$")
    regexpNvmlCallSrc = re.compile(r"^ *NVML_INT_ENTRY_POINT\((dcgm[A-Z][a-zA-Z0-9_]*) *,.*")

    @staticmethod
    def construct(debugLines, i):
        """Return an NvmlCall starting at debugLines[i], or None if no match."""
        try:
            return NvmlCall(debugLines, i)
        except ValueError:
            return None

    def __init__(self, debugLines, i):
        line1 = debugLines[i]
        verbose_debug("NvmlCall: Matching line %d %s" % (i, str(line1)))
        self.istsapi = 1
        if not NvmlCall.regexpFilePath.match(line1.srcfilename):
            self.istsapi = 0
            verbose_debug("NvmlCall: Wrong file name also matching non tsapi regexp")
            if not NvmlCall.regexpFilePathNonTsapi.match(line1.srcfilename):
                verbose_debug("NvmlCall: Wrong file also doesn't match non tsapi regexp")
                raise ValueError
        self.internal = 0
        self.dcgmCallName = 1
        match1 = NvmlCall.regexpNvmlCall.match(line1.message)
        if not match1:
            # Not the public entry format; try the internal/release one.
            self.internal = 1
            self.dcgmCallName = 0
            verbose_debug("NvmlCall: Failed match regexpNvmlCall but need to also try regexpNvmlIntRelCall")
            match1 = NvmlCall.regexpNvmlIntRelCall.match(line1.message)
            if not match1:
                verbose_debug("NvmlCall: Failed match regexpNvmlIntRelCall")
                raise ValueError
        verbose_debug("NvmlCall: Matching the end of the dcgm call")
        # Scan forward for the matching "Returning ..." line of this call.
        for j in range(i + 1, len(debugLines)):
            line2 = debugLines[j]
            if not line1.isInTheSameFunction(line2):
                continue
            if self.istsapi and not line1.isInTheSamePlace(line2):
                continue
            match2 = NvmlCall.regexpNvmlIntRelCallReturn.match(line2.message) if self.internal else NvmlCall.regexpNvmlCallReturn.match(line2.message)
            if not match2:
                # A later line from the same place that is not a return line
                # means the log is inconsistent.
                # (FIX: removed a dead `return` statement after this raise.)
                verbose_debug("NvmlCall: Sth went wrong. Found line2 \"%s\" that doesn't match the return but is in the same line" % (str(line2)))
                raise ValueError
            verbose_debug("NvmlCall: Matched the end line %d %s" % (j, str(line2)))
            # TODO match device
            self.src = line1
            self.srcEnd = line2
            self.function = match1.group(1) if self.dcgmCallName else "NVML INT " + self.src.srcStr()
            self.argsType = match1.group(2)
            self.args = match1.group(3)
            self.errcode = match2.group(1)
            self.errcodeStr = match2.group(2)
            self.time = line2.timestamp - line1.timestamp + epsilon
            self.times = [self.time]  # so that stats from multiple runs could be appended
            return
        verbose_debug("NvmlCall: End of dcgm call wasn't found")
        raise ValueError

    def isParentOfRmCall(self, rmCall):
        """True when *rmCall*'s entry timestamp falls inside this call's span."""
        return self.src.timestamp <= rmCall.src.timestamp and rmCall.src.timestamp <= self.srcEnd.timestamp

    def shortString(self):
        return "%s" % (self.function)

    def is_simillar(self, b):
        """True when *b* is the same NVML call (place, function, error code)."""
        return isinstance(b, NvmlCall) and self.src.isInTheSamePlace(b.src) and self.function == b.function and self.errcode == b.errcode

    def __str__(self):
        return "%s\t%s\t%s" % (_time_str(self.times), self.function, self.args)
class PerformanceStats(object):
    """Parses a decoded DCGM/NVML trace log and aggregates per-call timing.

    Builds an ordered list of NvmlCall/RmCall stats plus totals for time
    spent in NVML and in RM. Stats from repeated runs of the same scenario
    can be merged with combine_stat() to get avg/stdev/max figures.
    """

    def __init__(self, input_fname):
        verbose_debug("Decoding " + input_fname + " file")
        # read from file
        with open(input_fname, encoding='utf-8', errors='ignore') as fin:
            rawlines = fin.readlines()
        # Parse only DEBUG level trace lines
        # only these contain start/stop function entry information
        lines = [x for x in [DebugLine.construct(y) for y in rawlines] if x]
        # look for dcgm function calls and RM function calls inside of trace lines
        i = 0
        lastNvmlCall = None
        self.time_in_rm = 0.0 + epsilon
        self.time_in_dcgm = 0.0 + epsilon
        self.stats = []
        for i in range(len(lines)):
            line = lines[i]
            verbose_debug(line.match.groups())
            dcgmCall = NvmlCall.construct(lines, i)
            if dcgmCall:
                # Remember the call so nested RM calls can be attributed to it.
                lastNvmlCall = dcgmCall
                self.time_in_dcgm += dcgmCall.time
                self.stats.append(dcgmCall)
                continue
            rmCall = RmCall.construct(lines, i, lastNvmlCall)
            if rmCall:
                self.time_in_rm += rmCall.time
                self.stats.append(rmCall)
                continue
        if len(lines) > 0:
            self.time_total = lines[-1].timestamp - lines[0].timestamp + epsilon
        else:
            self.time_total = -1
        # Kept as lists so results of multiple runs can be appended later.
        self.times_in_dcgm = [self.time_in_dcgm]
        self.times_in_rm = [self.time_in_rm]
        self.times_total = [self.time_total]
        self._combined_stats_count = 1

    def write_to_file(self, fname, dcgm_stats=True, rm_stats=True):
        """Write human-readable stats (call order + per-function) to *fname*."""
        with open(fname, "w") as fout:
            fout.write("Called functions (in order):\n")
            if self._combined_stats_count > 1:
                fout.write(" avg\t stdev\t max\t name\n")
            else:
                fout.write(" time\t name\n")
            calls = dict()  # for per function stats
            for stat in self.stats:
                if not dcgm_stats and isinstance(stat, NvmlCall):
                    continue
                if not rm_stats and isinstance(stat, RmCall):
                    continue
                fout.write(str(stat))
                fout.write("\n")
                calls.setdefault(stat.function, []).extend(stat.times)
            fout.write("%s\t%s\n" % (_time_str(self.times_total), "Total time"))
            fout.write("%s\t%s (%.2f%% of total time)\n" % (_time_str(self.times_in_dcgm), "Time spent in NVML", average(self.times_in_dcgm) / average(self.times_total) * 100))
            fout.write("%s\t%s (%.2f%% of total time)\n" % (_time_str(self.times_in_rm), "Time spent in RM", average(self.times_in_rm) / average(self.times_total) * 100))
            fout.write("\n")
            # Print per function stats
            avgsum = "sum"
            if self._combined_stats_count > 1:
                avgsum = "avg sum"  # if stats are combined then we return avg sum for all runs
            fout.write("Per function stats (sorted by avg):\n")
            fout.write(" avg\t stdev\t max\t%7s\t name\n" % avgsum)
            per_function = [(average(calls[x]) * 1000, stdev(calls[x]) * 1000, max(calls[x]) * 1000, sum(calls[x]) * 1000 / self._combined_stats_count, x) for x in calls]
            per_function.sort(reverse=True)
            for function in per_function:
                fout.write("%.3fms\t%.3fms\t%.3fms\t%.3fms\t%s\n" % function)
            fout.write("\n")
            fout.write("Per function stats (sorted by sum):\n")
            fout.write(" avg\t stdev\t max\t%7s\t name\n" % avgsum)
            per_function.sort(key=itemgetter(3), reverse=True)
            for function in per_function:
                fout.write("%.3fms\t%.3fms\t%.3fms\t%.3fms\t%s\n" % function)
            fout.write("\n")

    def write_to_file_dvs(self, fname, dcgm_stats=True, rm_stats=True):
        """Write machine-readable "name, value_ms" stats to *fname* for DVS."""
        with open(fname, "w") as fout:
            def format_stats(name, num_list):
                # Multiple runs produce avg/stdev/max rows; one run a single row.
                if len(num_list) > 1:
                    return "%s_avg, %.3f\n%s_stdev, %.3f\n%s_max,%.3f\n" % (name, average(num_list) * 1000, name, stdev(num_list) * 1000, name, max(num_list) * 1000)
                return "%s, %.3f" % (name, num_list[0] * 1000)
            calls = dict()
            for stat in self.stats:
                if not dcgm_stats and isinstance(stat, NvmlCall):
                    continue
                if not rm_stats and isinstance(stat, RmCall):
                    continue
                calls.setdefault(stat.function, []).extend(stat.times)
            fout.write(format_stats("total_time", self.times_total))
            fout.write(format_stats("total_dcgm_time", self.times_in_dcgm))
            fout.write(format_stats("total_rm_time", self.times_in_rm))
            for (name, times) in list(calls.items()):
                fout.write(format_stats(name, times))

    def combine_stat(self, perf_stat):
        """
        Merges into self additional stats so that average and stdev for each entry could be calculated.
        perf_stat must contain the same NVML/RM calls in the same order.
        """
        ## Temp added to track down if log length issue happens again, Remove when nightly is fixed
        if (len(perf_stat.stats) != len(self.stats)):
            log_perf_stat = ""
            log_self_stat = ""
            for i in range(len(perf_stat.stats)):
                log_perf_stat += str(perf_stat.stats[i]) + "\n"
            for i in range(len(self.stats)):
                log_self_stat += str(self.stats[i]) + "\n"
            if not test_utils.noLogging:
                fname_log_perf_stat = os.path.relpath(os.path.join(logger.log_dir, "log_perf_stat.txt"))
                fname_log_self_stat = os.path.relpath(os.path.join(logger.log_dir, "log_self_stat.txt"))
                # FIX: use context managers so the files are closed even if a
                # write fails (they were previously opened/closed manually).
                with open(fname_log_perf_stat, "w") as f1:
                    f1.write(log_perf_stat)
                with open(fname_log_self_stat, "w") as f2:
                    f2.write(log_self_stat)
        # TODO get rid of this requirement by merging the logs with difflib
        # Some dcgm calls (e.g. dcgmDeviceGetCurrentClocksThrottleReasons) can take different RM calls depending on the
        # state of the GPU (e.g. clock changes that can happen at any point).
        ## Comment strict matching of log length. The perf data will be collected for atleast 1 run anyways
        ## assert len(perf_stat.stats) == len(self.stats), "One of the logs is of different length"
        if (len(perf_stat.stats) != len(self.stats)):
            logger.warning("Perf logs mismatch. nvsmi perf data collected for %s run(s)" % str(self._combined_stats_count))
            return
        for i in range(len(self.stats)):
            stat1 = self.stats[i]
            stat2 = perf_stat.stats[i]
            assert stat1.is_simillar(stat2), "stat %d: %s doesn't match %s and can't be combined" % (i, stat1, stat2)
            stat1.times.extend(stat2.times)
        self.times_total.extend(perf_stat.times_total)
        self.times_in_rm.extend(perf_stat.times_in_rm)
        self.times_in_dcgm.extend(perf_stat.times_in_dcgm)
        self._combined_stats_count += 1
# Command-line entry point: decode a trace log and write stats to a file.
if __name__ == "__main__":
    import sys
    # BUG FIX: two arguments are required; the original only rejected the
    # zero-argument case and crashed with IndexError when given exactly one.
    if len(sys.argv) < 3:
        print("Usage <app> <input_decoded_log> <output_stats_file>")
        sys.exit(1)
    stats = PerformanceStats(sys.argv[1])
    stats.write_to_file(sys.argv[2])
    print("Done")
| DCGM-master | testing/python3/apps/performance_stats.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from . import app_runner
import utils
import test_utils
import logger
import option_parser
import datetime
import subprocess
class DcgmStubRunnerApp(app_runner.AppRunner):
    """AppRunner wrapper that launches the stub_library_test binary."""
    # Per-platform relative paths to the stub_library_test binary, including
    # future supported architectures (keyed by utils.platform_identifier).
    paths = {
        "Linux_32bit": "./apps/x86/stub_library_test",
        "Linux_64bit": "./apps/amd64/stub_library_test",
        "Linux_ppc64le": "./apps/ppc64le/stub_library_test",
        "Linux_aarch64": "./apps/aarch64/stub_library_test",
        "Windows_64bit": "./apps/amd64/stub_library_test.exe"
    }
    forbidden_strings = [
        # None of these error strings should ever be printed by
        # stub_library_test
        "Unknown Error",
        "Uninitialized",
        "Invalid Argument",
        "(null)", # e.g. from printing %s from null ptr
    ]
    def __init__(self, args=None):
        # args: optional list of command-line arguments forwarded to the binary.
        path = DcgmStubRunnerApp.paths[utils.platform_identifier]
        self.stub = None
        self.output_filename = None
        super(DcgmStubRunnerApp, self).__init__(path, args)
        # Route NVML and DCGM debug logging to per-process trace files unless
        # logging was disabled with --no-logging.
        if not test_utils.noLogging:
            self.nvml_trace_fname = os.path.join(logger.log_dir, "app_%03d_nvml_trace.log" % (self.process_nb))
            self.env["__NVML_DBG_FILE"] = self.nvml_trace_fname
            self.env["__NVML_DBG_LVL"] = test_utils.loggingLevel
            self.dcgm_trace_fname = os.path.join(logger.log_dir, "app_%03d_dcgm_trace.log" % (self.process_nb))
            self.env["__DCGM_DBG_FILE"] = self.dcgm_trace_fname
            self.env["__DCGM_DBG_LVL"] = test_utils.loggingLevel
        else:
            self.nvml_trace_fname = None
            self.dcgm_trace_fname = None
    def _process_finish(self, stdout_buf, stderr_buf):
        super(DcgmStubRunnerApp, self)._process_finish(stdout_buf, stderr_buf)
        # Skip this part if --no-logging option is used
        if logger.log_dir is None:
            return
        # Verify that stub_library_test doesn't print any strings that should never be printed
        stdout = "\n".join(self.stdout_lines)
        for forbidden_text in DcgmStubRunnerApp.forbidden_strings:
            assert stdout.find(forbidden_text) == -1, "stub_library_test printed \"%s\", this should never happen!" % forbidden_text
    def __str__(self):
        return "stub_library_test" + super(DcgmStubRunnerApp, self).__str__()
    def stdout(self):
        """Return the captured stdout as one newline-joined string."""
        stdout = "\n".join(self.stdout_lines)
        return stdout
| DCGM-master | testing/python3/apps/dcgm_stub_runner_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import app_runner
import os
import utils
import test_utils
class RunCudaAssert(app_runner.AppRunner):
    """ Class to assert a Cuda Kernel and generate a XID 43 Error """

    paths = {
        "Linux_64bit": "./apps/cuda_ctx_create/cuda_assert_64bit",
        "Linux_ppc64le": "./apps/cuda_ctx_create/cuda_assert_ppc64le",
        "Linux_aarch64": "./apps/cuda_ctx_create/cuda_assert_aarch64",
    }

    def __init__(self, args, env=None):
        binary = os.path.join(utils.script_dir, RunCudaAssert.paths[utils.platform_identifier])
        super(RunCudaAssert, self).__init__(binary, args, cwd=os.path.dirname(binary), env=env)

    def start(self, timeout=app_runner.default_timeout):
        """
        Blocks till cuda ctx is really created
        Raises Exception if assert does not work
        """
        super(RunCudaAssert, self).start(timeout)

        with test_utils.assert_raises(EOFError):
            # stdout_readtillmatch raises EOFError when no line matches before EOF
            self.stdout_readtillmatch(lambda line: line == "Assertion `false` failed")

    def __str__(self):
        return "RunCudaAssert on device " + super(RunCudaAssert, self).__str__()
| DCGM-master | testing/python3/apps/cuda_assert_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from . import app_runner
import utils
import test_utils
import logger
import option_parser
class TestDcgmUnittestsApp(app_runner.AppRunner):
    # Including future supported architectures
    paths = {
        "Linux_32bit": "./apps/x86/testdcgmunittests",
        "Linux_64bit": "./apps/amd64/testdcgmunittests",
        "Linux_ppc64le": "./apps/ppc64le/testdcgmunittests",
        "Linux_aarch64": "./apps/aarch64/testdcgmunittests",
        "Windows_64bit": "./apps/amd64/testdcgmunittests.exe"
    }
    forbidden_strings = [
        # None of this error codes should be ever printed by testnvcmunittests
        "Unknown Error",
        "Uninitialized",
        "Invalid Argument",
        "Already Initialized",
        "Insufficient Size",
        "Driver Not Loaded",
        "Timeout",
        "DCGM Shared Library Not Found",
        "Function Not Found",
        "(null)", # e.g. from printing %s from null ptr
    ]

    def __init__(self, args=None):
        """Prepare a testdcgmunittests run; wires a DCGM trace log unless logging is disabled."""
        self.nv_hostengine = None
        self.output_filename = None
        super(TestDcgmUnittestsApp, self).__init__(TestDcgmUnittestsApp.paths[utils.platform_identifier], args)

        if test_utils.noLogging:
            self.trace_fname = None
        else:
            self.trace_fname = os.path.join(logger.log_dir, "app_%03d_trace.log" % (self.process_nb))
            self.env["__DCGM_DBG_FILE"] = self.trace_fname
            self.env["__DCGM_DBG_LVL"] = test_utils.loggingLevel

    def _process_finish(self, stdout_buf, stderr_buf):
        """Post-exit hook: check stdout for forbidden strings and log details on failure."""
        super(TestDcgmUnittestsApp, self)._process_finish(stdout_buf, stderr_buf)

        # Skip this part if --no-logging option is used
        if logger.log_dir is None:
            return

        combined_out = "\n".join(self.stdout_lines)
        for bad in TestDcgmUnittestsApp.forbidden_strings:
            assert combined_out.find(bad) == -1, "testdcgmunittests printed \"%s\", this should never happen!" % bad

        if self.retvalue() != 0:
            combined_err = "\n".join(self.stderr_lines)
            logger.warning("testdcgmunittests returned %d" % self.retvalue())
            logger.warning("stdout:\n%s\n" % combined_out)
            logger.warning("stderr:\n%s\n" % combined_err)

    def __str__(self):
        return "nv_hostengine" + super(TestDcgmUnittestsApp, self).__str__()
| DCGM-master | testing/python3/apps/dcgm_unittests_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import app_runner
import os
import utils
class CudaCtxCreateApp(app_runner.AppRunner):
    """
    Creates a cuda context on a single device and waits for a return char to terminate.
    """
    paths = {
        "Linux_32bit": "./apps/cuda_ctx_create/cuda_ctx_create_32bit",
        "Linux_64bit": "./apps/cuda_ctx_create/cuda_ctx_create_64bit",
        "Linux_ppc64le": "./apps/cuda_ctx_create/cuda_ctx_create_ppc64le",
        "Linux_aarch64": "./apps/cuda_ctx_create/cuda_ctx_create_aarch64",
        "Windows_64bit": "./apps/cuda_ctx_create/cuda_ctx_create_64bit.exe"
    }

    def __init__(self, device):
        self.device = device
        binary = os.path.join(utils.script_dir, CudaCtxCreateApp.paths[utils.platform_identifier])
        super(CudaCtxCreateApp, self).__init__(binary, ["-i", device.busId, "--getchar"], cwd=os.path.dirname(binary))

    def start(self, timeout=app_runner.default_timeout):
        """
        Blocks till cuda ctx is really created
        Raises exception EOFError if ctx application cannot start
        """
        super(CudaCtxCreateApp, self).start(timeout)
        # if matching line is not found then EOFError exception is risen
        self.stdout_readtillmatch(lambda line: line == "Context created")

    def __str__(self):
        return "CudaCtxCreateApp on device " + str(self.device) + " with " + super(CudaCtxCreateApp, self).__str__()
class CudaCtxCreateAdvancedApp(app_runner.AppRunner):
    """
    More universal version of CudaCtxCreateApp which provides access to:
        - creating multiple contexts
        - launching kernels (that use quite a bit of power)
        - allocate additional memory
    See apps/cuda_ctx_create/cuda_ctx_create_32bit -h for more details.
    """
    paths = {
        "Linux_32bit": "./apps/cuda_ctx_create/cuda_ctx_create_32bit",
        "Linux_64bit": "./apps/cuda_ctx_create/cuda_ctx_create_64bit",
        "Linux_ppc64le": "./apps/cuda_ctx_create/cuda_ctx_create_ppc64le",
        "Linux_aarch64": "./apps/cuda_ctx_create/cuda_ctx_create_aarch64",
        "Windows_64bit": "./apps/cuda_ctx_create/cuda_ctx_create_64bit.exe"
    }

    def __init__(self, args, env=None):
        """
        args: command-line arguments forwarded to the cuda_ctx_create binary
        env: optional dict of environment variables for the child process
        """
        # Fix: look the binary up in THIS class's paths table. The original
        # referenced CudaCtxCreateApp.paths, which left the paths dict declared
        # above dead (the two tables are identical today, so runtime behavior
        # is unchanged, but edits to this class's table had no effect).
        path = os.path.join(utils.script_dir, CudaCtxCreateAdvancedApp.paths[utils.platform_identifier])
        super(CudaCtxCreateAdvancedApp, self).__init__(path, args, cwd=os.path.dirname(path), env=env)

    def __str__(self):
        return "CudaCtxCreateAdvancedApp with " + super(CudaCtxCreateAdvancedApp, self).__str__()
| DCGM-master | testing/python3/apps/cuda_ctx_create_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from . import app_runner
import utils
import test_utils
import logger
import option_parser
class TestDcgmDiagUnittestsApp(app_runner.AppRunner):
    # Per-platform locations of the testdiag binary
    paths = {
        "Linux_32bit": "./apps/x86/testdiag",
        "Linux_64bit": "./apps/amd64/testdiag",
        "Linux_ppc64le": "./apps/ppc64le/testdiag",
        "Linux_aarch64": "./apps/aarch64/testdiag",
        "Windows_64bit": "./apps/amd64/testdiag.exe"
    }

    def __init__(self, args=None):
        """Run the testdiag unit-test binary for the current platform."""
        binary = TestDcgmDiagUnittestsApp.paths[utils.platform_identifier]
        super(TestDcgmDiagUnittestsApp, self).__init__(binary, args)
| DCGM-master | testing/python3/apps/dcgm_diag_unittests_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import app_runner
import os
import utils
# it could take up to 360 seconds on dgx-2
P2P_BANDWIDTH_TIMEOUT_SECS = 360
class RunP2Pbandwidth(app_runner.AppRunner):
    """ Runs the p2pb_bandwidth binary to generate traffic between Gpus using nvswitch """

    paths = {
        "Linux_64bit": "./apps/p2p_bandwidth/p2p_bandwidth",
        "Linux_ppc64le": "./apps/p2p_bandwidth/p2p_bandwidth",
        "Linux_aarch64": "./apps/p2p_bandwidth/p2p_bandwidth",
    }

    def __init__(self, args):
        path = os.path.join(utils.script_dir, RunP2Pbandwidth.paths[utils.platform_identifier])
        super(RunP2Pbandwidth, self).__init__(path, args)

    def start(self, timeout=P2P_BANDWIDTH_TIMEOUT_SECS):
        """
        Runs the p2p_bandwidth test on available Gpus
        Raises Exception if it does not work

        timeout: seconds to allow the run before it is killed. Defaults to
        P2P_BANDWIDTH_TIMEOUT_SECS (large systems such as DGX-2 can take minutes).
        The original override dropped the parent's timeout parameter entirely;
        accepting it here keeps the AppRunner.start() interface while preserving
        the old default behavior for existing callers.
        """
        super(RunP2Pbandwidth, self).start(timeout=timeout)
        # stdout_readtillmatch raises EOFError if "test PASSED" never appears
        self.stdout_readtillmatch(lambda x: x.find("test PASSED") != -1)

    def __str__(self):
        return "RunP2Pbandwidth on all supported devices " + super(RunP2Pbandwidth, self).__str__()
| DCGM-master | testing/python3/apps/p2p_bandwidth.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from . import app_runner
import dcgm_structs
import dcgm_agent_internal
import utils
import test_utils
import logger
import option_parser
from . import performance_stats
class NvidiaSmiApp(app_runner.AppRunner):
    # TODO add option to also run just compiled nvidia-smi
    paths = {
        "Linux_32bit": "nvidia-smi", # it should be in the path
        "Linux_64bit": "nvidia-smi", # it should be in the path
        "Linux_ppc64le": "nvidia-smi", # it should be in the path
        "Linux_aarch64": "nvidia-smi", # it should be in the path
        "Windows_64bit": os.path.join(os.getenv("ProgramFiles", "C:/Program Files"), "NVIDIA Corporation/NVSMI/nvidia-smi.exe")
    }
    forbidden_strings = [
        # None of this error codes should be ever printed by nvidia-smi
        "Unknown Error",
        "Uninitialized",
        "Invalid Argument",
        "Already Initialized",
        "Insufficient Size",
        "Insufficient External Power",
        "Driver Not Loaded",
        "Timeout",
        "Interrupt Request Issue",
        "NVML Shared Library Not Found",
        "Function Not Found",
        "Corrupted infoROM",
        "ERR!", # from non-verbose output
        "(null)", # e.g. from printing %s from null ptr
    ]

    def __init__(self, args=None):
        """Prepare an nvidia-smi invocation; wires an NVML trace log unless logging is disabled."""
        self.output_filename = None
        super(NvidiaSmiApp, self).__init__(NvidiaSmiApp.paths[utils.platform_identifier], args)

        if test_utils.noLogging:
            self.trace_fname = ""
        else:
            self.trace_fname = os.path.join(logger.log_dir, "app_%03d_trace.log" % (self.process_nb))
            self.env["__NVML_DBG_FILE"] = self.trace_fname
            self.env["__NVML_DBG_LVL"] = test_utils.loggingLevel

    def append_switch_filename(self, filename=None):
        """
        Appends [-f | --filename] switch to args.
        If filename is None than filename is generated automatically
        """
        # Skip this part if --no-logging option is used
        if logger.log_dir is None:
            return None

        if filename is None:
            filename = os.path.join(logger.log_dir, "app_%03d_filename_output.txt" % (self.process_nb))
        self.args += ["-f", filename]
        self.output_filename = filename
        return filename

    def _process_finish(self, stdout_buf, stderr_buf):
        """Post-exit hook: verify nvidia-smi never printed a forbidden error string."""
        super(NvidiaSmiApp, self)._process_finish(stdout_buf, stderr_buf)

        # Skip this part if --no-logging option is used
        if logger.log_dir is None:
            return

        # TODO, debug builds can print to stderr. We can check for release build here
        #assert self.stderr_lines == [], "nvidia-smi printed something to stderr. It shouldn't ever do that!"

        full_output = "\n".join(self.stdout_lines)
        for bad in NvidiaSmiApp.forbidden_strings:
            assert full_output.find(bad) == -1, "nvidia-smi printed \"%s\", this should never happen!" % bad

    def __str__(self):
        return "nvidia-smi" + super(NvidiaSmiApp, self).__str__()
| DCGM-master | testing/python3/apps/nvidia_smi_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
import threading
import string
import datetime
import signal
import logger
import option_parser
import utils
import test_utils
default_timeout = 10.0 # 10s
class AppRunner(object):
    """
    Class for running command line applications. It handles timeouts, logging and reading stdout/stderr.
    Stdout and stderr of an application is also stored in log output dir in files process_<NB>_stdout/stderr.txt
    If application finished with non 0 error code you need to mark it with .validate() function. Otherwise testing
    framework will fail a subtest. AppRunner is also "validated" when .terminate() is called.

    You can access all lines read so far (or when the application terminates all lines printed) from attributes
    .stdout_lines
    .stderr_lines

    You can see how long the application ran for +/- some minimal overhead (must run() for time to be accurate:
    .runTime

    # Sample usage
    app = AppRunner("nvidia-smi", ["-l", "1"])
    app.run(timeout=2.5)
    print "\n".join(app.stdout_lines)

    Notes: AppRunner works very closely with test_utils SubTest environment. SubTest at the end of the test
           checks that all applications finished successfully and kills applications that didn't finish by
           the end of the test.
    """
    # Sentinel "return codes" used in place of an integer exit status
    RETVALUE_TERMINATED = "Terminated"
    RETVALUE_TIMEOUT = "Terminated - Timeout"

    _processes = []               # Contains list of all processes running in the background
    _processes_not_validated = [] # Contains list of processes that finished with non 0 error code
                                  # and were not marked as validated
    _process_nb = 0               # monotonically increasing id assigned to each AppRunner instance

    def __init__(self, executable, args=None, cwd=None, env=None):
        """
        executable: path or command name to run
        args: list of command-line arguments (defaults to [])
        cwd: working directory for the child process, or None for inherited
        env: dict of EXTRA environment variables merged over os.environ
        """
        self.executable = executable
        if args is None:
            args = []
        self.args = args
        self.cwd = cwd
        if env is None:
            env = dict()
        self.env = env

        self._timer = None            # to implement timeout
        self._subprocess = None
        self._retvalue = None         # stored return code or string when the app was terminated
        self._lock = threading.Lock() # to implement thread safe timeout/terminate
        self.stdout_lines = []        # buff that stores all app's output
        self.stderr_lines = []
        self._logfile_stdout = None
        self._logfile_stderr = None
        self._is_validated = False
        self._info_message = False

        self.process_nb = AppRunner._process_nb
        AppRunner._process_nb += 1

    def run(self, timeout=default_timeout):
        """
        Run the application and wait for it to finish.
        Returns the app's error code/string
        """
        self.start(timeout)
        return self.wait()

    def start(self, timeout=default_timeout):
        """
        Begin executing the application.
        The application may block if stdout/stderr buffers become full.
        This should be followed by self.terminate() or self.wait() to finish execution.
        Execution will be forcefully terminated if the timeout expires.
        If timeout is None, then this app will never timeout.
        """
        assert self._subprocess is None

        logger.debug("Starting " + str(self))

        env = self._create_subprocess_env()
        if utils.is_linux():
            if os.path.exists(self.executable):
                # On linux, for binaries inside the package (not just commands in the path) test that they have +x
                # e.g. if package is extracted on windows and copied to Linux, the +x privileges will be lost
                assert os.access(self.executable, os.X_OK), "Application binary %s is not executable! Make sure that the testing archive has been correctly extracted." % (self.executable)

        self.startTime = datetime.datetime.now()
        self._subprocess = subprocess.Popen(
            [self.executable] + self.args,
            stdin=None,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=self.cwd,
            env=env)
        AppRunner._processes.append(self) # keep track of running processe

        # Start timeout if we want one
        self._timer = None
        if timeout is not None:
            self._timer = threading.Timer(timeout, self._trigger_timeout)
            self._timer.start()

        if not test_utils.noLogging:
            def args_to_fname(args):
                # crop each argument to 16 characters and make sure the output string is no longer than 50 chars
                # Long file names are hard to read (hard to find the extension of the file)
                # Also python sometimes complains about file names being too long.
                # IOError: [Errno 36] File name too long
                return "_".join([utils.string_to_valid_file_name(x)[:16] for x in self.args])[:50]

            shortname = os.path.basename(self.executable) + "_" + args_to_fname(self.args)
            stdout_fname = os.path.relpath(os.path.join(
                logger.log_dir, "app_%03d_%s_stdout.txt" % (self.process_nb, shortname)))
            stderr_fname = os.path.relpath(os.path.join(
                logger.log_dir, "app_%03d_%s_stderr.txt" % (self.process_nb, shortname)))

            # If the app fails, this message will get printed. If it succeeds it'll get popped in _process_finish
            self._info_message = logger.info("Starting %s...\nstdout in %s\nstderr in %s" % (
                str(self)[:64], # cut the string to make it more readable
                stdout_fname, stderr_fname), defer=True)

            self._logfile_stdout = open(stdout_fname, "w", encoding='utf-8')
            self._logfile_stderr = open(stderr_fname, "w", encoding='utf-8')

    def _process_finish(self, stdout_buf, stderr_buf):
        """
        Logs return code/string and reads the remaining stdout/stderr.
        """
        logger.debug("Application %s returned with status: %s" % (self.executable, self._retvalue))
        self.runTime = datetime.datetime.now() - self.startTime

        self._split_and_log_lines(stdout_buf, self.stdout_lines, self._logfile_stdout)
        self._split_and_log_lines(stderr_buf, self.stderr_lines, self._logfile_stderr)

        if self._logfile_stdout:
            self._logfile_stdout.close()
        if self._logfile_stderr:
            self._logfile_stderr.close()
        AppRunner._processes.remove(self)
        # Non-zero exit (other than an explicit terminate()) must be acknowledged
        # by the test via validate(), otherwise the framework fails a subtest.
        if self._retvalue != 0 and self._retvalue != AppRunner.RETVALUE_TERMINATED:
            AppRunner._processes_not_validated.append(self)
        else:
            self._is_validated = True
            logger.pop_defered(self._info_message)

    def wait(self):
        """
        Wait for application to finish and return the app's error code/string
        """
        if self._retvalue is not None:
            return self._retvalue

        logger.debug("Waiting for application %s, pid %d to finish" % (str(self), self._subprocess.pid))
        stdout_buf, stderr_buf = self._subprocess.communicate()
        if self._timer is not None:
            self._timer.cancel()

        with self._lock: # set ._retvalue in thread safe way. Make sure it wasn't set by timeout already
            if self._retvalue is None:
                self._retvalue = self._subprocess.returncode
                self._process_finish(stdout_buf.decode('utf-8'), stderr_buf.decode('utf-8'))

        return self._retvalue

    def poll(self):
        # Non-blocking check: records the exit status (and drains the pipes)
        # only if the process has already finished; returns None otherwise.
        if self._retvalue is None:
            self._retvalue = self._subprocess.poll()
            if self._retvalue is not None:
                stdout_buf = self._read_all_remaining(self._subprocess.stdout)
                stderr_buf = self._read_all_remaining(self._subprocess.stderr)
                self._process_finish(stdout_buf, stderr_buf)

        return self._retvalue

    def _trigger_timeout(self):
        """
        Function called by timeout routine. Kills the app in a thread safe way.
        """
        logger.warning("App %s with pid %d has timed out. Killing it." % (self.executable, self.getpid()))
        with self._lock: # set ._retvalue in thread safe way. Make sure that app wasn't terminated already
            if self._retvalue is not None:
                return self._retvalue

            self._subprocess.kill()
            stdout_buf = self._read_all_remaining(self._subprocess.stdout)
            stderr_buf = self._read_all_remaining(self._subprocess.stderr)
            self._retvalue = AppRunner.RETVALUE_TIMEOUT
            self._process_finish(stdout_buf, stderr_buf)

            return self._retvalue

    def _create_subprocess_env(self):
        ''' Merge additional env with current env '''
        env = os.environ.copy()
        for key in self.env:
            env[key] = self.env[key]
        return env

    def validate(self):
        """
        Marks the process that finished with error code as validated - the error was either expected or handled by the caller
        If process finished with error but wasn't validated one of the subtest will fail.
        """
        assert self.retvalue() != None, "This function shouldn't be called when process is still running"

        if self._is_validated:
            return

        self._is_validated = True
        self._processes_not_validated.remove(self)
        logger.pop_defered(self._info_message)

    def terminate(self):
        """
        Forcfully terminates the application and return the app's error code/string.
        """
        with self._lock: # set ._retvalue in thread safe way. Make sure that app didn't timeout
            if self._retvalue is not None:
                return self._retvalue

            if self._timer is not None:
                self._timer.cancel()
            self._subprocess.kill()
            stdout_buf = self._read_all_remaining(self._subprocess.stdout)
            stderr_buf = self._read_all_remaining(self._subprocess.stderr)
            self._retvalue = AppRunner.RETVALUE_TERMINATED
            self._process_finish(stdout_buf, stderr_buf)

            return self._retvalue

    def signal(self, signal):
        """
        Send a signal to the process
        """
        self._subprocess.send_signal(signal)

    def _read_all_remaining(self, stream):
        """
        Return a string representing the entire remaining contents of the specified stream
        This will block if the stream does not end
        Should only be called on a terminated process
        """
        out_buf = ""
        while True:
            rawline = stream.readline().decode('utf-8')
            if rawline == "":
                break
            else:
                out_buf += rawline
        return out_buf

    def _split_and_log_lines(self, input_string, buff, log_file):
        """
        Splits string into lines, removes '\\n's, and appends to buffer & log file
        """
        lines = input_string.splitlines()
        for i in range(len(lines)):
            lines[i] = lines[i].rstrip("\n\r")
            if log_file:
                log_file.write(lines[i])
                log_file.write("\n")
            buff.append(lines[i])

    def stdout_readtillmatch(self, match_fn):
        """
        Blocking function that reads input until function match_fn(line : str) returns True.
        If match_fn didn't match anything function raises EOFError exception
        """
        logger.debug("stdout_readtillmatch called", caller_depth=1)

        while True:
            rawline = self._subprocess.stdout.readline().decode("utf-8")
            if rawline == "":
                break
            else:
                rawline = rawline.rstrip("\n\r")
                line = rawline

            if self._logfile_stdout:
                self._logfile_stdout.write(line)
                self._logfile_stdout.write("\n")
            self.stdout_lines.append(line)

            if match_fn(rawline) is True:
                return
        raise EOFError("Process finished and requested match wasn't found")

    def retvalue(self):
        """
        Returns code/string if application finished or None otherwise.
        """
        if self._subprocess.poll() is not None:
            self.wait()
        return self._retvalue

    def getpid(self):
        """
        Returns the pid of the process
        """
        return self._subprocess.pid

    def __str__(self):
        return ("AppRunner #%d: %s %s (cwd: %s; env: %s)" %
                (self.process_nb, self.executable, " ".join(self.args), self.cwd, self.env))

    def __repr__(self):
        return str(self)

    @classmethod
    def clean_all(cls):
        """
        Terminate all processes that were created using this class and makes sure that all processes that were spawned were validated.
        """
        import test_utils

        def log_output(message, process):
            """
            Prints last 10 lines of stdout and stderr for faster lookup
            """
            logger.info("%s: %s" % (message, process))
            numLinesToPrint = 100

            #Print more lines for ERIS since this is all we'll have to go by
            if option_parser.options.dvssc_testing or option_parser.options.eris:
                numLinesToPrint = 500

            logger.info("Last %d lines of stdout" % numLinesToPrint)
            with logger.IndentBlock():
                for line in process.stdout_lines[-numLinesToPrint:]:
                    logger.info(line)
            logger.info("Last %d lines of stderr" % numLinesToPrint)
            with logger.IndentBlock():
                for line in process.stderr_lines[-numLinesToPrint:]:
                    logger.info(line)

        with test_utils.SubTest("not terminated processes", quiet=True):
            assert AppRunner._processes == [], "Some processes were not terminated by previous test: " + str(AppRunner._processes)
        for process in AppRunner._processes[:]:
            log_output("Unterminated process", process)
            process.terminate()

        with test_utils.SubTest("not validated processes", quiet=True):
            for process in AppRunner._processes_not_validated:
                log_output("Process returned %s ret code" % process.retvalue(), process)
            assert AppRunner._processes_not_validated == [], "Some processes failed and were not validated by previous test: " + str(AppRunner._processes_not_validated)
        AppRunner._processes_not_validated = []
| DCGM-master | testing/python3/apps/app_runner.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from . import app_runner
import dcgm_structs
import dcgm_agent_internal
import test_utils
import utils
class LspciApp(app_runner.AppRunner):
    """
    Run lspci
    """

    # Per-platform directories containing the bundled lspci installation
    paths = {
        "Linux_32bit": "./lspci/Linux-x86/",
        "Linux_64bit": "./lspci/Linux-x86_64/",
        "Linux_ppc64le": "./lspci/Linux-ppc64le/",
        "Linux_aarch64": "./lspci/Linux-aarch64/",
    }

    def __init__(self, busId, flags):
        """
        busId: PCI bus id passed to lspci -s
        flags: extra command-line flags appended to the invocation
        """
        base = os.path.join(os.path.dirname(os.path.realpath(__file__)), LspciApp.paths[utils.platform_identifier])
        exepath = base + "/sbin/lspci"
        self.processes = None
        # -i points lspci at the bundled pci.ids database
        args = ["-s", busId, "-i", base + "/share/pci.ids"] + list(flags)
        super(LspciApp, self).__init__(exepath, args)
| DCGM-master | testing/python3/apps/lspci_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
import time
import datetime
import subprocess
from posix import wait
from subprocess import CalledProcessError
from . import app_runner
import option_parser
import logger
import utils
import test_utils
default_timeout = 10.0 # 10s
class NvHostEngineApp(app_runner.AppRunner):
# Including future supported architectures
paths = {
"Linux_32bit": "./apps/x86/nv-hostengine",
"Linux_64bit": "./apps/amd64/nv-hostengine",
"Linux_ppc64le": "./apps/ppc64le/nv-hostengine",
"Linux_aarch64": "./apps/aarch64/nv-hostengine",
"Windows_64bit": "./apps/amd64/nv-hostengine.exe"
}
forbidden_strings = [
# None of this error codes should be ever printed by nv-hostengine
"Unknown Error",
"Uninitialized",
"Invalid Argument",
"Already Initialized",
"Insufficient Size",
"Driver Not Loaded",
"Timeout",
"DCGM Shared Library Not Found",
"Function Not Found",
"(null)", # e.g. from printing %s from null ptr
]
supported_profile_tools = ['callgrind', 'massif']
def __init__(self, args=None, profile_dir=None, heEnv=None):
    '''
    args: special args to execute nv-hostengine with
    profile_dir: output directory to create which will contain
        profiling files if profiling is enabled.
    heEnv: Dictionary of environmental variables to set for the host engine's process
    '''
    path = NvHostEngineApp.paths[utils.platform_identifier]
    self.hostengine_executable = path
    self.nv_hostengine = None
    self._pid = None
    self._retvalue = None

    # Put the pid file next to the logs when logging is on; otherwise use cwd
    if test_utils.noLogging:
        self._pidFilename = os.path.join(os.getcwd(), 'nv-hostengine.pid')
    else:
        self._pidFilename = os.path.join(logger.log_dir, 'nv-hostengine.pid')

    if option_parser.options.profile:
        self._check_valgrind_installed()
        self.output_dir = self._create_output_dir(option_parser.options.profile, profile_dir)
        # Rewrites args/path so that the hostengine runs under the valgrind tool
        args, path = self._create_profile_command(args, path, option_parser.options.profile)
        logger.info('profiling output files available under %s' % utils.shorten_path(self.output_dir, 3))

    #Make sure we're writing to a local .pid file in case we're running as non-root
    pidArgs = []
    if args is not None and '--pid' in args:
        if self._pidFilename not in args: # When we retry, --pid self._pidFilename is already there. Don't fail in that case
            raise Exception("Custom --pid parameter is not supported at this time. You must update _terminate_hostengine() as well. args: " + str(args))
    else:
        pidArgs = ['--pid', self._pidFilename]
    if args is None:
        args = pidArgs
    else:
        args.extend(pidArgs)

    super(NvHostEngineApp, self).__init__(path, args, cwd=None, env=heEnv)

    # DCGM trace logging is skipped while profiling so valgrind output stays clean
    if not test_utils.noLogging and not option_parser.options.profile:
        self.dcgm_trace_fname = os.path.join(logger.log_dir, "app_%03d_dcgm_trace.log" % (self.process_nb))
        self.env["__DCGM_DBG_FILE"] = self.dcgm_trace_fname
        self.env["__DCGM_DBG_LVL"] = test_utils.loggingLevel
    else:
        self.dcgm_trace_fname = None
    #logger.error("env: %s; heEnv: %s" % (str(self.env), str(heEnv)))
def _check_valgrind_installed(self):
    """Raise an Exception when valgrind cannot be found on the PATH.

    The previous implementation shelled out to ``which valgrind`` and
    compared the result against ``''``; on Python 3 check_output() returns
    bytes (never equal to ``''``), and a missing binary made check_output
    raise an uncaught CalledProcessError instead of the friendly message.
    shutil.which() avoids both problems.
    """
    import shutil
    if shutil.which('valgrind') is None:
        raise Exception('Valgrind must be installed in order to run profiling. ' +
                        '"which valgrind" could not find it.')
def _create_output_dir(self, profile_tool, profile_dir=None):
    ''' Create and return the output directory for the callgrind files '''
    base_output_dir = os.path.join(logger.log_dir, profile_tool)
    utils.create_dir(base_output_dir)

    # With no explicit name, use a microsecond-resolution timestamp so
    # repeated runs never collide.
    if profile_dir is None:
        subdirName = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%dT%H:%M:%S.%f')
    else:
        subdirName = profile_dir
    output_dir = os.path.join(base_output_dir, subdirName)

    utils.create_dir(output_dir)
    return output_dir
def _create_profile_command(self, args, path, valgrind_tool):
    '''
    Return the proper (args, path) to initialize the AppRunner with in order
    to run the hostengine under the requested valgrind tool.
    '''
    if valgrind_tool not in self.supported_profile_tools:
        raise Exception('%s is not a supported tool for profiling' % valgrind_tool)

    # logfile is needed instead of printing to stderr since AppRunner.terminate tries to gather all stderr
    # but we are only able to REALLY terminate the process in self.terminate. This means stderr will
    # not close as expected and the program will deadlock
    common_args = [
        '--tool=%s' % valgrind_tool,
        '--log-file=%s' % os.path.join(self.output_dir, '%s.log.%%p' % valgrind_tool)
    ]

    tool_log_file = os.path.join(self.output_dir, valgrind_tool + '.out.%p')
    perToolArgs = {
        'callgrind': [
            '--separate-threads=yes',    # per-thread profiles
            '--dump-instr=yes',          # allow to look at profiling data at machine instr lvl instead of src lines
            '--collect-jumps=yes',       # conditional jump info is collected
            '--collect-systime=yes',     # collect system call times
            '--collect-bus=yes',         # collect atomic instr. calls. Useful for finding excessive locking
            '--cache-sim=yes',           # collect memory and cache miss/hit info
            '--callgrind-out-file=%s' % tool_log_file,
        ],
        'massif': [
            '--stacks=yes',              # include stack information
            '--massif-out-file=%s' % tool_log_file,
        ],
    }
    tool_args = perToolArgs.get(valgrind_tool, [])

    newArgs = common_args + tool_args + [path] + (args or [])
    return newArgs, 'valgrind'
def _process_finish(self, stdout_buf, stderr_buf):
    """Post-exit hook: scan the captured stdout for error strings that must
    never appear on a healthy system."""
    super(NvHostEngineApp, self)._process_finish(stdout_buf, stderr_buf)

    if logger.log_dir is None:
        # --no-logging run: nothing was captured to verify
        return

    combinedOutput = "\n".join(self.stdout_lines)
    for badText in NvHostEngineApp.forbidden_strings:
        assert combinedOutput.find(badText) == -1, "nv_hostengine printed \"%s\", this should never happen!" % badText
def __str__(self):
    """Human-readable identifier used in test logs."""
    baseDescription = super(NvHostEngineApp, self).__str__()
    return "nv_hostengine" + baseDescription
def start(self, timeout=default_timeout):
    """Launch nv-hostengine and cache its PID.

    Raises RuntimeError (after logging any captured output) when the
    daemon's PID cannot be observed within 5 seconds.
    """
    # if an existing hostengine is running, stop it
    self._kill_hostengine(self._getpid())

    #Don't timeout nv-hostengine for now. We already call it from
    #RunStandaloneHostEngine, which will start and stop the host engine between tests
    timeout = None
    super(NvHostEngineApp, self).start(timeout=timeout)

    # get and cache the pid; poll the pid file for up to 5 seconds
    waitTime = 5.0
    start = time.time()
    while time.time() - start < waitTime:
        self._pid = self._getpid()
        if self._pid is not None:
            break
        time.sleep(0.050)

    if self._pid is None:
        retValue = super(NvHostEngineApp, self).poll() # Use super method to check status of subprocess object
        if retValue is None:
            # Hostengine did not start up correctly - terminate it so that we clean up subprocess object
            # This prevents multiple zombie processes from being created.
            self.terminate()
            self.validate() # Prevent unnecessary failure messages due to not validated processes
        logger.error("Could not start nvhostengine. Output from the failed launch (if any) follows.")
        # log whatever output we have available
        for line in self.stdout_lines:
            logger.info(line)
        for line in self.stderr_lines:
            logger.error(line)
        raise RuntimeError('Failed to start hostengine app')
def _kill_hostengine(self, pid):
    """Ask the daemon to terminate and block until *pid* is gone.

    No-op when pid is None (no running host engine known).
    """
    if pid is not None:
        self._terminate_hostengine()
        utils.wait_for_pid_to_die(pid)
def _getpid_old(self):
    """Fallback PID lookup via pgrep for the host engine executable name.

    Returns the PID as an int, or None when no matching process exists.

    Fix: the except clause previously referenced the bare name
    ``CalledProcessError``; this block never imports that name directly
    (the rest of the class uses ``subprocess.CalledProcessError``), so a
    failing pgrep would raise NameError instead of returning None.
    """
    # assuming that only one hostengine exists we do a pgrep for it
    # we have to specify -P 1 (init process) or else we will also get the PID of
    # the pgrep shell command. Use -P instead of --parent because some versions of pgrep only have -P
    try:
        pid = subprocess.check_output('pgrep -P 1 -f "%s"' % os.path.basename(self.hostengine_executable),
                                      stderr=subprocess.PIPE,
                                      shell=True).strip()

        # verify only one hostengine exists
        pids = pid.split()
        if len(pids) > 1:
            logger.warning('Multiple hostengine pids found: "%s". Using the last one and hoping for the best.' % pid)
        return int(pids[len(pids) - 1])
    except subprocess.CalledProcessError:
        # pgrep exits non-zero when nothing matched
        return None
def _getpid(self):
    """Read the host engine's PID from its pid file.

    Returns the PID as an int, or None when the file is missing, was
    deleted mid-read, or names a process that is no longer alive.
    An empty pid file falls back to the pgrep-based _getpid_old().
    """
    #Try to read the PID file for the host engine
    if not os.path.isfile(self._pidFilename):
        logger.debug("Pid file %s not found" % self._pidFilename)
        return None

    try:
        with open(self._pidFilename) as fp:
            lines = fp.readlines()
            if len(lines) == 0:
                return self._getpid_old()
    except FileNotFoundError: # Likely another process delete race
        return None

    pidStr = lines[0].strip()
    #logger.error("pidStr %s" % pidStr)

    # Liveness check: /proc/<pid> exists only while the process is running
    procPath = "/proc/" + pidStr + "/"
    #logger.error("exists? %s : %s" % (procPath, str(os.path.exists(procPath))))
    if not os.path.exists(procPath):
        logger.debug("Found pid file %s with pid %s but /proc/%s did not exist" % (self._pidFilename, pidStr, pidStr))
        return None

    return int(pidStr)
def getpid(self):
    """Return the cached host engine PID captured during start() (may be None)."""
    return self._pid
def poll(self):
    """Return None while the host engine appears to still be running,
    otherwise an exit status.

    A return value cached by terminate() wins; failing that, the engine is
    considered alive only while the pid file still names our cached pid.
    """
    if self._retvalue is not None:
        # terminated via this apprunner
        return self._retvalue

    stillOurProcess = (self._pid == self._getpid())
    # 1 == it was terminated or killed in some other way
    return None if stillOurProcess else 1
def terminate(self):
    """
    Forcefully terminates the host engine daemon and return the app's error code/string.
    """
    super(NvHostEngineApp, self).terminate()
    if option_parser.options.profile:
        # keep only the profiler output belonging to the engine's own pid
        self._remove_useless_profiling_files(option_parser.options.profile)

    self._kill_hostengine(self._pid)

    self._retvalue = 0  # cached so poll() reports the terminated state
    return self._retvalue
def _terminate_hostengine(self):
    """Ask the daemon to shut down via 'nv-hostengine --term'.

    Failures are logged but deliberately not re-raised.
    """
    try:
        subprocess.check_output([self.hostengine_executable, '--term', '--pid', self._pidFilename],
                                stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # We do not want to propagate this error because it will obscure the actual reason why a test fails
        # in some cases when an assertion fails for a test
        logger.error("Failed to terminate host engine! (PID %s)" % self._getpid())
        logger.error("Command: '%s' returned non-zero exit status %s.\nOutput:%s"
                     % (e.cmd, e.returncode, e.output))
        # Log information about any running hostengine processes for better debugging info when failures occur
        test_utils.check_for_running_hostengine_and_log_details(False)
def _remove_useless_profiling_files(self, profiling_tool):
    '''
    Remove profiler output files that do not belong to the host engine's pid.
    Starting the nv-hostengine executable creates a new process, so the
    short-lived launcher process is also profiled and leaves files behind
    that we do not care about.
    '''
    outputMarker = profiling_tool + '.out.'
    for entry in os.listdir(self.output_dir):
        isProfilingOutput = outputMarker in entry
        belongsToEngine = str(self._pid) in entry
        if isProfilingOutput and not belongsToEngine:
            logger.debug('deleting useless profiling file "%s"' % entry)
            os.remove(os.path.join(self.output_dir, entry))
| DCGM-master | testing/python3/apps/nv_hostengine_app.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from . import app_runner
import utils
import test_utils
import logger
import option_parser
class DcgmProfTesterApp(app_runner.AppRunner):
    """AppRunner wrapper that launches dcgmproftester<CUDA-major> for tests."""

    # Including future supported architectures
    paths = {
        "Linux_64bit": "./apps/amd64/dcgmproftester",
        "Linux_ppc64le": "./apps/ppc64le/dcgmproftester",
        "Linux_aarch64": "./apps/aarch64/dcgmproftester",
    }

    # None of this error codes should be ever printed by nvcmi
    forbidden_strings = [
        "Unknown Error",
        "Uninitialized",
        "Invalid Argument",
        "Already Initialized",
        "Insufficient Size",
        "Driver Not Loaded",
        "Timeout",
        "DCGM Shared Library Not Found",
        "Function Not Found",
        "(null)", # e.g. from printing %s from null ptr
    ]

    def __init__(self, args=None, gpuIds=None, cudaDriverMajorVersion=None):
        """Build the launch command; gpuIds and an explicit '-i' arg are mutually exclusive."""
        assert(cudaDriverMajorVersion is not None)
        launchArgs = args if args is not None else []

        # The dcgmproftester binaries are suffixed with the CUDA major
        # version (10, 11, ...); append it to the base path.
        executable = DcgmProfTesterApp.paths[utils.platform_identifier] + str(int(cudaDriverMajorVersion))

        if gpuIds is not None and "-i" in launchArgs:
            raise Exception("Do not pass both gpuIds and args with a -i option")

        self.dcgmi = None
        self.output_filename = None

        if gpuIds is not None:
            launchArgs.append("-i")
            launchArgs.append(','.join(map(str, gpuIds)))

        super(DcgmProfTesterApp, self).__init__(executable, launchArgs)

        if not test_utils.noLogging:
            self.trace_fname = os.path.join(logger.log_dir, "app_%03d_dcgm_trace.log" % (self.process_nb))
            self.env["__DCGM_DBG_FILE"] = self.trace_fname
            self.env["__DCGM_DBG_LVL"] = test_utils.loggingLevel

    def _process_finish(self, stdout_buf, stderr_buf):
        """Post-exit hook: fail if stdout contains any forbidden error string."""
        super(DcgmProfTesterApp, self)._process_finish(stdout_buf, stderr_buf)

        # Skip this part if --no-logging option is used
        if logger.log_dir is None:
            return

        combinedOutput = "\n".join(self.stdout_lines)
        for badText in DcgmProfTesterApp.forbidden_strings:
            assert combinedOutput.find(badText) == -1, "dcgmi printed \"%s\", this should never happen!" % badText

    def __str__(self):
        return "dcgmproftester" + super(DcgmProfTesterApp, self).__str__()
| DCGM-master | testing/python3/apps/dcgmproftester_app.py |
#!/usr/bin/env python3
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import subprocess
import sys
def get_branch_names(commit):
    """Return the names of all branches containing *commit*.

    Runs `git branch -a --contains` with refnames stripped of their first
    two path components (e.g. 'refs/heads/').

    Raises:
        Exception: when git exits non-zero. The git stderr text is now
        included in the message (previously it was captured and discarded,
        leaving no clue why the command failed).
    """
    process = subprocess.run(["git", "branch", "-a", "--no-abbrev",
                              "--contains", commit, "--format",
                              "%(refname:lstrip=2)"], text=True,
                             capture_output=True)
    if process.returncode != 0:
        raise Exception("git branch returned an error: %s" % process.stderr.strip())
    return process.stdout.split()
def ignore_mr_branches(branches):
    """Yield branches, dropping CI merge-request refs (merge-requests/... or merge_requests/...)."""
    mrPattern = re.compile(r'^merge[-_]requests/')
    return (branch for branch in branches if not mrPattern.match(branch))
def trim_branch_names(branches):
    """Yield branch names with a single leading 'origin/' or 'remotes/' prefix removed."""
    return (re.sub(r'^(origin|remotes)/', '', branch) for branch in branches)
def pick_release_branch(branches):
    """Return the first branch that looks like rel_dcgm_<major>_<minor>, or None."""
    for branch in branches:
        if re.match(r'^rel_dcgm_\d+_\d+', branch):
            return branch
    return None
def pick_main_branch(branches):
    """Return the first branch named exactly 'master' or 'main', or None.

    Fix: the previous pattern r'^master|main$' groups as (^master)|(main$),
    so any branch merely *starting* with "master" (e.g. "master-old",
    "masterful") matched. The alternation must be parenthesized and fully
    anchored: r'^(master|main)$'.
    """
    found = filter(lambda branch: re.match(r'^(master|main)$', branch), branches)
    return next(found, None)
def main():
    """Print (without newline) the 'best' branch containing argv[1]:
    a release branch first, then master/main, then any other branch,
    or the empty string when none contain the commit."""
    commit = sys.argv[1]
    candidates = list(trim_branch_names(ignore_mr_branches(get_branch_names(commit))))

    chosen = pick_release_branch(candidates)
    if not chosen:
        chosen = pick_main_branch(candidates)
    if not chosen:
        chosen = candidates[0] if candidates else ""

    print(chosen, end="")
# Script entry point: expects a commit-ish as the sole CLI argument.
if __name__ == '__main__':
    main()
| DCGM-master | scripts/get_build_branch.py |
#!/usr/bin/env python
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shutil
import os
import os.path
import sys
import tarfile
import time
# Source directories (relative to the parent of the CWD) to gather from
sourceDirNames = [
    'common',
    'common/protobuf',
    'common/transport',
    'dcgmi',
    'dcgmlib',
    'dcgmlib/src',
    'modules',
    'modules/config',
    'modules/diag',
    'modules/health',
    'modules/introspect',
    'modules/policy',
    'modules/vgpu'
]

# Only files ending in one of these extensions are archived
copyExts = ['.c','.cpp','.h', '.proto']

#Files that we should exclude, even if they match the extension
excludeFiles = ['topology.proto', ]

tgzFilename = "./dcgm_source.tar.gz"
licenseFilename = "./source_code_license.txt"
# License header text prepended to every gathered source file
# NOTE(review): the file handle from open() is never closed explicitly;
# CPython's refcounting reclaims it, but a `with` block would be cleaner.
licenseText = open(licenseFilename).read()
outputTempFolder = "./dcgm_source"
def removeOutputFile():
    """Delete the output tarball left over from a previous run, if any."""
    if not os.path.isfile(tgzFilename):
        return
    os.remove(tgzFilename)
def tarfileFilterFunction(tarInfo):
    """tarfile 'filter' callback: normalize ownership to nvidia/0 and stamp
    the current time so archive metadata is independent of the build host."""
    tarInfo.uid = tarInfo.gid = 0
    tarInfo.uname = "nvidia"
    tarInfo.gname = "nvidia"
    tarInfo.mtime = time.time()
    return tarInfo
# Main script body: rebuild a clean staging folder, copy each kept source
# file with the license header prepended, then pack it all into the tarball.
print("Cleaning up previous runs")
removeOutputFile()

#Recreate our temp folder
if os.path.isdir(outputTempFolder):
    shutil.rmtree(outputTempFolder)
os.mkdir(outputTempFolder)

tarFileObj = tarfile.open(tgzFilename, "w:gz")

sourceInputDir = '../'

for sourceDirName in sourceDirNames:
    print("Working on directory " + sourceDirName)
    createdDir = False  # mirror directory is created lazily, only when a file is kept
    filenames = os.listdir(sourceInputDir + sourceDirName)
    for filename in filenames:
        #Should we exclude this file from the archive?
        if filename in excludeFiles:
            print("EXCLUDED: " + filename)
            continue

        inputFilename = sourceInputDir + sourceDirName + '/' + filename
        keepFile = False
        for copyExt in copyExts:
            if inputFilename.endswith(copyExt):
                keepFile = True
                break
        if not keepFile:
            continue

        print("Keeping file " + inputFilename)
        if not createdDir:
            os.mkdir(outputTempFolder + '/' + sourceDirName)
            createdDir = True

        # Write the license text, then the original file contents
        # NOTE(review): the nested open() handle is never closed explicitly
        outputFilename = outputTempFolder + '/' + sourceDirName + '/' + filename
        outputFp = open(outputFilename, "wt")
        outputFp.write(licenseText)
        outputFp.write(open(inputFilename, "rt").read())
        outputFp.close()
        print("Wrote " + outputFilename)

#Write tar file
print("Writing " + tgzFilename)
tarFileObj.add(outputTempFolder, filter=tarfileFilterFunction)
print("Done")
| DCGM-master | scripts/gatherSourceCode.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Filter out lines in text files that are version dependant
# This is for filtering text in man pages, bindings, etc
#
import getopt
import sys
import datetime
# globals
hIn = None     # input file handle, opened in main()
hOut = None    # output file handle, opened in main()
vMajor = None  # unused in this script
vMinor = None  # unused in this script
now = datetime.datetime.now()  # timestamp for &&&_CURRENT_DATE/YEAR_&&& expansion
def CompareField(v1, v2):
    """Compare two single version fields; returns <0, 0 or >0.

    Numeric fields compare numerically, a number beats a string, and two
    strings compare lexically. (Python 2: relies on the builtin cmp().)
    """
    isNum1 = v1.isdigit()
    isNum2 = v2.isdigit()

    # if both are numbers, higher numbers win
    if isNum1 and isNum2:
        return cmp(int(v1), int(v2))

    # if one is a string and one is a number, the number wins
    if isNum1 and not isNum2:
        return 1
    if not isNum1 and isNum2:
        return -1

    # otherwise, use strcmp()
    return cmp(v1, v2)
def VersionCompare(v1, v2):
    """Compare two dotted version strings field-by-field; returns <0, 0, >0.

    Fields are compared left to right with CompareField(); the first
    non-equal field decides. When all common fields are equal, the version
    with more fields wins.

    Fix: the final tiebreak previously returned len(v1) - len(v2) — the raw
    *string* lengths — which disagrees with the stated "longer version wins"
    intent for inputs like "2.00" vs "2.0.0". It now compares field counts.
    """
    v1Fields = v1.split('.')
    v2Fields = v2.split('.')

    for (v1Field, v2Field) in zip(v1Fields, v2Fields):
        diff = CompareField(v1Field, v2Field)
        if diff != 0:
            return diff

    # If one version has more fields than the other, the longer one wins
    return len(v1Fields) - len(v2Fields)
def VersionMatch(opStr, v1, v2):
    """Evaluate "v1 <opStr> v2" where opStr is one of ==, >=, <=, <, >.

    Exits the process with status 1 on an unrecognized operator.
    (Python 2 script.)
    """
    vDiff = VersionCompare(v1, v2)
    if '==' == opStr:
        return vDiff == 0
    elif '>=' == opStr:
        return vDiff >= 0
    elif '<=' == opStr:
        return vDiff <= 0
    elif '<' == opStr:
        return vDiff < 0
    elif '>' == opStr:
        return vDiff > 0
    else:
        print '"%s": operation string is unknown [==, <=, >=, <, >]' % opStr
        exit(1)
# determine if lines should be shown or hidden
def GetLineMode(line):
parts = line.split("&&&") # ['', '_DCGM_VERSION_IF_', '>= 2.0']
stmt = parts[2].strip().split() # ['>=', '2.0']
op = stmt[0]
lineVersion = stmt[1]
if (VersionMatch(op, version, lineVersion)):
return 'show'
else:
return 'hide'
def ShowLine(line):
    """Write one line to hOut after expanding the template tokens.

    Exits with status 1 if the line carries a &&&_DCGM_VERSION_ERROR_&&& marker.
    """
    # convert all DCGM_VERSION_DEFINEs to the version number (major.minor only)
    truncatedVersion = ".".join(version.split('.')[:2])
    line = line.replace("&&&_DCGM_VERSION_DEFINE_&&&", truncatedVersion)

    # convert all DATE to the current date
    line = line.replace("&&&_CURRENT_DATE_&&&", ("%d/%d/%d" % (now.year, now.month, now.day)))
    line = line.replace("&&&_CURRENT_YEAR_&&&", ("%d" % (now.year)))

    # Error on any DCGM_VERSION_ERRORs
    if line.strip().startswith("&&&_DCGM_VERSION_ERROR_&&&"): # &&&_DCGM_VERSION_ERROR %%
        print line
        exit(1)

    hOut.write(line)
def CheckForUnexpectedTokens(line):
    """Exit with status 1 if the line contains a &&&_..._&&& marker that is
    not one of the known, expandable tokens (malformed/misused filter tag)."""
    if ("&&&_" in line) and ("_&&&" in line) and (not ("&&&_DCGM_VERSION_DEFINE_&&&" in line)) and (not ("&&&_DCGM_VERSION_ERROR_&&&" in line)) \
       and (not ("&&&_CURRENT_DATE_&&&" in line)) and (not ("&&&_CURRENT_YEAR_&&&" in line)):
        print '"%s": looks like a version filter token, but is malformed or misused.' % line
        exit(1)
# Read and filter according to the versioning tags
def FilterFile():
lineMode = 'default'
for line in hIn:
# determine if the line should be printed
if 'default' == lineMode:
if line.strip().startswith("&&&_DCGM_VERSION_IF_&&&"): # &&&_DCGM_VERSION_IF %% >= 2.0
lineMode = GetLineMode(line)
else:
CheckForUnexpectedTokens(line)
ShowLine(line) # the default behavior is to show the line
else:
# inside a DCGM_VERSION_IF block
if line.strip().startswith("&&&_DCGM_VERSION_ELSE_IF_&&&"): # &&&_DCGM_VERSION_ELSE_IF %% >= 2.0
if ('show' == lineMode or 'ifdone' == lineMode):
lineMode = 'ifdone' # already shown, ignore rest of if block
else:
lineMode = GetLineMode(line)
elif line.strip().startswith("&&&_DCGM_VERSION_ELSE_&&&"): # &&&_DCGM_VERSION_ELSE %%
# The else shows lines when the if has not been completed (linemode = !ifdone) and all previous
# conditionals have not shown lines (linemode = hide)
if ('hide' == lineMode):
lineMode = 'show'
else:
lineMode = 'ifdone'
elif line.strip().startswith("&&&_DCGM_VERSION_END_IF_&&&"): # &&&_DCGM_VERSION_END_IF %%
# exit the block
lineMode = 'default'
elif 'show' == lineMode:
CheckForUnexpectedTokens(line)
ShowLine(line)
elif 'hide' == lineMode or 'ifdone' == lineMode:
CheckForUnexpectedTokens(line)
# ignore this line
else:
print '"%s": is not a valid mode. [default, show, hide, ifdone]' % lineMode
exit(1)
def usage(code=1):
    """Print usage text and exit with the given status code."""
    print "python version_filter.py -v 2 infile.txt outfile.txt"
    exit(code)
def main():
    """Parse '-v <version> <infile> <outfile>' from argv, open the files into
    the module globals hIn/hOut, and run FilterFile()."""
    global hIn
    global hOut
    global version

    opts, args = getopt.getopt(sys.argv[1:], 'v:')

    version = ""

    if len(args) != 2:
        usage()
    else:
        inFile = args[0]
        outFile = args[1]

    for o, a in opts:
        if "-v" == o:
            version = a
        else:
            usage()

    # -v is mandatory
    if "" == version:
        usage()

    hIn = open(inFile, 'r')
    hOut = open(outFile, 'w')
    FilterFile()
# Script entry point (runs unconditionally, even on import)
main()
| DCGM-master | build/version_filter.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from wsgiref.simple_server import make_server
import cgi
import os
import DcgmHandle, DcgmSystem, DcgmGroup
import dcgm_structs
import json
from dcgm_structs import dcgmExceptionClass, DCGM_ST_CONNECTION_NOT_VALID
###############################################################################
# Server configuration
DCGM_HTTP_PORT = 1981
DCGM_HTTP_SERVE_FROM = None #'wwwroot/' #Local relative path to serve raw files from. None = Don't serve local files
DCGM_HTTP_JSON_DIR = 'dcgmjsonrest'
DCGM_JSON_VERSION = '1.0' #Change this major version to break compatibility. Change minor version to just inform of new versions
DCGM_IP_ADDRESS = "127.0.0.1" #Use None to run an embedded hostengine
DCGM_OP_MODE = dcgm_structs.DCGM_OPERATION_MODE_AUTO

###############################################################################
# MIME type identifiers understood by DcgmHttpServer.SetMimeType()
DCGM_MIME_TYPE_PLAIN = 0
DCGM_MIME_TYPE_JSON = 1
DCGM_MIME_TYPE_HTML = 2
DCGM_MIME_TYPE_PNG = 3
DCGM_MIME_TYPE_GIF = 4
DCGM_MIME_TYPE_JPG = 5
DCGM_MIME_TYPE_JS = 6

###############################################################################
# HTTP status codes understood by DcgmHttpServer.SetHttpResponseCode()
DCGM_HTTP_CODE_OK = 200
DCGM_HTTP_CODE_BAD_REQUEST = 400
DCGM_HTTP_CODE_UNAUTHORIZED = 401
DCGM_HTTP_CODE_NOT_FOUND = 404
DCGM_HTTP_CODE_INT_ERROR = 500
###############################################################################
class DcgmHttpException(Exception):
    """Exception type reserved for errors raised by this WSGI server."""
    pass
###############################################################################
class DcgmHttpServer:
    """Minimal WSGI/JSON-REST front end to a DCGM host engine (Python 2).

    Holds one DCGM connection plus the HTTP status code and MIME type of the
    response currently being built; one instance serves all requests.
    """
    ###########################################################################
    def __init__(self):
        #Initialize some defaults
        self.SetHttpResponseCode(200)
        self.SetMimeType(DCGM_MIME_TYPE_PLAIN)

        self._dcgmHandle = None        # DcgmHandle once connected
        self._dcgmSystem = None        # DcgmSystem for the handle
        self._defaultGpuGroup = None   # default group of all GPUs
        self._haveWatchedHealth = False  # True once health watches are set

    ###########################################################################
    def SetHttpResponseCode(self, codeId):
        """Record the HTTP status line to send; unknown codes map to 500."""
        codeId = int(codeId)

        if codeId == DCGM_HTTP_CODE_OK:
            self._httpResponseCode = "%d OK" % codeId
        elif codeId == DCGM_HTTP_CODE_BAD_REQUEST:
            self._httpResponseCode = '400 Bad Request'
        elif codeId == DCGM_HTTP_CODE_UNAUTHORIZED:
            self._httpResponseCode = '401 Unauthorized'
        elif codeId == DCGM_HTTP_CODE_NOT_FOUND:
            self._httpResponseCode = '404 Not Found'
        else: #DCGM_HTTP_CODE_INT_ERROR
            self._httpResponseCode = '500 Internal Server Error' #default

    ###########################################################################
    def SetMimeType(self, mimeType):
        """Record the Content-Type header for the response being built."""
        if mimeType == DCGM_MIME_TYPE_PLAIN:
            self._httpMimeType = ('Content-Type','text/plain')
        elif mimeType == DCGM_MIME_TYPE_JSON:
            self._httpMimeType = ('Content-Type','application/json')
        elif mimeType == DCGM_MIME_TYPE_HTML:
            self._httpMimeType = ('Content-Type','text/html')
        elif mimeType == DCGM_MIME_TYPE_JPG:
            self._httpMimeType = ('Content-Type','image/jpeg')
        elif mimeType == DCGM_MIME_TYPE_GIF:
            self._httpMimeType = ('Content-Type','image/gif')
        elif mimeType == DCGM_MIME_TYPE_PNG:
            self._httpMimeType = ('Content-Type','image/png')
        elif mimeType == DCGM_MIME_TYPE_JS:
            self._httpMimeType = ('Content-Type','application/javascript')
        else:
            self._httpMimeType = ('Content-Type','text/plain')

    ###########################################################################
    def SetMimeTypeFromExtension(self, extension):
        """Map a file extension to a MIME type; unknown extensions get text/plain."""
        extension = extension.lower()

        if extension == 'html' or extension == 'htm':
            self.SetMimeType(DCGM_MIME_TYPE_HTML)
        elif extension == 'json':
            self.SetMimeType(DCGM_MIME_TYPE_JSON)
        elif extension == 'png':
            self.SetMimeType(DCGM_MIME_TYPE_PNG)
        elif extension == 'gif':
            self.SetMimeType(DCGM_MIME_TYPE_GIF)
        elif extension == 'jpg' or extension == 'jpeg':
            self.SetMimeType(DCGM_MIME_TYPE_JPG)
        elif extension == 'js':
            self.SetMimeType(DCGM_MIME_TYPE_JS)
        else:
            self.SetMimeType(DCGM_MIME_TYPE_PLAIN)

    ###########################################################################
    def GetJsonError(self, errorString):
        """Build the JSON body for an error reply and mark the response as JSON."""
        responseObj = {'version':DCGM_JSON_VERSION,
                       'status':'ERROR',
                       'errorString':errorString}
        retString = json.JSONEncoder().encode(responseObj)
        self.SetMimeType(DCGM_MIME_TYPE_JSON)
        return retString

    ###########################################################################
    def GetJsonResponse(self, encodeObject):
        """Build the JSON body for a success reply and mark the response as JSON."""
        responseObj = {'version':DCGM_JSON_VERSION,
                       'status':'OK',
                       'responseData':encodeObject}
        #print str(responseObj)
        retString = json.dumps(responseObj, cls=dcgm_structs.DcgmJSONEncoder)
        self.SetMimeType(DCGM_MIME_TYPE_JSON)
        return retString

    ###########################################################################
    def WatchHealth(self):
        """Enable all health watches on the default group (idempotent)."""
        if self._haveWatchedHealth:
            return

        self._defaultGpuGroup.health.Set(dcgm_structs.DCGM_HEALTH_WATCH_ALL)

        #Make sure the health has updated at least once
        self._dcgmSystem.UpdateAllFields(1)

        self._haveWatchedHealth = True

    ###########################################################################
    def CheckDcgmConnection(self):
        '''
        Check if we are connected to DCGM or not and try to connect to DCGM if we aren't connected.

        Returns !0 on error. 0 on success
        '''
        if self._dcgmHandle != None:
            return 0

        self._dcgmHandle = DcgmHandle.DcgmHandle(handle=None, ipAddress=DCGM_IP_ADDRESS, opMode=DCGM_OP_MODE)
        self._dcgmSystem = self._dcgmHandle.GetSystem()
        self._defaultGpuGroup = self._dcgmSystem.GetDefaultGroup()

        #Clear other connection state we can no longer guarantee
        self._haveWatchedHealth = False
        return 0

    ###########################################################################
    def GetAllGpuIds(self, queryParams):
        """JSON reply listing every GPU id DCGM can see."""
        responseObj = []
        gpuIds = self._dcgmSystem.discovery.GetAllGpuIds()
        return self.GetJsonResponse(gpuIds)

    ###########################################################################
    def GetGpuAttributes(self, queryParams):
        """JSON reply with the attributes of the GPU named by ?gpuid=N."""
        if not queryParams.has_key("gpuid"):
            self.SetHttpResponseCode(DCGM_HTTP_CODE_BAD_REQUEST)
            return self.GetJsonError("Missing 'gpuid' parameter")

        gpuId = int(cgi.escape(queryParams['gpuid'][0]))

        #Validate gpuId
        gpuIds = self._dcgmSystem.discovery.GetAllGpuIds()
        if not gpuId in gpuIds:
            self.SetHttpResponseCode(DCGM_HTTP_CODE_BAD_REQUEST)
            return self.GetJsonError("gpuid parameter is invalid")

        attributes = self._dcgmSystem.discovery.GetGpuAttributes(gpuId)
        return self.GetJsonResponse(attributes)

    ###########################################################################
    def CheckGpuHealth(self, queryParams):
        """JSON reply with the current health-check result for the default group."""
        #Make sure we have a health watch
        self.WatchHealth()

        healthObj = self._defaultGpuGroup.health.Check()
        return self.GetJsonResponse(healthObj)

    ###########################################################################
    def RunDiagnostic(self, queryParams):
        """JSON reply from running the DCGM diagnostic; ?level=1..4 (default 1)."""
        validationLevel = 1

        if queryParams.has_key('level'):
            validationLevel = int(cgi.escape(queryParams['level'][0]))
            if validationLevel < dcgm_structs.DCGM_POLICY_VALID_SV_SHORT or validationLevel > dcgm_structs.DCGM_POLICY_VALID_SV_XLONG:
                self.SetHttpResponseCode(DCGM_HTTP_CODE_BAD_REQUEST)
                return self.GetJsonError("\"level\" parameter must be between 1 and 4")

        try:
            diagResponse = self._defaultGpuGroup.action.Validate(validationLevel)
        except dcgmExceptionClass(dcgm_structs.DCGM_ST_NOT_SUPPORTED):
            return self.GetJsonError("The DCGM diagnostic program is not installed. Please install the Tesla-recommended driver.")

        return self.GetJsonResponse(diagResponse)

    ###########################################################################
    def GetJsonRestContents(self, queryParams):
        """Dispatch a /dcgmjsonrest request on its ?action= parameter."""
        if not queryParams.has_key("action"):
            self.SetHttpResponseCode(DCGM_HTTP_CODE_BAD_REQUEST)
            return "Missing 'action' parameter"

        action = cgi.escape(queryParams['action'][0]).lower()

        if action == 'getallgpuids':
            return self.GetAllGpuIds(queryParams)
        elif action == 'getgpuattributes':
            return self.GetGpuAttributes(queryParams)
        elif action == 'checkgpuhealth':
            return self.CheckGpuHealth(queryParams)
        elif action == 'rundiagnostic':
            return self.RunDiagnostic(queryParams)
        else:
            self.SetMimeType(DCGM_MIME_TYPE_PLAIN)
            return self.GetJsonError("Unknown action: %s" % action)

    ###########################################################################
    def GetRawFile(self, filePath):
        """Serve a file from under DCGM_HTTP_SERVE_FROM, guessing the MIME type
        from its extension; 404s (with a plain-text body) when it is missing."""
        serveFilePath = DCGM_HTTP_SERVE_FROM + filePath
        if os.path.exists(serveFilePath):
            fp = open(serveFilePath, 'rb')
            content = fp.read()
            fp.close()
            if filePath.find('.') >= 1:
                extension = filePath.split(".")[-1]
                self.SetMimeTypeFromExtension(extension)
            else:
                self.SetMimeType(DCGM_MIME_TYPE_PLAIN)
            self.SetHttpResponseCode(DCGM_HTTP_CODE_OK)
        else:
            content = "%s not found" % serveFilePath
            self.SetHttpResponseCode(DCGM_HTTP_CODE_NOT_FOUND)

        return content

    ###########################################################################
    def GetContents(self, queryParams, filePath):
        """Route a request to the JSON REST handler or the raw file server.

        NOTE(review): when DCGM_HTTP_SERVE_FROM is None and the path is not
        the JSON dir, this falls through and returns None — the WSGI caller
        then emits a non-string body. Probably an error string was intended;
        confirm before relying on this path.
        """
        filePathList = filePath.split('/')
        print str(filePathList)
        if filePathList[0] == DCGM_HTTP_JSON_DIR:
            return self.GetJsonRestContents(queryParams)

        #default to return a raw file from the filesystem
        if DCGM_HTTP_SERVE_FROM != None:
            return self.GetRawFile(filePath)
        else:
            self.SetHttpResponseCode(DCGM_HTTP_CODE_NOT_FOUND)

    ###########################################################################
    '''
    Web server main entry point. Call from wsgi callback

    Returns string of http contents
    '''
    def WsgiMain(self, environ, start_response):
        responseStr = ""
        filePath = environ['PATH_INFO'].lstrip('/')
        queryParams = cgi.parse_qs(environ['QUERY_STRING'])

        #for k in environ.keys():
        #    responseStr += "%s => %s\n" % (k, environ[k])

        # Retry once on a stale DCGM connection before giving up
        numRetries = 0
        retryLimit = 1
        gotResponse = False
        while (not gotResponse) and numRetries < retryLimit:
            try:
                self.CheckDcgmConnection()
                responseStr = self.GetContents(queryParams, filePath)
                gotResponse = True
            except dcgmExceptionClass(dcgm_structs.DCGM_ST_CONNECTION_NOT_VALID):
                #Just retry if we have a connection error
                self._dcgmHandle = None
                self._dcgmSystem = None
                numRetries += 1
                print "Got disconnected. Retrying"
                pass

        if not gotResponse:
            responseStr = self.GetJsonError("Unable to connect to the DCGM daemon")
            self.SetHttpResponseCode(DCGM_HTTP_CODE_INT_ERROR)

        responseHeaders = [self._httpMimeType]
        start_response(self._httpResponseCode, responseHeaders)
        return responseStr
###############################################################################
def dcmg_http_app(environ, start_response):
    '''
    Main entry point
    '''
    # Delegate to the shared server instance; WSGI bodies are iterables,
    # so wrap the single response string in a list.
    responseBody = g_dcgmServer.WsgiMain(environ, start_response)
    return [responseBody]
###############################################################################
def application(environ, start_response):
    '''
    Callback for uWSGI (which looks up the module-level name "application")
    '''
    return dcmg_http_app(environ, start_response)
#Try to load the DCGM shared library once at import time
dcgm_structs._dcgmInit()

g_dcgmServer = DcgmHttpServer()  # single server instance shared by all requests
###############################################################################
if __name__ == '__main__':
    # Standalone debug mode: serve with the stdlib WSGI reference server
    httpd = make_server('', DCGM_HTTP_PORT, dcmg_http_app)
    print "Serving HTTP on port %d..." % DCGM_HTTP_PORT

    # Respond to requests until process is killed
    httpd.serve_forever()
###############################################################################
| DCGM-master | dcgm_wsgi/dcgm_wsgi.py |
#!/usr/bin/env python3
#
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import sys
import os
import re
# Regular expression for pulling out the different pieces of the nvml entry points
# It will match something in the form of:
# funcname, tsapiFuncname, (argument list), "(argument type matching)", arg1[, arg2, ...])
# We place funcname, (argument list), and arg1[, arg2, ...] into groups for use later
# Use a raw string so the regex escapes (\w, \s, \") are not interpreted as
# (invalid) Python string-literal escapes.
preg = re.compile(r"(nvml\w+),[^)]+(\([^)]+\)),\s+\"[^\"]+\",\s+([^)]+)")
MAX_NVML_ARGS = 20
INJECTION_ARG_COUNT_STR = 'InjectionArgCount'
NVML_RET = 'nvmlReturn_t'
#######################################################################################
# Globals
# Maps each derived injection key to the function name(s) that produce it
g_key_to_function = {}
#######################################################################################
# Generated file names
STUB_PATH = 'src/nvml_generated_stubs.cpp'
INJECTION_ARGUMENT_HEADER = 'InjectionArgument.h'
INJECTION_STRUCTS_NAME = 'nvml_injection_structs.h'
INJECTION_STRUCTS_PATH = 'include/%s' % INJECTION_STRUCTS_NAME
INJECTION_ARGUMENT_PATH = 'include/%s' % INJECTION_ARGUMENT_HEADER
INJECTION_CPP_PATH = 'src/InjectionArgument.cpp'
FUNCTION_INFO_PATH = 'src/FunctionInfo.cpp'
FUNCTION_DECLARATIONS_HEADER = 'nvml_generated_declarations.h'
FUNCTION_DECLARATIONS_PATH = 'include/%s' % FUNCTION_DECLARATIONS_HEADER
PASS_THRU_GENERATED_SRC_PATH = 'src/nvml_pass_through_generated.cpp'
KEY_LIST_PATH = 'src/InjectionKeys.cpp'
KEY_LIST_HEADER_PATH = 'include/InjectionKeys.h'
LINUX_DEFS_PATH = 'src/nvml-injection.linux_defs'
#######################################################################################
AUTO_GENERATED_NOTICE = '/*\n * NOTE: This code is auto-generated by generate_nvml_stubs.py\n * DO NOT EDIT MANUALLY\n */\n\n\n'
#######################################################################################
skip_functions = [ 'nvmlGetBlacklistDeviceCount', 'nvmlGetBlacklistDeviceInfoByIndex' ]
#######################################################################################
# NVML typedefs that are plain aliases for unsigned int
# (duplicate 'nvmlBusType_t' entry removed)
uint_aliases = [
    'nvmlBusType_t',
    'nvmlVgpuTypeId_t',
    'nvmlVgpuInstance_t',
    'nvmlDeviceArchitecture_t',
    'nvmlPowerSource_t',
    'nvmlAffinityScope_t',
]
def get_version(funcname):
    """Return N when funcname ends in '_vN' (single digit), else 0."""
    if not funcname:
        return 0
    tail = funcname[-3:]
    if tail[:2] == '_v':
        return int(tail[2])
    return 0
#######################################################################################
class AllFunctionTypes(object):
    """Dedupe function-pointer typedefs by their argument-type signature."""

    def __init__(self):
        self.all_func_declarations = []   # one typedef line per unique signature
        self.all_argument_type_strs = []  # signatures already registered
        self.funcname_to_func_type = {}
        self.arg_types_to_func_type = {}

    def AddFunctionType(self, funcname, funcinfo):
        """Register funcname, creating a new typedef only for an unseen signature."""
        signature = funcinfo.GetArgumentTypesAsString()
        if signature in self.all_argument_type_strs:
            # Reuse the typedef created for the first function with this signature
            self.funcname_to_func_type[funcname] = self.arg_types_to_func_type[signature]
            return
        func_type = "%s_f" % funcname
        self.all_func_declarations.append(
            "typedef nvmlReturn_t (*%s_f)%s;" % (funcname, funcinfo.GetArgumentList()))
        self.arg_types_to_func_type[signature] = func_type
        self.funcname_to_func_type[funcname] = func_type
        self.all_argument_type_strs.append(signature)

    def GetAllFunctionDeclarations(self):
        return self.all_func_declarations

    def GetFunctionType(self, funcname):
        return self.funcname_to_func_type[funcname]
#######################################################################################
class AllFunctions(object):
    """Registry of parsed NVML entry points, with _vN version bookkeeping."""

    ###################################################################################
    def __init__(self):
        self.func_dict = {}        # funcname -> FunctionInfo
        self.versioned_funcs = {}  # funcname -> version for names ending in _vN

    ###################################################################################
    def AddFunction(self, funcinfo):
        """Record funcinfo unless its name is in the skip list."""
        funcname = funcinfo.GetName()
        if funcname in skip_functions:
            return
        self.func_dict[funcname] = funcinfo
        version = get_version(funcname)
        if version > 0:
            self.versioned_funcs[funcname] = version

    ###################################################################################
    def GetFunctionDict(self):
        return self.func_dict

    ###################################################################################
    def RemoveEarlierVersions(self):
        """Drop every function superseded by a later _vN variant."""
        for funcname, version in self.versioned_funcs.items():
            base = funcname[:-3]
            for i in range(1, version):
                # v1 is the bare name; later versions carry an explicit suffix
                candidate = base if i == 1 else "%s_v%d" % (base, i)
                self.func_dict.pop(candidate, None)
        self.versioned_funcs = {}
#######################################################################################
class FunctionInfo(object):
    """Name, argument list, argument names, and parsed types for one entry point."""

    ###################################################################################
    def __init__(self, funcname, arg_list, arg_names):
        self.funcname = funcname.strip()
        self.arg_list = self.CleanArgList(arg_list)
        self.arg_names = arg_names
        self.arg_types = get_argument_types_from_argument_list(arg_list)

    ###################################################################################
    def CleanArgList(self, arg_list):
        # Normalize pointer spacing so '*' always renders as 'T *', never 'T* '
        pieces = arg_list.split('*')
        cleaned = ''
        for piece in pieces:
            if not cleaned:
                cleaned = piece
                continue
            cleaned += '*' + (piece[1:] if piece[0] == ' ' else piece)
            if piece[-1] != ' ':
                cleaned += ' '
        return cleaned

    ###################################################################################
    def GetName(self):
        return self.funcname

    ###################################################################################
    def GetArgumentList(self):
        return self.arg_list

    ###################################################################################
    def GetArgumentNames(self):
        return self.arg_names

    ###################################################################################
    def GetArgumentTypes(self):
        return self.arg_types

    ###################################################################################
    def GetArgumentTypesAsString(self):
        # Comma-joined type list; empty string when there are no types
        return ",".join(str(arg_type) for arg_type in self.arg_types)
#######################################################################################
def get_true_arg_type(arg_type):
    """Resolve unsigned-int NVML aliases (and their pointer forms) to the real type."""
    if is_pointer_type(arg_type) and arg_type[:-2] in uint_aliases:
        return 'unsigned int *'
    if arg_type in uint_aliases:
        return 'unsigned int'
    return arg_type
#######################################################################################
def remove_extra_spaces(text):
    """Collapse every run of consecutive spaces in text down to a single space."""
    while '  ' in text:
        text = text.replace('  ', ' ')
    return text
#######################################################################################
def get_function_signature(entry_point, first):
    """
    Parse one NVML entry-point blob into (funcname, arg_list, arg_names).
    Returns (None, None, None) when the blob doesn't match the pattern.
    """
    # Flatten to a single line, trim, and strip the surrounding parentheses;
    # what remains looks like:
    #   funcname, tsapiFuncname, (argument list), "(argument type matching)", arg1, arg2, ...)
    flattened = entry_point.replace('\n', ' ').strip()[1:-1]
    match = preg.search(flattened)
    if match:
        return (remove_extra_spaces(match.group(1)),
                remove_extra_spaces(match.group(2)),
                remove_extra_spaces(match.group(3)))
    # The chunk before the first entry point never matches, so errors are only
    # reported for later chunks; the nvml.h include line is also expected noise.
    if flattened != "include \"nvml.h" and not first:
        print("no match found in entry point = '%s'" % flattened)
    return None, None, None
#######################################################################################
def print_body_line(line, file, extra_indent):
    """
    Write line to file indented by one base level (4 spaces) plus
    extra_indent additional 4-space levels, followed by a newline.

    Replaces the original manual loop with string multiplication.
    """
    indent = "    " * (extra_indent + 1)
    file.write("%s%s\n" % (indent, line))
#######################################################################################
def add_function_type(function_type_dict, funcname, arg_list, function_types):
    """Append a function-pointer typedef line for funcname to function_types.

    (function_type_dict is accepted for signature compatibility but unused.)
    """
    function_types.append("typedef nvmlReturn_t (*%s_f)%s;" % (funcname, arg_list))
# Function-name prefixes stripped off to form the injection key.
# Order matters: get_function_info_from_name uses the FIRST matching prefix,
# so more specific prefixes (e.g. 'nvmlDeviceGetHandleBy') must come before
# their shorter forms (e.g. 'nvmlDeviceGet', 'nvmlDevice').
keyPrefixes = [
    'nvmlDeviceGetHandleBy',
    'nvmlDeviceGet',
    'nvmlSystemGet',
    'nvmlDeviceSet',
    'nvmlUnitGet',
    'nvmlUnitSet',
    'nvmlVgpuTypeGet',
    'nvmlVgpuInstanceGet',
    'nvmlVgpuInstanceSet',
    'nvmlGet',
    'nvmlSet',
    'nvmlGpuInstanceGet',
    'nvmlComputeInstanceGet',
    'nvmlDeviceClear',
    'nvmlDeviceFreeze',
    'nvmlDeviceModify',
    'nvmlDeviceQuery',
    'nvmlDeviceCreate',
    'nvmlDeviceReset',
    'nvmlDeviceIs',
    'nvmlDevice',
]
# GPM functions are handled separately: their 'Get' is a trailing suffix
# rather than part of the prefix.
gpmPrefix = 'nvmlGpm'
#######################################################################################
def get_suffix_if_match(funcname, prefix):
    """Return funcname with prefix removed when it starts with prefix, else None."""
    if funcname.startswith(prefix):
        return funcname[len(prefix):]
    return None
#######################################################################################
def get_function_info_from_name(funcname):
    """
    Derive the injection key and version for an NVML function name.

    The key is funcname with its recognized prefix (from keyPrefixes, or the
    GPM prefix with a trailing 'Get') stripped; a trailing '_vN' suffix is
    peeled into the returned version (default 1).  The key -> function-name
    mapping is also recorded in the module-level g_key_to_function dict
    (comma-joined when several functions share one key).

    Returns (key, version); key is None when no prefix matched.

    Change from original: removed the unused local variable 'matched'.
    """
    key = None
    version = 1
    for prefix in keyPrefixes:
        key = get_suffix_if_match(funcname, prefix)
        if key:
            break
    if not key:
        key = get_suffix_if_match(funcname, gpmPrefix)
        if key:
            # GPM getters end in 'Get' instead of starting with it
            if key[-3:] == 'Get':
                key = key[:-3]
        else:
            print("Can't get key for %s" % funcname)
    # Check for version at the end
    if key:
        if key[-3:-1] == '_v':
            version = int(key[-1])
            key = key[:-3]
        if key in g_key_to_function:
            func_list = "%s, %s" % (g_key_to_function[key], funcname)
            # print("Key %s maps to two functions: %s" % (key, func_list))
            g_key_to_function[key] = func_list
        else:
            g_key_to_function[key] = funcname
    return key, version
#######################################################################################
def check_and_write_get_string_body(stub_file, key, arg_types, arg_names):
    """
    Emit a GetString-style body when the signature is (handle, char *, length).
    Returns True when the body was written, False when the shape doesn't fit.
    """
    fits = (
        len(arg_types) == 3
        and arg_types[1] == CHAR
        and not is_pointer(arg_types[0])
        and (arg_types[2] == UINT or arg_types[2] == UINT_PTR)
    )
    if not fits:
        return False
    # InjectionNvml::GetString will return a std::string associated with two keys
    print_body_line("InjectionArgument arg(%s);" % arg_names[0], stub_file, 1)
    print_body_line("std::string buf = InjectedNvml->GetString(arg, \"%s\");" % (key), stub_file, 1)
    if arg_types[2] == UINT:
        print_body_line("snprintf(%s, %s, \"%s\", buf.c_str());" % (arg_names[1], arg_names[2], '%s'), stub_file, 1)
    else:
        # length passed by pointer: dereference it for snprintf
        print_body_line("snprintf(%s, *%s, \"%s\", buf.c_str());" % (arg_names[1], arg_names[2], '%s'), stub_file, 1)
    return True
#######################################################################################
def is_pointer(arg_type):
    """True when the type string's last character is '*'."""
    return arg_type[-1] == '*'
# C type-name constants used by the generators to pattern-match entry-point
# signatures.  (The duplicate UINT_PTR definition has been removed.)
CONST_CHAR = 'const char *'
CHAR = 'char *'
NVML_DEVICE = 'nvmlDevice_t'
NVML_DEVICE_PTR = 'nvmlDevice_t *'
UINT_PTR = 'unsigned int *'
CLOCKTYPE = 'nvmlClockType_t'
UINT = 'unsigned int'
VGPUTYPEID = 'nvmlVgpuTypeId_t'
UNIT = 'nvmlUnit_t'
VGPU_INSTANCE = 'nvmlVgpuInstance_t'
#######################################################################################
def print_ungenerated_function(funcname, arg_types):
    """
    Debug hook for entry points that no generator branch handled.
    The diagnostic print is currently disabled; the function is a no-op.
    """
    arg_type_string = ','.join(arg_types)
    #print("Not generated: %s with (%d) arg_types: %s" % (funcname, len(arg_types), arg_type_string))
#######################################################################################
def generate_getter_functions(stub_file, funcname, arg_list, arg_types, arg_names, justifyLen):
    """
    Emit the injection body for an NVML Get* entry point into stub_file.

    Each branch pattern-matches the argument-type shape (plus a couple of
    special function names) and writes the corresponding InjectedNvml call;
    justifyLen column-aligns assignment targets in the generated C++.
    Returns True when a body (plus 'return NVML_SUCCESS;') was written,
    False when no pattern matched.
    """
    generated = True
    key, version = get_function_info_from_name(funcname)
    if funcname == "nvmlDeviceGetFieldValues":
        # Special case: generated body needs an explicit nullptr check first
        print_body_line("if (%s == nullptr)" % arg_names[2], stub_file, 1)
        print_body_line("{", stub_file, 1)
        print_body_line("return NVML_ERROR_INVALID_ARGUMENT;", stub_file, 2)
        print_body_line("}\n", stub_file, 1)
        print_body_line("InjectedNvml->GetFieldValues(%s, %s, %s);" % (arg_names[0], arg_names[1], arg_names[2]), stub_file, 1)
    elif len(arg_types) == 2 and arg_types[1] == NVML_DEVICE_PTR:
        # InjectedNvml::GetNvmlDevice returns an nvmlDevice_t and accepts a string identifier, a string describing
        # the identifier
        print_body_line("InjectionArgument identifier(%s);" % arg_names[0], stub_file, 1)
        print_body_line("*%s = InjectedNvml->GetNvmlDevice(identifier, \"%s\");" % (arg_names[1], key), stub_file, 1)
        # print("GetNvmlDevice: %s - %s" % (funcname, key))
    elif len(arg_types) >= 3 and arg_types[1] == CLOCKTYPE:
        if len(arg_types) >= 5: # need to write code to handle the 5 argument version
            generated = False
        if len(arg_types) == 4:
            # InjectedNvml::GetClock returns an unsigned int and receives an nvmlDevice_t, a clock type, and a clock ID
            print_body_line("*%s = InjectedNvml->GetClock(%s, %s, %s);" % (arg_names[3].ljust(justifyLen-1), arg_names[0], arg_names[1], arg_names[2]), stub_file, 1)
        elif len(arg_types) == 3:
            print_body_line("*%s = InjectedNvml->GetClockInfo(%s, \"%s\", %s);" % (arg_names[2].ljust(justifyLen-1), arg_names[0], key, arg_names[1]), stub_file, 1)
    elif len(arg_types) == 2 and arg_types[0] == NVML_DEVICE:
        print_body_line("InjectionArgument arg(%s);" % (arg_names[1]), stub_file, 1)
        # SimpleDeviceGet() is a function that accepts an nvmlDevice_t and a function name, and
        # returns an InjectionArgument populated with the associated value
        print_body_line("arg.SetValueFrom(InjectedNvml->SimpleDeviceGet(%s, \"%s\"));" % (arg_names[0], key), stub_file, 1)
    elif check_and_write_get_string_body(stub_file, key, arg_types, arg_names):
        pass
        #print("GetString: %s - %s" % (funcname, key))
    elif len(arg_types) == 3 and arg_types[0] == NVML_DEVICE and is_pointer(arg_types[2]):
        if is_pointer(arg_types[1]):
            print_body_line("std::vector<InjectionArgument> values;", stub_file, 1)
            print_body_line("values.push_back(InjectionArgument(%s));" % arg_names[1], stub_file, 1)
            print_body_line("values.push_back(InjectionArgument(%s));" % arg_names[2], stub_file, 1)
            print_body_line("CompoundValue cv(values);", stub_file, 1)
            # GetCompoundValue will set the variables in the vector, reflecting the order they're supplied in
            print_body_line("InjectedNvml->GetCompoundValue(%s, \"%s\", cv);" % (arg_names[0], key), stub_file, 1)
            #print("Get compound value is covering: %s" % funcname)
        else:
            print_body_line("InjectionArgument output(%s);" % arg_names[2], stub_file, 1)
            print_body_line("InjectionArgument arg(%s);" % arg_names[1], stub_file, 1)
            print_body_line("output.SetValueFrom(InjectedNvml->DeviceGetWithExtraKey(%s, \"%s\", arg));" % (arg_names[0], key), stub_file, 1)
            # print("DeviceGetWithExtraKey for %s" % funcname)
    elif len(arg_types) == 1 and is_pointer(arg_types[0]):
        print_body_line("InjectionArgument arg(%s);" % arg_names[0], stub_file, 1)
        print_body_line("arg.SetValueFrom(InjectedNvml->ObjectlessGet(\"%s\"));" % (key), stub_file, 1)
        # print("ObjectlessGet for %s" % funcname)
    elif len(arg_types) == 2 and arg_types[0] == CHAR and arg_types[1] == UINT:
        lhand = "std::string str"
        print_body_line("%s = InjectedNvml->ObjectlessGet(\"%s\").AsString();" % (lhand.ljust(justifyLen), key), stub_file, 1)
        print_body_line("snprintf(%s, %s, \"%s\", str.c_str());" % (arg_names[0], arg_names[1], "%s"), stub_file, 1)
        #print("GetString: %s - %s" % (funcname, key))
    elif len(arg_types) == 2 and arg_types[0] == UNIT:
        print_body_line("InjectionArgument output(%s);" % arg_names[1], stub_file, 1)
        print_body_line("output.SetValueFrom(InjectedNvml->UnitGet(%s, \"%s\"));" % (arg_names[0], key), stub_file, 1)
    elif len(arg_types) == 2 and arg_types[0] == VGPU_INSTANCE:
        print_body_line("InjectionArgument output(%s);" % arg_names[1], stub_file, 1)
        print_body_line("output.SetValueFrom(InjectedNvml->VgpuInstanceGet(%s, \"%s\"));" % (arg_names[0], key), stub_file, 1)
    elif len(arg_types) == 2 and arg_types[0] == VGPUTYPEID:
        print_body_line("InjectionArgument output(%s);" % arg_names[1], stub_file, 1)
        print_body_line("output.SetValueFrom(InjectedNvml->GetByVgpuTypeId(%s, \"%s\"));" % (arg_names[0], key), stub_file, 1)
    else:
        print_ungenerated_function(funcname, arg_types)
        generated = False
    if generated:
        print_body_line('return NVML_SUCCESS;', stub_file, 1)
    return generated
#######################################################################################
def is_getter(funcname):
    """True when the function name contains 'Get' anywhere."""
    return "Get" in funcname
#######################################################################################
def is_setter(funcname):
    """True when the function name contains 'Set' anywhere."""
    return "Set" in funcname
#######################################################################################
def generate_setter_functions(stub_file, funcname, arg_list, arg_types, arg_names):
    """
    Emit the injection body for an NVML Set* entry point into stub_file.

    Only the (device, value) and (device, key/value-pair) shapes are handled;
    anything else is reported via print_ungenerated_function.  Returns True
    when a body (plus 'return NVML_SUCCESS;') was written.
    """
    generated = True
    key, version = get_function_info_from_name(funcname)
    if len(arg_types) == 2 and arg_types[0] == NVML_DEVICE:
        # Simple (device, value) setter
        print_body_line("InjectionArgument arg(%s);" % (arg_names[1].strip()), stub_file, 1)
        print_body_line("InjectedNvml->SimpleDeviceSet(%s, \"%s\", arg);" % (arg_names[0], key), stub_file, 1)
    elif len(arg_types) == 3 and arg_types[0] == NVML_DEVICE:
        if funcname == 'nvmlDeviceSetFanSpeed_v2' or funcname == 'nvmlDeviceSetTemperatureThreshold':
            # These two treat the middle argument as an extra lookup key
            print_body_line("InjectionArgument extraKey(%s);" % arg_names[1], stub_file, 1)
            print_body_line("InjectionArgument value(%s);" % arg_names[2], stub_file, 1)
            print_body_line("InjectedNvml->DeviceSetWithExtraKey(%s, \"%s\", extraKey, value);" % (arg_names[0], key), stub_file, 1)
        else:
            # Everything else stores both trailing arguments as a compound value
            print_body_line("std::vector<InjectionArgument> values;", stub_file, 1)
            print_body_line("values.push_back(InjectionArgument(%s));" % arg_names[1].strip(), stub_file, 1)
            print_body_line("values.push_back(InjectionArgument(%s));" % arg_names[2].strip(), stub_file, 1)
            print_body_line("CompoundValue cv(values);", stub_file, 1)
            print_body_line("InjectedNvml->DeviceSetCompoundValue(%s, \"%s\", cv);" % (arg_names[0], key), stub_file, 1)
    else:
        generated = False
        print_ungenerated_function(funcname, arg_types)
    if generated:
        print_body_line("return NVML_SUCCESS;", stub_file, 1)
    return generated
# Entry points whose stubs cannot be auto-generated;
# generate_injection_function bails out early for these.
cant_generate = [
    'nvmlDeviceGetVgpuMetadata',
    'nvmlDeviceSetDriverModel', # Windows only
    'nvmlDeviceSetMigMode', # Too unique - requires specific checks
]
#######################################################################################
def generate_injection_function(stub_file, funcname, arg_list, arg_types, arg_names, justifyLen):
    """
    Dispatch to the getter/setter body generators for one entry point.
    Returns True when a specific injection body was emitted, False otherwise.
    """
    if funcname in cant_generate:
        return False
    if is_getter(funcname):
        return generate_getter_functions(stub_file, funcname, arg_list, arg_types, arg_names, justifyLen)
    if is_setter(funcname):
        return generate_setter_functions(stub_file, funcname, arg_list, arg_types, arg_names)
    print_ungenerated_function(funcname, arg_types)
    return False
#######################################################################################
def write_function_definition_start(fileHandle, funcname, arg_list):
    """
    Write 'nvmlReturn_t <funcname><arg_list>' plus the opening brace,
    wrapping the argument list one-per-line when the single-line form
    exceeds 120 columns.
    """
    first_part = "%s %s" % (NVML_RET, funcname)
    line = "%s%s" % (first_part, arg_list)
    line = remove_extra_spaces(line).strip()
    if len(line) <= 120:
        fileHandle.write("%s\n{\n" % line)
    else:
        tokens = arg_list.split(',')
        count = len(tokens)
        fileHandle.write("%s%s,\n" % (first_part, tokens[0]))
        # NOTE(review): if count == 1 here (a single argument pushing the line
        # past 120 columns), tokens[0] and tokens[-1] are the same token and
        # would be emitted twice — presumably unreachable with real NVML
        # signatures; confirm.
        if count > 2:
            index = 1
            while index < count - 1:
                fileHandle.write("%s%s,\n" % (" ".ljust(len(first_part)), tokens[index]))
                index = index + 1
        fileHandle.write("%s %s\n{\n" % (" ".ljust(len(first_part)), tokens[-1].strip()))
#######################################################################################
def write_function(stub_file, funcinfo, all_functypes):
    """
    Emit one complete stub definition for an NVML entry point into stub_file.

    The stub has two halves: the pass-through branch (primes PassThruNvml's
    function cache but currently always returns NVML_ERROR_NOT_SUPPORTED) and
    the injection branch, which uses a specifically generated body when
    generate_injection_function can produce one, falling back to the generic
    Get/Set wrapper calls otherwise.  all_functypes is currently unused.

    Returns True when a specific injection body was generated.
    """
    funcname = funcinfo.GetName()
    arg_list = funcinfo.GetArgumentList()
    arg_names = funcinfo.GetArgumentNames()
    arg_types = funcinfo.GetArgumentTypes()
    key, version = get_function_info_from_name(funcname)
    generated = False
    write_function_definition_start(stub_file, funcname, arg_list)
    # Write the body: pass-through half first
    print_body_line("if (GLOBAL_PASS_THROUGH_MODE)", stub_file, 0)
    print_body_line("{", stub_file, 0)
    print_body_line("auto PassThruNvml = PassThruNvml::GetInstance();", stub_file, 1)
    print_body_line("if (PassThruNvml->IsLoaded(__func__) == false)", stub_file, 1)
    print_body_line("{", stub_file, 1)
    print_body_line("PassThruNvml->LoadFunction(__func__);", stub_file, 2)
    print_body_line("}", stub_file, 1)
    print_body_line("return NVML_ERROR_NOT_SUPPORTED;", stub_file, 1)
    # print_body_line("auto func = reinterpret_cast<decltype(%s)>(PassThruNvml->GetFunction(__func__));" % (funcname), stub_file, 1)
    #print_body_line("// auto func = (decltype(%s))(PassThruNvml->GetFunction(__func__));" % (funcname), stub_file, 1)
    #print_body_line("// return func(%s);" % (arg_names), stub_file, 1)
    print_body_line("}", stub_file, 0)
    print_body_line("else", stub_file, 0)
    print_body_line("{", stub_file, 0)
    arguments = [arg.strip() for arg in arg_names.split(",")]
    start = "auto InjectedNvml"
    print_body_line("%s = InjectedNvml::GetInstance();" % start, stub_file, 1)
    if generate_injection_function(stub_file, funcname, arg_list, arg_types, arguments, len(start)):
        generated = True
    else:
        # Generic fallback: push each argument into a vector and defer to the
        # runtime wrappers; device functions pass the handle separately.
        useDevice = arg_types[0] == NVML_DEVICE
        first = True
        print_body_line("std::vector<InjectionArgument> args;", stub_file, 1)
        for argument in arguments:
            if first and useDevice:
                # BUGFIX: 'first' was never cleared, so for device functions
                # EVERY argument was skipped instead of only the device handle.
                first = False
                continue
            print_body_line("args.push_back(InjectionArgument(%s));" % argument, stub_file, 1)
        stub_file.write("\n")
        print_body_line("if (InjectedNvml->IsGetter(__func__))", stub_file, 1)
        print_body_line("{", stub_file, 1)
        if useDevice:
            print_body_line("return InjectedNvml->DeviceGetWrapper(__func__, \"%s\", %s, args);" % (key, arguments[0]), stub_file, 2)
        else:
            print_body_line("return InjectedNvml->GetWrapper(__func__, args);", stub_file, 2)
        print_body_line("}", stub_file, 1)
        print_body_line("else", stub_file, 1)
        print_body_line("{", stub_file, 1)
        if useDevice:
            print_body_line("return InjectedNvml->DeviceSetWrapper(__func__, \"%s\", %s, args);" % (key, arguments[0]), stub_file, 2)
        else:
            print_body_line("return InjectedNvml->SetWrapper(__func__, args);", stub_file, 2)
        print_body_line("}", stub_file, 1)
    print_body_line("}", stub_file, 0)
    print_body_line("return NVML_SUCCESS;", stub_file, 0)
    # Write the end of the function
    stub_file.write("}\n\n")
    return generated
#######################################################################################
def write_declarations_file(function_declarations, output_dir):
    """
    Write the generated declarations header (functionInfo_t plus every
    function-pointer typedef) under output_dir/FUNCTION_DECLARATIONS_PATH.
    """
    header_path = '%s/%s' % (output_dir, FUNCTION_DECLARATIONS_PATH)
    with open(header_path, 'w') as decl_file:
        decl_file.write("#pragma once\n\n")
        decl_file.write(AUTO_GENERATED_NOTICE)
        decl_file.write('#include <nvml.h>\n\n')
        decl_file.write("#define MAX_NVML_ARGS %d\n" % MAX_NVML_ARGS)
        decl_file.write("typedef struct\n{\n")
        print_body_line('const char *funcname;', decl_file, 0)
        print_body_line('unsigned int argCount;', decl_file, 0)
        print_body_line('injectionArgType_t argTypes[MAX_NVML_ARGS];', decl_file, 0)
        decl_file.write('} functionInfo_t;\n\n')
        decl_file.write('// clang-format off\n')
        for typedef_line in function_declarations:
            decl_file.write("%s\n" % typedef_line)
#######################################################################################
def write_stub_file_header(stub_file):
    """Emit the preamble of the generated stub .cpp (includes, extern C, globals)."""
    stub_file.write(AUTO_GENERATED_NOTICE)
    for header in ("InjectedNvml.h", "nvml.h"):
        stub_file.write("#include \"%s\"\n" % header)
    stub_file.write("#include \"%s\"\n\n" % FUNCTION_DECLARATIONS_HEADER)
    stub_file.write("#include \"PassThruNvml.h\"\n\n")
    stub_file.write("#ifdef __cplusplus\nextern \"C\"\n{\n#endif\n\n")
    stub_file.write("bool GLOBAL_PASS_THROUGH_MODE = false;\n\n")
#######################################################################################
def get_argument_types_from_argument_list(arg_list):
    """
    Parse a C argument list like '(nvmlDevice_t device, unsigned int *speed)'
    into a list of type strings, normalizing pointers to the 'T *' form.
    """
    arg_list = arg_list.strip()
    if arg_list.startswith('('):
        arg_list = arg_list[1:]
    if arg_list.endswith(')'):
        arg_list = arg_list[:-1]
    argument_types = []
    for raw_argument in arg_list.split(','):
        words = raw_argument.strip().split(' ')
        arg_type = words[0]
        if len(words) == 2:
            name_head = words[1].strip()[0]
        else:
            # Multi-word type (e.g. 'unsigned int'); last word is the name
            for middle in words[1:-1]:
                arg_type += ' %s' % middle
            name_head = words[-1][0]
        if name_head == '*':
            # '*name' style: the star belongs to the type
            arg_type += ' *'
        elif arg_type[-1] == '*' and arg_type[-2] != ' ':
            # 'type*' style: re-attach the star with a space
            arg_type = arg_type[:-1] + ' *'
        argument_types.append(arg_type)
    return argument_types
#######################################################################################
def build_argument_type_list(arg_list, all_argument_types):
    """
    Parse arg_list, add each alias-resolved type to all_argument_types
    (deduplicated), and return the raw parsed types.
    """
    argument_types = get_argument_types_from_argument_list(arg_list)
    for raw_type in argument_types:
        resolved = get_true_arg_type(raw_type)
        if resolved not in all_argument_types:
            all_argument_types.append(resolved)
    return argument_types
#######################################################################################
def is_pointer_type(arg_type):
    """True when the type string ends with ' *' (normalized pointer form)."""
    return arg_type.endswith(' *')
#######################################################################################
def is_nvml_enum(arg_type):
    """True when the type name starts with the 'nvml' prefix."""
    return arg_type.startswith('nvml')
#######################################################################################
def ends_with_t(arg_type):
    """True when the type name carries the conventional '_t' suffix."""
    return arg_type.endswith('_t')
#######################################################################################
def transform_arg_type(arg_type, arg_type_dict):
    """
    Compute the union-member name and CamelCase suffix for a C argument type,
    record [name, is_pointer, suffix] in arg_type_dict under the original
    type string, and return (name, is_pointer).
    """
    original_key = arg_type
    # Strings get fixed members regardless of the general rules
    if arg_type == 'char *':
        arg_type_dict[original_key] = ['str', True, 'Str']
        return 'str', True
    if arg_type == 'const char *':
        arg_type_dict[original_key] = ['const_str', True, 'ConstStr']
        return 'const_str', True
    pointer = is_pointer_type(arg_type)
    if pointer:
        # Remove the ' *' before deriving the base name
        arg_type = arg_type[:-2]
    if arg_type == 'nvmlBAR1Memory_t':
        # Special-cased: the generic rule would lowercase only the leading 'B'
        name = 'bar1Memory'
        suffix = 'BAR1Memory'
    elif is_nvml_enum(arg_type):
        # nvmlXxx_t -> xxx / Xxx (the trailing '_t' is dropped when present)
        if ends_with_t(arg_type):
            name = '%s%s' % (arg_type[4].lower(), arg_type[5:-2])
            suffix = '%s' % (arg_type[4:-2])
        else:
            name = '%s%s' % (arg_type[4].lower(), arg_type[5:])
            suffix = '%s' % (arg_type[4:])
    else:
        # Builtin types: name is the word initials, suffix is CamelCase words
        # ('unsigned' contributes a bare 'U' and resets the suffix)
        name = ''
        suffix = ''
        for word in arg_type.strip().split(' '):
            name += word[0]
            if word == 'unsigned':
                suffix = 'U'
            else:
                suffix += '%s%s' % (word[0].upper(), word[1:])
    if pointer:
        name += 'Ptr'
        suffix += 'Ptr'
    arg_type_dict[original_key] = [name, pointer, suffix]
    return name, pointer
#######################################################################################
def get_enum_name(arg_type):
    """Map a C argument type to its injectionArgType_t enumerator name."""
    prefix = 'INJECTION_'
    pointer = is_pointer_type(arg_type)
    if pointer:
        arg_type = arg_type[:-2]
    ptr_suffix = '_PTR' if pointer else ''
    if is_nvml_enum(arg_type):
        # nvml prefix dropped; trailing '_t' dropped when present
        base = arg_type[4:-2] if ends_with_t(arg_type) else arg_type[4:]
        return '%s%s%s' % (prefix, base.upper(), ptr_suffix)
    words = arg_type.strip().split(' ')
    if len(words) == 1:
        return '%s%s%s' % (prefix, arg_type.upper(), ptr_suffix)
    # Multi-word builtin: 'unsigned' collapses to a bare 'U', other words are
    # uppercased and underscore-separated
    enum_name = prefix
    for word in words:
        if word == 'unsigned':
            enum_name += 'U'
        else:
            enum_name += '%s_' % word.upper()
    if pointer:
        return enum_name + 'PTR'
    return enum_name[:-1]
#######################################################################################
def print_memcpy(fileHandle, indentLevel, destName, srcName, destIsRef, srcIsRef):
    """
    Emit a memcpy(dest, src, size) statement at the given indent, splitting
    the call across lines when the rendered width would exceed 120 columns.
    """
    if destIsRef:
        dst = '&m_value.%s' % destName
        size = 'sizeof(m_value.%s)' % destName
    else:
        dst = 'm_value.%s' % destName
        size = 'sizeof(*m_value.%s)' % destName
    src = ('&other.m_value.%s' if srcIsRef else 'other.m_value.%s') % srcName
    line = 'memcpy(%s, %s, %s);' % (dst, src, size)
    width = len(line) + (4 * indentLevel) + 4
    if width <= 120:
        print_body_line(line, fileHandle, indentLevel)
    elif width < 124:
        # Just over the limit: break after the opening parenthesis
        print_body_line('memcpy(', fileHandle, indentLevel)
        print_body_line('%s, %s, %s);' % (dst, src, size), fileHandle, indentLevel + 1)
    else:
        # Far over the limit: one operand per line
        print_body_line('memcpy(%s,' % dst, fileHandle, indentLevel)
        print_body_line('    %s,' % src, fileHandle, indentLevel + 1)
        print_body_line('    %s);' % size, fileHandle, indentLevel + 1)
#######################################################################################
def print_equals_and_set(fileHandle, indentLevel, lhName, rhName, lIsPtr, rIsPtr):
    """
    Emit 'this->m_value.X = other.m_value.Y;' (with dereferences as needed)
    followed by a column-aligned 'set = true;' line.
    """
    target = ("*this->m_value.%s" if lIsPtr else "this->m_value.%s") % lhName
    source_name = rhName if rhName else lhName
    deref = "*" if rIsPtr else ""
    print_body_line("%s = %sother.m_value.%s;" % (target, deref, source_name), fileHandle, indentLevel)
    # Pad 'set' so the '=' lines up with the assignment above
    print_body_line("%s = true;" % "set".ljust(len(target)), fileHandle, indentLevel)
#######################################################################################
def write_string_case_entry(injectionCpp):
    """Emit the INJECTION_STRING case of the generated SetValueFrom switch."""
    body = [
        (1, 'case INJECTION_STRING:'),
        (1, '{'),
        (2, 'if (other.m_type == INJECTION_STRING)'),
        (2, '{'),
        (3, 'this->m_str = other.m_str;'),
        (3, 'set = true;'),
        (2, '}'),
        (2, 'else if (other.m_type == INJECTION_CHAR_PTR && other.m_value.str != nullptr)'),
        (2, '{'),
        (3, 'this->m_str = other.m_value.str;'),
        (3, 'set = true;'),
        (2, '}'),
        (2, 'else if (other.m_type == INJECTION_CONST_CHAR_PTR && other.m_value.const_str != nullptr)'),
        (2, '{'),
        (3, 'this->m_str = other.m_value.const_str;'),
        (3, 'set = true;'),
        (2, '}'),
        (2, 'break;'),
        (1, '}'),
    ]
    for indent, text in body:
        print_body_line(text, injectionCpp, indent)
#######################################################################################
def write_case_entry(enum_name, enum_name_to_type_dict, injectionCpp, arg_type_dict):
    """Emit one switch case of InjectionArgument::SetValueFrom() for *enum_name*.

    The generated C++ copies other's value when the types match exactly, and
    also supports flexible conversions: pointer <-> non-pointer versions of the
    same type, and int <-> unsigned int (with range guards).
    """
    if enum_name == 'INJECTION_CONST_CHAR_PTR' or enum_name == 'INJECTION_CONST_NVMLGPUINSTANCEPLACEMENT_T_PTR':
        # Don't support setting const pointers
        return
    print_body_line('case %s:' % enum_name, injectionCpp, 1)
    print_body_line('{', injectionCpp, 1)
    # Exact type match: copy directly
    print_body_line('if (other.m_type == %s)' % enum_name, injectionCpp, 2)
    print_body_line('{', injectionCpp, 2)
    arg_type = enum_name_to_type_dict[enum_name]
    argInfoTuple = arg_type_dict[arg_type]
    structVarName = argInfoTuple[0]  # union member name for this type
    isPtr = argInfoTuple[1]          # True if the union member is a pointer
    if arg_type[:4] == 'nvml':
        # Handle ptr types
        if isPtr:
            print_memcpy(injectionCpp, 3, structVarName, structVarName, False, False)
        else:
            print_memcpy(injectionCpp, 3, structVarName, structVarName, True, True)
        print_body_line('set = true;', injectionCpp, 3)
    elif enum_name == 'INJECTION_CHAR_PTR':
        # Copying one char* over another is not supported by the generated code
        print_body_line('return NVML_ERROR_INVALID_ARGUMENT;', injectionCpp, 3)
        pass
    else:
        print_equals_and_set(injectionCpp, 3, structVarName, structVarName, isPtr, isPtr)
    print_body_line('}', injectionCpp, 2) # close generated if statement
    if not isPtr:
        # Add setting a non-pointer from a pointer to the same arg
        ptrVersion = '%s_PTR' % enum_name
        structVarPtrVersion = '%sPtr' % structVarName
        if ptrVersion in enum_name_to_type_dict:
            print_body_line('else if (other.m_type == %s)' % ptrVersion, injectionCpp, 2)
            print_body_line('{', injectionCpp, 2)
            if arg_type[:4] == 'nvml':
                print_memcpy(injectionCpp, 3, structVarName, structVarPtrVersion, True, False)
                print_body_line('set = true;', injectionCpp, 3)
            else:
                print_equals_and_set(injectionCpp, 3, structVarName, structVarPtrVersion, False, True)
            print_body_line('}', injectionCpp, 2) # close generated if statement
    else:
        # Pointer type: also allow setting from the non-pointer variant
        sansPtr = enum_name[:-4]
        sansPtrStruct = structVarName[:-3]
        if sansPtr in enum_name_to_type_dict:
            print_body_line('else if (other.m_type == %s)' % sansPtr, injectionCpp, 2)
            print_body_line('{', injectionCpp, 2)
            if arg_type[:4] == 'nvml':
                print_memcpy(injectionCpp, 3, structVarName, sansPtrStruct, False, True)
                print_body_line('set = true;', injectionCpp, 3)
            else:
                print_equals_and_set(injectionCpp, 3, structVarName, sansPtrStruct, True, False)
            print_body_line('}', injectionCpp, 2)
    # int <-> unsigned conversions, guarded so negative / out-of-range values
    # are not silently copied
    if enum_name == 'INJECTION_UINT':
        print_body_line('else if (other.m_type == INJECTION_INT && other.m_value.i > 0)', injectionCpp, 2)
        print_body_line('{', injectionCpp, 2)
        print_body_line('this->m_value.ui = other.m_value.i;', injectionCpp, 3)
        print_body_line('set = true;', injectionCpp, 3)
        print_body_line('}', injectionCpp, 2)
        print_body_line('else if (other.m_type == INJECTION_INT_PTR && *other.m_value.iPtr > 0)', injectionCpp, 2)
        print_body_line('{', injectionCpp, 2)
        print_body_line('this->m_value.ui = *other.m_value.iPtr;', injectionCpp, 3)
        print_body_line('set = true;', injectionCpp, 3)
        print_body_line('}', injectionCpp, 2)
    elif enum_name == 'INJECTION_UINT_PTR':
        print_body_line('else if (other.m_type == INJECTION_INT && other.m_value.i > 0)', injectionCpp, 2)
        print_body_line('{', injectionCpp, 2)
        print_body_line('*this->m_value.uiPtr = other.m_value.i;', injectionCpp, 3)
        print_body_line('set = true;', injectionCpp, 3)
        print_body_line('}', injectionCpp, 2)
        print_body_line('else if (other.m_type == INJECTION_INT_PTR && *other.m_value.iPtr > 0)', injectionCpp, 2)
        print_body_line('{', injectionCpp, 2)
        print_body_line('*this->m_value.uiPtr = *other.m_value.iPtr;', injectionCpp, 3)
        print_body_line('set = true;', injectionCpp, 3)
        print_body_line('}', injectionCpp, 2)
    elif enum_name == 'INJECTION_INT':
        print_body_line('else if (other.m_type == INJECTION_UINT && other.m_value.ui <= INT_MAX)', injectionCpp, 2)
        print_body_line('{', injectionCpp, 2)
        print_body_line('this->m_value.i = other.m_value.ui;', injectionCpp, 3)
        print_body_line('set = true;', injectionCpp, 3)
        print_body_line('}', injectionCpp, 2)
        print_body_line('else if (other.m_type == INJECTION_UINT_PTR && *other.m_value.uiPtr <= INT_MAX)', injectionCpp, 2)
        print_body_line('{', injectionCpp, 2)
        print_body_line('this->m_value.i = *other.m_value.uiPtr;', injectionCpp, 3)
        print_body_line('set = true;', injectionCpp, 3)
        print_body_line('}', injectionCpp, 2)
    elif enum_name == 'INJECTION_INT_PTR':
        print_body_line('else if (other.m_type == INJECTION_UINT && other.m_value.ui <= INT_MAX)', injectionCpp, 2)
        print_body_line('{', injectionCpp, 2)
        print_body_line('*this->m_value.iPtr = other.m_value.ui;', injectionCpp, 3)
        print_body_line('set = true;', injectionCpp, 3)
        print_body_line('}', injectionCpp, 2)
        print_body_line('else if (other.m_type == INJECTION_UINT_PTR && *other.m_value.uiPtr <= INT_MAX)', injectionCpp, 2)
        print_body_line('{', injectionCpp, 2)
        print_body_line('*this->m_value.iPtr = *other.m_value.uiPtr;', injectionCpp, 3)
        print_body_line('set = true;', injectionCpp, 3)
        print_body_line('}', injectionCpp, 2)
    print_body_line('break;', injectionCpp, 2)
    print_body_line('}', injectionCpp, 1) # close case block
#######################################################################################
def write_injection_argument_cpp(enum_name_to_type_dict, output_dir, arg_type_dict):
    """Generate the .cpp implementing InjectionArgument::SetValueFrom().

    The generated function switches on this argument's type and emits one case
    per enum value (via write_case_entry / write_string_case_entry).
    """
    injectionCppPath = '%s/%s' % (output_dir, INJECTION_CPP_PATH)
    with open(injectionCppPath, 'w') as injectionCpp:
        injectionCpp.write(AUTO_GENERATED_NOTICE)
        injectionCpp.write('#include <%s>\n' % INJECTION_ARGUMENT_HEADER)
        injectionCpp.write('#include <limits.h>\n')
        injectionCpp.write('#include <cstring>\n\n\n')
        injectionCpp.write('nvmlReturn_t InjectionArgument::SetValueFrom(const InjectionArgument &other)\n{\n')
        print_body_line('bool set = false;\n', injectionCpp, 0)
        print_body_line('if (other.IsEmpty())', injectionCpp, 0)
        print_body_line('{', injectionCpp, 0)
        print_body_line('return NVML_ERROR_NOT_FOUND;', injectionCpp, 1)
        print_body_line('}', injectionCpp, 0)
        print_body_line('switch (this->m_type)', injectionCpp, 0)
        print_body_line('{', injectionCpp, 0)
        for enum_name in enum_name_to_type_dict:
            write_case_entry(enum_name, enum_name_to_type_dict, injectionCpp, arg_type_dict)
        # INJECTION_STRING is not in the type dict; it gets a dedicated case
        write_string_case_entry(injectionCpp)
        print_body_line('default:', injectionCpp, 1)
        print_body_line('break;', injectionCpp, 2)
        print_body_line('}', injectionCpp, 0)
        print_body_line('if (set)', injectionCpp, 0)
        print_body_line('{', injectionCpp, 0)
        print_body_line('return NVML_SUCCESS;', injectionCpp, 1)
        print_body_line('}', injectionCpp, 0)
        print_body_line('else', injectionCpp, 0)
        print_body_line('{', injectionCpp, 0)
        print_body_line('return NVML_ERROR_INVALID_ARGUMENT;', injectionCpp, 1)
        print_body_line('}', injectionCpp, 0)
        injectionCpp.write('}\n')
#######################################################################################
def write_injection_structs_header(all_argument_types, output_dir):
    """Generate the structs header: the simpleValue_t union (one member per
    argument type), the injectionArgType_t enum, and injectNvmlVal_t.

    Returns:
        tuple - (enum_dict: arg type -> enum name,
                 enum_name_to_type_dict: enum name -> arg type,
                 arg_type_dict: arg type -> (member name, isPtr, ...))
    """
    injection_structs_path = "%s/%s" % (output_dir, INJECTION_STRUCTS_PATH)
    enum_dict = {}
    enum_name_to_type_dict = {}
    arg_type_dict = {}
    with open(injection_structs_path, 'w') as injectionStructs:
        injectionStructs.write(AUTO_GENERATED_NOTICE)
        injectionStructs.write('#pragma once\n\n')
        injectionStructs.write('#include <nvml.h>\n')
        # write the union for simple value types
        injectionStructs.write('typedef union\n{\n')
        for arg_type in all_argument_types:
            # transform_arg_type also populates arg_type_dict as a side effect
            arg_type_name, isPtr = transform_arg_type(arg_type, arg_type_dict)
            if isPtr:
                # Pointer declarators already end in '*', so no separating space
                print_body_line('%s%s;' % (arg_type, arg_type_name), injectionStructs, 0)
            else:
                print_body_line('%s %s;' % (arg_type, arg_type_name), injectionStructs, 0)
        injectionStructs.write('} simpleValue_t;\n\n')
        # write the enum for the types
        injectionStructs.write('typedef enum injectionArg_enum\n{\n')
        index = 0
        NAME_SPACE_LEN = 0
        # First pass: build the name maps and find the longest enum name so
        # the '=' signs can be column-aligned
        for arg_type in all_argument_types:
            enum_name = get_enum_name(arg_type)
            enum_dict[arg_type] = enum_name
            enum_name_to_type_dict[enum_name] = arg_type
            if len(enum_name) > NAME_SPACE_LEN:
                NAME_SPACE_LEN = len(enum_name)
        # Second pass: emit the aligned enum entries
        for arg_type in all_argument_types:
            enum_name = enum_dict[arg_type]
            print_body_line("%s = %d," % (enum_name.ljust(NAME_SPACE_LEN), index), injectionStructs, 0)
            index += 1
        print_body_line('%s = %d,' % ("INJECTION_STRING".ljust(NAME_SPACE_LEN), index), injectionStructs, 0)
        print_body_line(INJECTION_ARG_COUNT_STR, injectionStructs, 0)
        injectionStructs.write('} injectionArgType_t;\n\n')
        injectionStructs.write('typedef struct\n{\n')
        print_body_line('simpleValue_t value;', injectionStructs, 0)
        print_body_line('injectionArgType_t type;', injectionStructs, 0)
        injectionStructs.write('} injectNvmlVal_t;\n\n')
    return enum_dict, enum_name_to_type_dict, arg_type_dict
#######################################################################################
def write_injection_argument_header(all_argument_types, output_dir):
    """Generate the InjectionArgument C++ class header, then its .cpp.

    Emits the class with a type tag, a value union, a std::string member,
    per-type constructors and As*() accessors, and a Compare() used by the
    comparison operators.

    Returns:
        dict - enum_dict mapping argument type -> enum name
    """
    all_argument_types.sort()
    injectionHeaderPath = '%s/%s' % (output_dir, INJECTION_ARGUMENT_PATH)
    enum_dict, enum_name_to_type_dict, arg_type_dict = write_injection_structs_header(all_argument_types, output_dir)
    with open(injectionHeaderPath, 'w') as injectionHeader:
        injectionHeader.write(AUTO_GENERATED_NOTICE)
        injectionHeader.write('#pragma once\n\n')
        injectionHeader.write('#include <cstring>\n')
        injectionHeader.write('#include <nvml.h>\n')
        injectionHeader.write('#include <string>\n\n')
        injectionHeader.write('#include "%s"\n\n' % INJECTION_STRUCTS_NAME)
        injectionHeader.write('class InjectionArgument\n{\nprivate:\n')
        print_body_line('injectionArgType_t m_type;', injectionHeader, 0)
        print_body_line('simpleValue_t m_value;', injectionHeader, 0)
        print_body_line('std::string m_str;\n', injectionHeader, 0)
        injectionHeader.write('public:\n')
        # Default constructor: type sentinel == "empty"
        print_body_line('InjectionArgument()', injectionHeader, 0)
        print_body_line(': m_type(%s)' % INJECTION_ARG_COUNT_STR, injectionHeader, 1)
        print_body_line('{', injectionHeader, 0)
        print_body_line('Clear();', injectionHeader, 1)
        print_body_line('}\n', injectionHeader, 0)
        print_body_line('InjectionArgument(const injectNvmlVal_t &value)', injectionHeader, 0)
        print_body_line(': m_type(value.type)', injectionHeader, 1)
        print_body_line(', m_value(value.value)', injectionHeader, 1)
        print_body_line('{}\n', injectionHeader, 0)
        print_body_line('/**', injectionHeader, 0)
        print_body_line(' * SetValueFrom - Sets this injection argument based other\'s value', injectionHeader, 0)
        print_body_line(' * @param other - the InjectionArgument whose value we flexibly copy if possible.', injectionHeader, 0)
        print_body_line(' *', injectionHeader, 0)
        print_body_line(' * @return 0 if we could set from other\'s value, 1 if incompatible', injectionHeader, 0)
        print_body_line(' **/', injectionHeader, 0)
        print_body_line('nvmlReturn_t SetValueFrom(const InjectionArgument &other);\n', injectionHeader, 0)
        print_body_line('injectionArgType_t GetType() const', injectionHeader, 0)
        print_body_line('{', injectionHeader, 0)
        print_body_line('return m_type;', injectionHeader, 1)
        print_body_line('}\n', injectionHeader, 0)
        print_body_line('simpleValue_t GetSimpleValue() const', injectionHeader, 0)
        print_body_line('{', injectionHeader, 0)
        print_body_line('return m_value;', injectionHeader, 1)
        print_body_line('}\n', injectionHeader, 0)
        print_body_line('void Clear()', injectionHeader, 0)
        print_body_line('{', injectionHeader, 0)
        print_body_line('memset(&m_value, 0, sizeof(m_value));', injectionHeader, 1)
        print_body_line('}', injectionHeader, 0)
        # Compare(): order by type tag first, then by value within the type
        print_body_line('int Compare(const InjectionArgument &other) const', injectionHeader, 0)
        print_body_line('{', injectionHeader, 0)
        print_body_line('if (m_type < other.m_type)', injectionHeader, 1)
        print_body_line('{', injectionHeader, 1)
        print_body_line('return -1;', injectionHeader, 2)
        print_body_line('}', injectionHeader, 1)
        print_body_line('else if (m_type > other.m_type)', injectionHeader, 1)
        print_body_line('{', injectionHeader, 1)
        print_body_line('return 1;', injectionHeader, 2)
        print_body_line('}', injectionHeader, 1)
        print_body_line('else', injectionHeader, 1)
        print_body_line('{', injectionHeader, 1)
        print_body_line('if (m_type == INJECTION_STRING)', injectionHeader, 2)
        print_body_line('{', injectionHeader, 2)
        print_body_line('if (m_str < other.m_str)', injectionHeader, 3)
        print_body_line('{', injectionHeader, 3)
        print_body_line('return -1;', injectionHeader, 4)
        print_body_line('}', injectionHeader, 3)
        print_body_line('else if (m_str > other.m_str)', injectionHeader, 3)
        print_body_line('{', injectionHeader, 3)
        print_body_line('return 1;', injectionHeader, 4)
        print_body_line('}', injectionHeader, 3)
        print_body_line('else', injectionHeader, 3)
        print_body_line('{', injectionHeader, 3)
        print_body_line('return 0;', injectionHeader, 4)
        print_body_line('}', injectionHeader, 3)
        print_body_line('}', injectionHeader, 2)
        print_body_line('else', injectionHeader, 2)
        print_body_line('{', injectionHeader, 2)
        print_body_line('switch (m_type)', injectionHeader, 3)
        print_body_line('{', injectionHeader, 3)
        for arg_type in all_argument_types:
            typeTuple = arg_type_dict[arg_type]
            enumName = enum_dict[arg_type]
            print_body_line('case %s:' % enumName, injectionHeader, 4)
            print_body_line('{', injectionHeader, 4)
            if typeTuple[1]:
                # Pointer members: strcmp for C strings, memcmp otherwise.
                # The length checks only choose a line-wrapping style so the
                # generated call fits the formatting conventions.
                if enumName == 'INJECTION_CHAR_PTR' or enumName == 'INJECTION_CONST_CHAR_PTR':
                    print_body_line('return strcmp(m_value.%s, other.m_value.%s);' % (typeTuple[0], typeTuple[0]), injectionHeader, 5)
                elif len(typeTuple[0]) <= 12:
                    print_body_line('return memcmp(m_value.%s, other.m_value.%s, sizeof(*m_value.%s));' % (typeTuple[0], typeTuple[0], typeTuple[0]), injectionHeader, 5)
                elif len(typeTuple[0]) <= 15:
                    print_body_line('return memcmp(', injectionHeader, 5)
                    print_body_line('m_value.%s, other.m_value.%s, sizeof(*m_value.%s));' % (typeTuple[0], typeTuple[0], typeTuple[0]), injectionHeader, 6)
                else:
                    print_body_line('return memcmp(m_value.%s,' % typeTuple[0], injectionHeader, 5)
                    print_body_line(' other.m_value.%s,' % typeTuple[0], injectionHeader, 8)
                    print_body_line(' sizeof(*m_value.%s));' % typeTuple[0], injectionHeader, 8)
            else:
                # Scalar members: ordinary three-way comparison
                print_body_line('if (m_value.%s < other.m_value.%s)' % (typeTuple[0], typeTuple[0]), injectionHeader, 5)
                print_body_line('{', injectionHeader, 5)
                print_body_line('return -1;', injectionHeader, 6)
                print_body_line('}', injectionHeader, 5)
                print_body_line('else if (m_value.%s > other.m_value.%s)' % (typeTuple[0], typeTuple[0]), injectionHeader, 5)
                print_body_line('{', injectionHeader, 5)
                print_body_line('return 1;', injectionHeader, 6)
                print_body_line('}', injectionHeader, 5)
                print_body_line('else', injectionHeader, 5)
                print_body_line('{', injectionHeader, 5)
                print_body_line('return 0;', injectionHeader, 6)
                print_body_line('}', injectionHeader, 5)
            print_body_line('break; // NOT REACHED', injectionHeader, 5)
            print_body_line('}', injectionHeader, 4)
        print_body_line('default:', injectionHeader, 4)
        print_body_line('break;', injectionHeader, 5)
        print_body_line('}', injectionHeader, 3)
        print_body_line('}', injectionHeader, 2)
        print_body_line('}', injectionHeader, 1)
        # NOTE(review): Compare() returns int, but this generated fall-through
        # returns 'true' (1) -- only reachable for types not in the switch.
        print_body_line('return true;', injectionHeader, 1)
        print_body_line('}', injectionHeader, 0)
        print_body_line('bool operator<(const InjectionArgument &other) const', injectionHeader, 0)
        print_body_line('{', injectionHeader, 0)
        print_body_line('return this->Compare(other) == -1;', injectionHeader, 1)
        print_body_line('}\n', injectionHeader, 0)
        print_body_line('bool operator==(const InjectionArgument &other) const', injectionHeader, 0)
        print_body_line('{', injectionHeader, 0)
        print_body_line('return this->Compare(other) == 0;', injectionHeader, 1)
        print_body_line('}\n', injectionHeader, 0)
        print_body_line('bool IsEmpty() const', injectionHeader, 0)
        print_body_line('{', injectionHeader, 0)
        print_body_line('return m_type == %s;' % INJECTION_ARG_COUNT_STR, injectionHeader, 1)
        print_body_line('}\n', injectionHeader, 0)
        for arg_type in all_argument_types:
            # Write constructor
            typeTuple = arg_type_dict[arg_type]
            if is_pointer_type(arg_type):
                print_body_line('InjectionArgument(%s%s)' % (arg_type, typeTuple[0]), injectionHeader, 0)
            else:
                print_body_line('InjectionArgument(%s %s)' % (arg_type, typeTuple[0]), injectionHeader, 0)
            print_body_line(': m_type(%s)' % (enum_dict[arg_type]), injectionHeader, 1)
            print_body_line('{', injectionHeader, 0)
            print_body_line('memset(&m_value, 0, sizeof(m_value));', injectionHeader, 1)
            print_body_line('m_value.%s = %s;' % (typeTuple[0], typeTuple[0]), injectionHeader, 1)
            print_body_line('}', injectionHeader, 0)
            # Write As* function
            if typeTuple[1]:
                print_body_line('%sAs%s() const' % (arg_type, typeTuple[2]), injectionHeader, 0)
            else:
                print_body_line('%s As%s() const' % (arg_type, typeTuple[2]), injectionHeader, 0)
            print_body_line('{', injectionHeader, 0)
            print_body_line('return m_value.%s;' % typeTuple[0], injectionHeader, 1)
            print_body_line('}\n', injectionHeader, 0)
        # std::string constructor and accessor
        print_body_line('InjectionArgument(const std::string &val)', injectionHeader, 0)
        print_body_line(': m_type(INJECTION_STRING)', injectionHeader, 1)
        print_body_line(', m_str(val)', injectionHeader, 1)
        print_body_line('{', injectionHeader, 0)
        print_body_line('memset(&m_value, 0, sizeof(m_value));', injectionHeader, 1)
        print_body_line('}', injectionHeader, 0)
        print_body_line('std::string AsString() const', injectionHeader, 0)
        print_body_line('{', injectionHeader, 0)
        print_body_line('switch (m_type)', injectionHeader, 1)
        print_body_line('{', injectionHeader, 1)
        print_body_line('case INJECTION_STRING:', injectionHeader, 2)
        print_body_line('{', injectionHeader, 2)
        print_body_line('return m_str;', injectionHeader, 3)
        print_body_line('}', injectionHeader, 2)
        print_body_line('break;', injectionHeader, 3)
        print_body_line('case INJECTION_CHAR_PTR:', injectionHeader, 2)
        print_body_line('{', injectionHeader, 2)
        print_body_line('if (m_value.str != nullptr)', injectionHeader, 3)
        print_body_line('{', injectionHeader, 3)
        print_body_line('return std::string(m_value.str);', injectionHeader, 4)
        print_body_line('}', injectionHeader, 3)
        print_body_line('break;', injectionHeader, 3)
        print_body_line('}', injectionHeader, 2)
        print_body_line('case INJECTION_CONST_CHAR_PTR:', injectionHeader, 2)
        print_body_line('{', injectionHeader, 2)
        print_body_line('if (m_value.const_str != nullptr)', injectionHeader, 3)
        print_body_line('{', injectionHeader, 3)
        print_body_line('return std::string(m_value.const_str);', injectionHeader, 4)
        print_body_line('}', injectionHeader, 3)
        print_body_line('break;', injectionHeader, 3)
        print_body_line('}', injectionHeader, 2)
        print_body_line('default:', injectionHeader, 2)
        print_body_line('break;', injectionHeader, 3)
        print_body_line('}', injectionHeader, 1)
        print_body_line('return "";', injectionHeader, 1)
        print_body_line('}', injectionHeader, 0)
        injectionHeader.write('};\n')
    write_injection_argument_cpp(enum_name_to_type_dict, output_dir, arg_type_dict)
    return enum_dict
#######################################################################################
def get_enum_from_arg_type(enum_dict, arg_type):
    """Return the injection enum name for *arg_type*, normalizing it first."""
    return enum_dict[get_true_arg_type(arg_type)]
#######################################################################################
def write_key_file(output_dir):
    """Generate the key-constant source (definitions) and header (extern
    declarations) for every registered injection key in g_key_to_function."""
    source_path = "%s/%s" % (output_dir, KEY_LIST_PATH)
    with open(source_path, 'w') as source:
        source.write(AUTO_GENERATED_NOTICE)
        source.write("// clang-format off\n")
        for key, functions in g_key_to_function.items():
            source.write(
                "const char *INJECTION_%s_KEY = \"%s\"; // Function name(s): %s\n"
                % (key.upper(), key, functions))
    header_path = "%s/%s" % (output_dir, KEY_LIST_HEADER_PATH)
    with open(header_path, 'w') as header:
        header.write(AUTO_GENERATED_NOTICE)
        for key in g_key_to_function:
            header.write("extern const char *INJECTION_%s_KEY;\n" % (key.upper()))
#######################################################################################
def write_linux_defs(output_dir, func_dict):
    """Generate the Linux linker version script: export the hand-written entry
    points, every generated NVML function, and C++ RTTI symbols; hide the rest."""
    defs_path = '%s/%s' % (output_dir, LINUX_DEFS_PATH)
    # Entry points implemented by hand rather than generated
    manually_written_functions = [
        'injectionNvmlInit',
        'nvmlDeviceSimpleInject',
        'nvmlDeviceInjectExtraKey',
        'nvmlDeviceInjectFieldValue',
    ]
    with open(defs_path, 'w') as defs:
        defs.write('{\n global:\n')
        for name in manually_written_functions:
            print_body_line('%s;' % name, defs, 1)
        for name in func_dict:
            print_body_line('%s;' % name, defs, 1)
        # Export typeinfo (_ZTI*) and typeinfo-name (_ZTS*) symbols
        print_body_line('extern "C++" {', defs, 1)
        print_body_line('_ZTI*;', defs, 2)
        print_body_line('_ZTS*;', defs, 2)
        print_body_line('};\n', defs, 1)
        print_body_line('local:', defs, 0)
        print_body_line('*;', defs, 1)
        print_body_line('extern "C++" {', defs, 1)
        print_body_line('*;', defs, 2)
        print_body_line('};', defs, 1)
        defs.write('};')
#######################################################################################
def parse_entry_points_contents(contents, output_dir):
    """Parse the NVML entry-point macro text and generate all output files.

    Arguments:
        contents (str) - raw text of the entry-points header
        output_dir (str) - directory that receives every generated file
    """
    function_dict = {}
    all_argument_types = []
    all_functions = AllFunctions()
    all_functypes = AllFunctionTypes()
    # Each entry point is declared via the NVML_ENTRY_POINT macro
    entry_points = contents.split('NVML_ENTRY_POINT')
    total_funcs = 0
    auto_generated = 0
    not_generated = []
    outputStubPath = '%s/%s' % (output_dir, STUB_PATH)
    with open(outputStubPath, 'w') as stub_file:
        write_stub_file_header(stub_file)
        first = True
        for entry_point in entry_points:
            funcname, arg_list, arg_names = get_function_signature(entry_point, first)
            first = False
            if funcname and arg_list:
                fi = FunctionInfo(funcname, arg_list, arg_names)
                all_functions.AddFunction(fi)
        #all_functions.RemoveEarlierVersions()
        for funcname in all_functions.func_dict:
            funcinfo = all_functions.func_dict[funcname]
            all_functypes.AddFunctionType(funcname, funcinfo)
        for funcname in all_functions.func_dict:
            funcinfo = all_functions.func_dict[funcname]
            build_argument_type_list(funcinfo.GetArgumentList(), all_argument_types)
            if write_function(stub_file, funcinfo, all_functypes):
                auto_generated = auto_generated + 1
            else:
                not_generated.append(funcname)
            total_funcs = total_funcs + 1
            # Bug fix: this previously stored the stale `arg_list` left over
            # from the parsing loop above (i.e. the LAST entry point's args)
            # for every function; record this function's own argument list.
            function_dict[funcname] = funcinfo.GetArgumentList()
        stub_file.write("#ifdef __cplusplus\n}\n")
        stub_file.write("#endif\n")
        stub_file.write('// END nvml_generated_stubs')
    write_key_file(output_dir)
    write_linux_defs(output_dir, all_functions.func_dict)
    enum_dict = write_injection_argument_header(all_argument_types, output_dir)
    write_declarations_file(all_functypes.GetAllFunctionDeclarations(), output_dir)
    #write_function_info(enum_dict, function_dict, output_dir)
    print("I was able to generate the injection body for %d of %d functions" % (auto_generated, total_funcs))
    with open('ungenerated.txt', 'w') as ungenerated:
        ungenerated.write('The following were not auto-generated:\n\n')
        for ungen in not_generated:
            ungenerated.write("%s\n" % ungen)
#######################################################################################
def parse_entry_points(inputPath, output_dir):
    """Read the NVML entry-point header at *inputPath* and generate all outputs."""
    with open(inputPath, 'r') as entry_file:
        parse_entry_points_contents(entry_file.read(), output_dir)
#######################################################################################
def main():
    """CLI entry point: parse arguments and run the stub generator."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-i', '--input-file', default='sdk/nvml/entry_points.h', dest='inputPath')
    arg_parser.add_argument('-o', '--output-dir', default='.', dest='outputDir')
    options = arg_parser.parse_args()
    parse_entry_points(options.inputPath, options.outputDir)


if __name__ == '__main__':
    main()
# TODO: delete this once we're sure we aren't using it
#######################################################################################
def write_function_info(enum_dict, function_dict, output_dir):
    """Generate the functionInfos[] table mapping each NVML function name to
    its argument count and per-argument enum types.

    NOTE: currently unused (its only call site is commented out) and marked
    for deletion by the TODO above.
    """
    function_info_path = '%s/%s' % (output_dir, FUNCTION_INFO_PATH)
    function_args_dict = {}
    with open(function_info_path, 'w') as funcInfoFile:
        funcInfoFile.write(AUTO_GENERATED_NOTICE)
        funcInfoFile.write('#include <nvml.h>\n')
        funcInfoFile.write('#include <%s>\n\n' % INJECTION_ARGUMENT_HEADER)
        funcInfoFile.write('functionInfo_t functionInfos[] = {\n')
        for functionName in function_dict:
            argument_types = get_argument_types_from_argument_list(function_dict[functionName])
            function_args_dict[functionName] = [function_dict[functionName], argument_types]
            type_list_str = ''
            argCount = 0
            # Build a comma-separated list of the argument type enums
            for arg_type in argument_types:
                argCount += 1
                if type_list_str == '':
                    type_list_str = '%s' % get_enum_from_arg_type(enum_dict, arg_type)
                else:
                    type_list_str += ', %s' % get_enum_from_arg_type(enum_dict, arg_type)
            type_list_str += ' '
            # Pad the fixed-size array with the sentinel enum value
            for i in range(0, MAX_NVML_ARGS - argCount):
                type_list_str += ', %s' % INJECTION_ARG_COUNT_STR
            print_body_line('{ \"%s\", %d, { %s } },\n' % (functionName, argCount, type_list_str), funcInfoFile, 0)
        funcInfoFile.write('};\n\n')
| DCGM-master | nvml-injection/scripts/generate_nvml_stubs.py |
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
""" Config module """
import os
import configparser
class Config(configparser.SectionProxy): #pylint: disable=too-many-ancestors
    """ Wraps a ConfigParser dict-like to allow overriding the agent config via environment variables """

    def __init__(self, filename):
        """Load *filename* and expose its [AGENT] section."""
        config_parser = configparser.ConfigParser()
        config_parser.read(filename)
        super().__init__(config_parser, 'AGENT')

    def __getitem__(self, key):
        """Return AIR_AGENT_<key> from the environment if set, else the file value."""
        file_value = super().__getitem__(key)
        return os.getenv(f'AIR_AGENT_{key}', file_value)
| air_agent-main | config.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# Version string for the NVIDIA Air agent.
AGENT_VERSION = '3.0.1'
| air_agent-main | version.py |
air_agent-main | __init__.py |
|
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Platform detection functions
"""
import logging
import subprocess
import traceback
def detect():
    """
    Detect the current platform via ``lsb_release``.

    Returns:
        tuple - (os, release); either element is None if detection failed
    """
    os_name = None
    release = None
    try:
        proc = subprocess.run(['lsb_release', '-i'], check=True, stdout=subprocess.PIPE)
        os_name = proc.stdout.decode().split('\t')[1].rstrip()
        # Only ask for the release once the OS lookup succeeded
        try:
            proc = subprocess.run(['lsb_release', '-r'], check=True, stdout=subprocess.PIPE)
            release = proc.stdout.decode().split('\t')[1].rstrip()
        except Exception:  # was a bare except: don't trap SystemExit/KeyboardInterrupt
            logging.debug(traceback.format_exc())
            logging.warning('Platform detection failed to determine Release')
    except Exception:  # lsb_release missing or produced unexpected output
        logging.debug(traceback.format_exc())
        logging.warning('Platform detection failed to determine OS')
    logging.debug(f'Detected OS: {os_name} and Release: {release}')
    return os_name, release
| air_agent-main | platform_detect.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Executor functions for post-clone instructions
"""
import json
import logging
import subprocess
import traceback
def shell(instructions):
    """
    Executor for shell commands

    Arguments:
    instructions (list) - A list of '\n' delimited commands to execute in the system's default shell

    Returns:
    bool - True if all commands executed successfully (stops at the first failure)
    """
    for line in instructions.split('\n'):
        logging.info(f'EXEC shell :: {line}')
        try:
            # shell=True is intentional here: the API sends full shell command lines
            subprocess.run(line, shell=True, check=True)
        except Exception:  # was a bare except: don't trap SystemExit/KeyboardInterrupt
            logging.error(f'Command `{line}` failed')
            logging.debug(traceback.format_exc())
            return False
    return True
def file(instructions):
    """
    Executor for file transfers

    Arguments:
    instructions (dict) - A dictionary in the form of {'filename': 'contents', 'post_cmd': ['cmd']}

    Returns:
    bool - True if all files were copied and all post_cmds were executed successfully
    """
    success = True
    post_cmd = []
    try:
        json_data = json.loads(instructions)
    except json.decoder.JSONDecodeError as err:
        logging.error(f'Failed to decode instructions as JSON: {err}')
        return False
    if 'post_cmd' in json_data.keys():
        post_cmd = json_data.pop('post_cmd')
        # Accept a single command as well as a list of commands
        if not isinstance(post_cmd, list):
            post_cmd = [post_cmd]
    for filename, content in json_data.items():
        # Fixed: log the actual target filename (the messages previously
        # contained a literal placeholder and `filename` went unused)
        logging.info(f'EXEC file :: writing {filename}')
        logging.debug(content)
        try:
            with open(filename, 'w') as outfile:
                outfile.write(content)
        except Exception:  # was a bare except: don't trap SystemExit/KeyboardInterrupt
            logging.debug(traceback.format_exc())
            logging.error(f'Failed to write {filename}')
            success = False
    for cmd in post_cmd:
        logging.info(f'EXEC file :: {cmd}')
        try:
            subprocess.run(cmd, shell=True, check=True)
        except Exception:
            logging.debug(traceback.format_exc())
            logging.error(f'post_cmd `{cmd}` failed')
            success = False
    return success
def init(data):
    """
    Executor for init instructions

    Arguments:
    data (dict) - A dictionary containing the init instruction data

    Returns:
    bool - True if all init instructions were executed successfully
    """
    try:
        parsed = json.loads(data)
    except json.decoder.JSONDecodeError as err:
        logging.error(f'Failed to decode init data as JSON: {err}')
        return False
    success = True
    if 'hostname' in parsed.keys():
        logging.info(f'EXEC init :: setting hostname to {parsed["hostname"]}')
        success = shell(f'hostnamectl set-hostname {parsed["hostname"]}')
    return success
# Maps executor names to their handler functions.
EXECUTOR_MAP = {'file': file, 'shell': shell, 'init': init}
| air_agent-main | executors.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
The NVIDIA Air Agent is a systemd service that detects if a VM has been cloned.
When a clone operation has been detected, it calls out to the Air API to see if there are any
post-clone instructions available to execute.
"""
import argparse
import glob
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import threading
import time
import traceback
from datetime import datetime, timedelta
from pathlib import Path
from time import sleep
from cryptography.fernet import Fernet
import git
import requests
import executors
import platform_detect
from config import Config
from version import AGENT_VERSION
class Agent:
"""
Agent daemon
"""
def __init__(self, config):
    """Initialize the agent daemon.

    Arguments:
        config (Config) - Parsed agent configuration ([AGENT] section)
    """
    self.config = config
    # Legacy URL migration: rewrite Cumulus-era API hostname to the NVIDIA one
    self.config['AIR_API'] = self.config['AIR_API'].replace('cumulusnetworks.com', 'nvidia.com')
    self.identity = self.get_identity()  # VM UUID read from the key drive
    self.lock = threading.Lock()
    self.monitoring = False
    self.hwclock_switch = None  # set by set_hwclock_switch() below
    self.verify_ssl = self.config.getboolean('VERIFY_SSL', True)
    self.set_hwclock_switch()
    fix_clock()
    self.auto_update()
    logging.info(f'Initializing with identity {self.identity}')
    self.os, self.release = platform_detect.detect()
def unlock(self):
""" Unlocks the agent lock if locked """
try:
self.lock.release()
except RuntimeError:
pass
def set_hwclock_switch(self):
    """
    Detects util-linux's hwclock version. Versions >= 2.32 should use --verbose
    when reading the hardware clock. Older versions should use --debug.

    Returns:
        str - The appropriate switch to use. Defaults to --debug.
    """
    try:
        output = subprocess.check_output('hwclock --version', shell=True)
        # Pull the x.y.z version out of the tool's banner
        match = re.match(r'.*(\d+\.\d+\.\d+)', output.decode('utf-8'))
        version = match.groups()[0]
        # NOTE(review): lexicographic comparison, not numeric -- e.g. a
        # hypothetical '2.100' would compare less than '2.32'; verify if
        # newer util-linux version strings appear
        if version >= '2.32':
            logging.debug('Detected hwclock switch: --verbose')
            self.hwclock_switch = '--verbose'
            return
    except Exception:
        logging.debug(traceback.format_exc())
        logging.info('Failed to detect hwclock switch, falling back to --debug')
    logging.debug('Detected hwclock switch: --debug')
    self.hwclock_switch = '--debug'
def get_identity(self):
    """
    Gets the VM's identity (UUID) via its key drive

    Returns:
        str - The VM UUID (ex: 'abcdefab-0000-1111-2222-123456789012'),
              or None if the key drive or identity file is unavailable
    """
    key_dir = self.config['KEY_DIR']
    # Re-mount the key drive so a freshly cloned VM sees its new identity files
    if not mount_device(self.config):
        logging.error(f'Failed to refresh {key_dir}')
        logging.debug(traceback.format_exc())
        return None
    uuid_path = glob.glob(f'{key_dir}uuid*.txt')
    if uuid_path:
        # Use the first matching uuid*.txt file
        uuid_path = uuid_path[0]
        logging.debug(f'Checking for identity at {uuid_path}')
        with open(uuid_path) as uuid_file:
            return uuid_file.read().strip().lower()
    else:
        logging.error('Failed to find identity file')
        return None
def check_identity(self):
    """
    Checks the VM's current identity against its initialized identity

    Returns:
        bool - True if the VM's identity is still the same
    """
    latest_identity = self.get_identity()
    logging.debug(f'Initialized identity: {self.identity}, ' + \
                  f'Current identity: {latest_identity}')
    return latest_identity == self.identity
def get_key(self, identity):
    """
    Fetch's the VM's decryption key from its key drive

    Arguments:
    identity (str) - The VM's current UUID. This is used to validate we have
    the correct key file.

    Returns:
    str - The decryption key, or None when no key file exists
    """
    logging.debug(f'Getting key for {identity}')
    filename = identity.split('-')[0]
    key_dir = self.config['KEY_DIR']
    # The key file is named after the first UUID segment. The previous code
    # interpolated a broken placeholder here, so `filename` was never used
    # and the lookup could not succeed.
    key_path = f'{key_dir}{filename}.txt'
    logging.debug(f'Checking for key at {key_path}')
    if Path(key_path).is_file():
        with open(key_path) as key_file:
            return key_file.read().strip()
    logging.error(f'Failed to find decryption key for {identity}')
    return None
def decrypt_instructions(self, instructions, identity):
    """
    Decrypts a set of instructions received from the Air API

    Arguments:
    instructions (list) - A list of encrypted instructions received from the API
    identity (str) - The VM's current UUID

    Returns:
    list - A list of decrypted instructions (empty when no key is available)
    """
    key = self.get_key(identity)
    if not key:
        return []
    logging.debug('Decrypting post-clone instructions')
    crypto = Fernet(key)
    return [
        json.loads(crypto.decrypt(bytes(item['instruction'], 'utf-8')))
        for item in instructions
    ]
def get_instructions(self):
    """
    Fetches a set of post-clone instructions from the Air API

    Returns:
    list - A list of instructions on success, or False if an error occurred
    """
    logging.debug('Getting post-clone instructions')
    identity = self.get_identity()
    url = self.config['AIR_API'] + f'simulation-node/{identity}/instructions/'
    try:
        if not identity:
            raise Exception('No identity')
        res = requests.get(url, timeout=10, verify=self.verify_ssl)
        instructions = res.json()
        logging.debug(f'Encrypted instructions: {instructions}')
    except:  # noqa: E722 - deliberately broad; any failure means "try again later"
        logging.error('Failed to get post-clone instructions')
        logging.debug(traceback.format_exc())
        return False
    instructions = self.decrypt_instructions(instructions, identity)
    logging.debug(f'Decrypted instructions: {instructions}')
    return instructions
def delete_instructions(self):
    """
    Deletes instructions via the Air API. This serves as an indication that the
    instructions have been successfully executed (i.e. they do not need to be
    re-tried)
    """
    logging.debug('Deleting post-clone instructions')
    url = self.config['AIR_API']
    url += f'simulation-node/{self.identity}/instructions/'
    try:
        requests.delete(url, verify=self.verify_ssl)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate;
        # the delete is best-effort, so request failures are only logged.
        logging.error('Failed to delete post-clone instructions')
        logging.debug(traceback.format_exc())
def signal_watch(self, attempt=1, test=False):
    """
    Waits for a signal from the Air Worker and proceeds accordingly. This runs
    in a loop so it is intended to be executed in a separate thread.

    Arguments:
    attempt [int] - The attempt number used for retries
    test [bool] - Used for CI testing to avoid infinite loops (default: False)
    """
    try:
        channel = open(self.config['CHANNEL_PATH'], 'wb+', buffering=0)
        logging.debug(f'Opened channel path {self.config["CHANNEL_PATH"]}')
        while True:
            data = channel.readline().decode('utf-8')
            logging.debug(f'Got signal data {data}')
            signal = ''
            if data:
                timestamp, signal = data.split(':')
            if signal == 'checkinst\n':
                logging.debug('signal_watch :: Checking for instructions')
                if parse_instructions(self, channel=channel):
                    logging.debug('Channel success')
                    channel.write(f'{timestamp}:success\n'.encode('utf-8'))
                else:
                    logging.debug('Channel error')
                    channel.write(f'{timestamp}:error\n'.encode('utf-8'))
            elif signal.startswith('resize'):
                _, rows, cols = signal.strip('\n').split('_')
                logging.debug(f'resizing serial console: {rows}x{cols}')
                subprocess.run(['stty', '-F', '/dev/ttyS0', 'rows', str(rows)], check=False)
                subprocess.run(['stty', '-F', '/dev/ttyS0', 'cols', str(cols)], check=False)
            sleep(1)
            if test:
                break
    except Exception as err:
        try:
            channel.close()
        except Exception:
            pass  # channel may never have been opened
        logging.debug(traceback.format_exc())
        if attempt > 3:
            logging.error(f'signal_watch :: {err}. Ending thread.')
            return
        backoff = attempt * 10
        logging.warning(f'signal_watch :: {err} (attempt #{attempt}). '
                        f'Trying again in {backoff} seconds...')
        sleep(backoff)
        self.signal_watch(attempt + 1, test=test)
def monitor(self, channel, test=False, **kwargs):
    """
    Worker target for a monitor thread. Writes any matching updates to the channel.

    Arguments:
    channel (fd) - Comm channel with worker
    test [bool] - If True, the monitor loop ends after 1 iteration (used for unit testing)
    **kwargs (dict) - Required:
    - file: full path of the file to monitor
    - pattern: regex that should be considered a match for a progress update
    """
    filename = kwargs.get('file')
    if not filename:
        return
    pattern = kwargs.get('pattern')
    # Both log lines previously interpolated a broken placeholder instead of
    # the monitored filename.
    logging.info(f'Starting monitor for {filename}')
    # Wait for the monitored file to appear (the producer may not have
    # started writing yet).
    while self.monitoring and not os.path.exists(filename):
        time.sleep(1)
    with open(filename, 'r') as monitor_file:
        while self.monitoring:
            line = monitor_file.readline()
            if line:
                logging.debug(f'monitor :: {line}')
                match = re.match(pattern, line)
                if match and match.groups():
                    logging.debug(f'monitor :: found match {match.groups()[0]}')
                    channel.write(f'{int(time.time())}:{match.groups()[0]}\n'.encode('utf-8'))
            time.sleep(0.5)
            if test:
                break
    logging.info(f'Stopping monitor for {filename}')
def clock_jumped(self):
    """
    Returns True if the system's time has drifted by +/- 30 seconds from the
    hardware clock
    """
    system_time = datetime.now()
    try:
        cmd = f'hwclock {self.hwclock_switch} | grep "Hw clock"'
        hwclock_output = subprocess.check_output(cmd, shell=True)
        match = re.match(r'.* (\d+) seconds since 1969', hwclock_output.decode('utf-8'))
        if not match:
            raise Exception('Unable to parse hardware clock')
        hw_time = datetime.fromtimestamp(int(match.groups()[0]))
    except:  # noqa: E722 - deliberately bare; any failure forces a safe resync
        logging.debug(traceback.format_exc())
        hw_time = datetime.fromtimestamp(0)
        logging.warning('Something went wrong. Syncing clock to be safe...')
    delta = system_time - hw_time
    logging.debug(f'System time: {system_time}, Hardware time: {hw_time}, Delta: {delta}')
    # Drift in either direction beyond 30s counts as a jump
    return abs(delta) > timedelta(seconds=30)
def auto_update(self):
    """ Checks for and applies new agent updates if available """
    if not self.config.getboolean('AUTO_UPDATE'):
        logging.debug('Auto update is disabled')
        return
    logging.info('Checking for updates')
    try:
        res = requests.get(self.config['VERSION_URL'])
        # VERSION_URL serves a line like: AGENT_VERSION = '1.2.3'
        latest = res.text.split(' = ')[1].strip().strip("'")
        if AGENT_VERSION != latest:
            logging.debug('New version is available')
        else:
            logging.debug('Already running the latest version')
            return
    except Exception as err:
        logging.debug(traceback.format_exc())
        logging.error(f'Failed to check for updates: {err}')
        return
    logging.info('Updating agent')
    try:
        shutil.rmtree('/tmp/air-agent')
    except Exception:
        pass  # a leftover checkout may not exist; that's fine
    try:
        git.Repo.clone_from(self.config['GIT_URL'], '/tmp/air-agent',
                            branch=self.config['GIT_BRANCH'])
        cwd = os.getcwd()
        for filename in os.listdir('/tmp/air-agent'):
            # endswith avoids matching names that merely contain '.py';
            # the move paths previously interpolated a broken placeholder
            # instead of the filename.
            if filename.endswith('.py'):
                shutil.move(f'/tmp/air-agent/{filename}', f'{cwd}/{filename}')
    except Exception as err:
        logging.debug(traceback.format_exc())
        logging.error(f'Failed to update agent: {err}')
        return
    logging.info('Restarting agent')
    # Replace the current process with a fresh interpreter running the
    # updated code.
    os.execv(sys.executable, ['python3'] + sys.argv)
def clock_watch(self, **kwargs):
    """
    Watches for clock jumps and updates the clock
    """
    logging.debug('Starting clock watch thread')
    while True:
        wait = self.config.getint('CHECK_INTERVAL')
        if self.clock_jumped():
            fix_clock()
            # back off for an extra 5 minutes after a correction
            wait += 300
        sleep(wait)
        if kwargs.get('test'):
            break
def parse_args():
    """
    Helper function to provide command line arguments for the agent
    """
    default_config_file = '/mnt/air/agent.ini'
    year = datetime.now().year
    parser = argparse.ArgumentParser(description=f'Air Agent service (NVIDIA © {year})')
    help_text = ('Location of the service\'s config file. '
                 'Normally this will be injected automatically by the Air platform '
                 f'(default: {default_config_file})')
    parser.add_argument('-c', '--config-file', help=help_text,
                        default=default_config_file)
    return parser.parse_args()
def parse_instructions(agent, attempt=1, channel=None, lock=True):
    """
    Parses and executes a set of instructions from the Air API

    Arguments:
    agent (Agent) - An Agent instance
    attempt [int] - The attempt number used for retries
    channel [fd] - Comm channel to the worker
    lock [bool] - Block the agent instance from performing other operations
    """
    if lock:
        agent.lock.acquire()
    results = []
    backoff = attempt * 10
    instructions = agent.get_instructions()
    if instructions == []:
        logging.info('Received no instructions')
        agent.identity = agent.get_identity()
        agent.unlock()
        return True
    if instructions is False:
        if attempt > 3:
            logging.error('Failed to fetch instructions. Giving up.')
            agent.unlock()
            return False
        logging.warning(f'Failed to fetch instructions on attempt #{attempt}.'
                        f'Retrying in {backoff} seconds...')
        sleep(backoff)
        return parse_instructions(agent, attempt + 1, channel, lock=False)
    for instruction in instructions:
        executor = instruction['executor']
        if executor == 'init' and not agent.os:
            logging.debug('Skipping init instructions due to missing os')
            continue
        if instruction.get('monitor'):
            agent.monitoring = True
            threading.Thread(target=agent.monitor, args=(channel,),
                             kwargs=json.loads(instruction['monitor'])).start()
        if executor in executors.EXECUTOR_MAP:
            results.append(executors.EXECUTOR_MAP[executor](instruction['data']))
        else:
            logging.warning(f'Received unsupported executor {executor}')
    agent.monitoring = False
    if results and all(results):
        logging.debug('All instructions executed successfully')
        agent.identity = agent.get_identity()
        agent.delete_instructions()
        agent.unlock()
        return True
    if results:
        if attempt > 3:
            logging.error('Failed to execute all instructions. Giving up.')
            agent.unlock()
            return False
        logging.warning(f'Failed to execute all instructions on attempt #{attempt}. '
                        f'Retrying in {backoff} seconds...')
        sleep(backoff)
        return parse_instructions(agent, attempt + 1, channel, lock=False)
def restart_ntp():
    """
    Restarts any running ntpd or chrony service that might be running. Includes
    support for services running in a VRF.
    """
    unit_list = subprocess.check_output(
        'systemctl list-units -t service --plain --no-legend', shell=True)
    for entry in unit_list.decode('utf-8').split('\n'):
        unit = entry.split(' ')[0]
        if re.match(r'(ntp|chrony).*\.service', unit):
            logging.info(f'Restarting {unit}')
            subprocess.call(f'systemctl restart {unit}', shell=True)
def fix_clock():
    """
    Fixes the system's time by 1) syncing the clock from the hypervisor, and
    2) Restarting any running NTP/chrony service
    """
    try:
        logging.info('Syncing clock from hypervisor')
        subprocess.run('hwclock -s', shell=True)  # sync from hardware
        restart_ntp()
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # propagate; clock fixing stays best-effort.
        logging.debug(traceback.format_exc())
        logging.error('Failed to fix clock')
def start_daemon(agent, test=False):
    """
    Main worker function. Starts an infinite loop that periodically checks its
    identity and, if changed, asks the API for post-clone instructions.

    Arguments:
    agent (Agent) - An Agent instance
    [test] (bool) - Used in unit testing to avoid infinite loop
    """
    threading.Thread(target=agent.clock_watch).start()
    parse_instructions(agent)  # do an initial check for instructions
    threading.Thread(target=agent.signal_watch).start()
    while True:
        if not agent.check_identity():
            logging.info('Identity has changed!')
            fix_clock()
            agent.auto_update()
            parse_instructions(agent)
        sleep(agent.config.getint('CHECK_INTERVAL'))
        if test:
            break
def check_devices(config):
    """
    Tests for the presence of the /dev/vdb device. If it exists, allow the agent
    to continue. If it does not exist, then exit with a success code so the
    service doesn't fail, but do not start the daemon thread

    Returns:
    bool - True when the device, mount, and uuid file are all present
    """
    device = config.get('KEY_DEVICE', '/dev/vdb')
    if not os.path.exists(device):
        logging.info(f'{device} does not exist - agent will not be started')
        return False
    if not mount_device(config):
        return False
    try:
        # check=True is required so a missing uuid file actually raises;
        # without it subprocess.run never fails and the except branch below
        # was dead code.
        subprocess.run('ls /mnt/air/uuid*', shell=True, check=True)
    except Exception:
        logging.info(f'Failed to find expected files on {device} filesystem - agent will not be started')
        logging.debug(traceback.format_exc())
        return False
    return True
def mount_device(config):
    """
    Mounts /dev/vdb to the directory specified in the config file. Unmounts the
    directory before attempting the mount to refresh the contents in the event
    this node was cloned.
    """
    device = config.get('KEY_DEVICE', '/dev/vdb')
    key_dir = config.get('KEY_DIR', '/mnt/air')
    if os.path.exists(key_dir):
        try:
            subprocess.run(f'umount {key_dir} 2>/dev/null', shell=True)
        except subprocess.CalledProcessError:
            logging.debug(f'{key_dir} exists but is not mounted')
    else:
        logging.debug(f'{key_dir} does not exist, creating')
        try:
            os.makedirs(key_dir)
        except:  # noqa: E722 - any failure to create the mount point is fatal here
            logging.error(f'Failed to create the {key_dir} directory')
            logging.debug(traceback.format_exc())
            return False
    try:
        subprocess.run(f'mount {device} {key_dir} 2>/dev/null', shell=True)
    except:  # noqa: E722
        logging.error(f'Failed to mount {device} to {key_dir}')
        logging.debug(traceback.format_exc())
        return False
    return True
if __name__ == '__main__':
    ARGS = parse_args()
    CONFIG = Config(ARGS.config_file)
    # Fall back to WARNING unless the config names a recognized level
    LOG_LEVEL = 'WARNING'
    if CONFIG.get('LOG_LEVEL', '').upper() in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'):
        LOG_LEVEL = CONFIG['LOG_LEVEL'].upper()
    LOG_FILE = CONFIG.get('LOG_FILE', '/var/log/air-agent.log')
    logging.basicConfig(filename=LOG_FILE, level=LOG_LEVEL,
                        format='%(asctime)s %(levelname)s %(message)s')
    if check_devices(CONFIG):
        CONFIG = Config(ARGS.config_file)  # reload config in case key_dir was remounted
        AGENT = Agent(CONFIG)
        logging.info(f'Starting Air Agent daemon v{AGENT_VERSION}')
        start_daemon(AGENT)  # never returns in normal operation
    # the necessary filesystem was not present or config files were missing,
    # exit with a success code so the service does not fail
    logging.critical('The agent was not started because the necessary files were not found!')
    sys.exit(0)
| air_agent-main | agent.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Test utils
"""
import configparser
CONFIG_FILE = 'tests/agent.ini'


def load_config():
    """
    Helper function to load the test config file.

    Returns:
    dict - A dictionary of all loaded config values
    """
    parser = configparser.ConfigParser()
    parser.read(CONFIG_FILE)
    return parser['AGENT']
| air_agent-main | tests/util.py |
air_agent-main | tests/__init__.py |
|
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for Config module
"""
#pylint: disable=unused-argument,missing-class-docstring,missing-function-docstring
#pylint: disable=arguments-differ,no-self-use,too-many-public-methods,too-many-arguments
import tempfile
from unittest import TestCase
from unittest.mock import patch
import config
class TestConfig(TestCase):
    """Unit tests for the config.Config wrapper."""

    def setUp(self):
        # Build a throwaway INI file with a single [AGENT] entry and load it
        self.key = 'TEST_KEY'
        self.value = 'testing'
        with tempfile.NamedTemporaryFile() as cfg_file:
            cfg_file.write(b'[AGENT]\n')
            cfg_file.write(f'{self.key}={self.value}\n'.encode('utf-8'))
            cfg_file.seek(0)
            self.config = config.Config(cfg_file.name)

    def test_getitem(self):
        # Values from the INI file are exposed via subscript access
        self.assertEqual(self.config[self.key], self.value)

    def test_getitem_env_override(self):
        # An AIR_AGENT_<KEY> environment variable takes precedence over the file
        value = 'foo'
        with patch('config.os.getenv', return_value=value) as mock_env:
            self.assertEqual(self.config[self.key], value)
            mock_env.assert_called_once_with(f'AIR_AGENT_{self.key}', self.value)
| air_agent-main | tests/test_config.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Unit tests for Agent module
"""
#pylint: disable=unused-argument,missing-class-docstring,missing-function-docstring
#pylint: disable=arguments-differ,no-self-use,too-many-public-methods,too-many-arguments
import json
import subprocess
import sys
import threading
from datetime import datetime
from unittest import TestCase
from unittest.mock import call, MagicMock, patch
from cryptography.fernet import Fernet
import agent
import executors
import platform_detect
from agent import Agent
from . import util
class TestAgentIdentity(TestCase):
    """Tests for Agent.get_identity and its key-drive failure modes.

    Note: mock arguments map to @patch decorators bottom-up.
    """

    def setUp(self):
        self.config = util.load_config()

    @patch('subprocess.run')
    @patch('glob.glob', return_value=['./uuid_123.txt'])
    @patch('builtins.open')
    @patch('agent.parse_instructions')
    @patch('agent.fix_clock')
    def test_get_identity(self, mock_fix, mock_parse, mock_open, mock_glob, mock_run):
        # The uuid file contents are stripped and lower-cased
        mock_file = MagicMock()
        mock_file.read = MagicMock(return_value='ABC\n')
        mock_open.return_value.__enter__.return_value = mock_file
        agent_obj = Agent(self.config)
        res = agent_obj.get_identity()
        key_dir = self.config['KEY_DIR']
        mock_open.assert_called_with(f'{key_dir}uuid_123.txt')
        self.assertEqual(res, 'abc')

    @patch('subprocess.run', side_effect=[True, True, True, subprocess.CalledProcessError(1, 'a'),
                                          True])
    @patch('logging.debug')
    @patch('agent.parse_instructions')
    @patch('agent.fix_clock')
    @patch('agent.platform_detect.detect', return_value=(None, None))
    def test_get_identity_failed_umount(self, mock_detect, mock_fix, mock_parse, mock_log,
                                        mock_run):
        # A failed umount is non-fatal; the path is logged at debug level
        agent_obj = Agent(self.config)
        res = agent_obj.get_identity()
        self.assertIsNone(res)
        key_dir = self.config['KEY_DIR']
        mock_log.assert_called_with(f'{key_dir} exists but is not mounted')

    @patch('subprocess.run', side_effect=[True, True, True, subprocess.CalledProcessError(1, 'a')])
    @patch('logging.error')
    @patch('agent.parse_instructions')
    @patch('agent.fix_clock')
    @patch('agent.platform_detect.detect', return_value=(None, None))
    def test_get_identity_failed_mount(self, mock_detect, mock_fix, mock_parse, mock_log, mock_run):
        # A failed mount means the key drive could not be refreshed
        agent_obj = Agent(self.config)
        res = agent_obj.get_identity()
        self.assertIsNone(res)
        key_dir = self.config['KEY_DIR']
        mock_log.assert_called_with(f'Failed to refresh {key_dir}')

    @patch('subprocess.run')
    @patch('glob.glob', return_value=[])
    @patch('logging.error')
    @patch('agent.parse_instructions')
    @patch('agent.fix_clock')
    def test_get_identity_no_file(self, mock_fix, mock_parse, mock_log, mock_glob, mock_run):
        # No uuid*.txt file on the key drive -> None plus an error log
        agent_obj = Agent(self.config)
        res = agent_obj.get_identity()
        self.assertIsNone(res)
        mock_log.assert_called_with('Failed to find identity file')
class TestAgent(TestCase):
@patch('agent.parse_instructions')
@patch('agent.fix_clock')
def setUp(self, mock_fix, mock_parse):
self.config = util.load_config()
self.mock_id = MagicMock(return_value='123-456')
Agent.get_identity = self.mock_id
self.agent = Agent(self.config)
def test_init(self):
self.assertEqual(self.agent.config, self.config)
self.mock_id.assert_called()
self.assertEqual(self.agent.identity, '123-456')
self.assertFalse(self.agent.monitoring)
self.assertIsInstance(self.agent.lock, type(threading.Lock()))
self.assertTrue(self.agent.verify_ssl)
@patch('agent.parse_instructions')
@patch('agent.Agent.auto_update')
@patch('agent.fix_clock')
def test_init_no_verify_ssl(self, _mock_parse, _mock_update, _mock_fix):
self.config['VERIFY_SSL'] = 'False'
test_agent = Agent(self.config)
self.assertFalse(test_agent.verify_ssl)
@patch('agent.parse_instructions')
@patch('agent.Agent.auto_update')
@patch('agent.fix_clock')
def test_init_update(self, mock_fix, mock_update, mock_parse):
Agent(self.config)
mock_update.assert_called()
@patch('agent.parse_instructions')
@patch('agent.Agent.auto_update')
@patch('agent.fix_clock')
def test_init_fix_clock(self, mock_fix, mock_update, mock_parse):
Agent(self.config)
mock_fix.assert_called()
@patch('agent.parse_instructions')
@patch('agent.Agent.auto_update')
@patch('agent.fix_clock')
def test_init_redirect(self, _mock_fix, _mock_update, _mock_parse):
self.config['AIR_API'] = 'http://air.cumulusnetworks.com'
test_agent = Agent(self.config)
self.assertEqual(test_agent.config['AIR_API'], 'http://air.nvidia.com')
def test_check_identity(self):
res = self.agent.check_identity()
self.assertTrue(res)
self.mock_id.return_value = '456'
res = self.agent.check_identity()
self.assertFalse(res)
@patch('agent.Path')
@patch('builtins.open')
def test_get_key(self, mock_open, mock_path):
mock_path.is_file = MagicMock(return_value=True)
mock_file = MagicMock()
mock_file.read = MagicMock(return_value='foo\n')
mock_open.return_value.__enter__.return_value = mock_file
res = self.agent.get_key('123-456')
self.assertEqual(res, 'foo')
@patch('agent.Path')
@patch('logging.error')
def test_get_key_failed(self, mock_log, mock_path):
mock_path.return_value.is_file = MagicMock(return_value=False)
res = self.agent.get_key('123-456')
self.assertIsNone(res)
mock_log.assert_called_with('Failed to find decryption key for 123-456')
def test_decrypt_instructions(self):
key = Fernet.generate_key()
crypto = Fernet(key)
self.agent.get_key = MagicMock(return_value=key)
token1 = crypto.encrypt(b'{"instruction": "echo foo"}').decode('utf-8')
token2 = crypto.encrypt(b'{"instruction": "echo bar"}').decode('utf-8')
instructions = [{'instruction': token1}, {'instruction': token2}]
res = self.agent.decrypt_instructions(instructions, '123-456')
self.assertListEqual(res, [{'instruction': 'echo foo'}, {'instruction': 'echo bar'}])
@patch('requests.get')
def test_get_instructions(self, mock_get):
instructions = {'foo': 'bar'}
mock_get.json = MagicMock(return_value={'foo': 'encrypted'})
self.mock_id.return_value = '000-000'
self.agent.decrypt_instructions = MagicMock(return_value=instructions)
res = self.agent.get_instructions()
self.assertEqual(res, instructions)
url = self.config['AIR_API'] + 'simulation-node/000-000/instructions/'
mock_get.assert_called_with(url, timeout=10, verify=self.agent.verify_ssl)
@patch('requests.get', side_effect=Exception)
@patch('logging.error')
def test_get_instructions_failed(self, mock_log, mock_get):
instructions = {'foo': 'bar'}
mock_get.json = MagicMock(return_value={'foo': 'encrypted'})
self.mock_id.return_value = '000-000'
self.agent.decrypt_instructions = MagicMock(return_value=instructions)
res = self.agent.get_instructions()
self.assertEqual(res, False)
mock_log.assert_called_with('Failed to get post-clone instructions')
@patch('agent.Agent.get_identity', return_value=False)
@patch('builtins.Exception')
def test_get_instructions_no_identity(self, mock_exception, mock_identity):
self.agent.get_instructions()
mock_exception.assert_called_with('No identity')
@patch('requests.delete')
def test_delete_instructions(self, mock_delete):
url = self.config['AIR_API'] + f'simulation-node/{self.agent.identity}/instructions/'
self.agent.delete_instructions()
mock_delete.assert_called_with(url, verify=self.agent.verify_ssl)
@patch('requests.delete', side_effect=Exception)
@patch('logging.error')
def test_delete_instructions_failed(self, mock_log, mock_delete):
self.agent.delete_instructions()
mock_log.assert_called_with('Failed to delete post-clone instructions')
@patch('builtins.open')
@patch('agent.parse_instructions', return_value=True)
@patch('agent.sleep')
def test_signal_watch(self, mock_sleep, mock_parse, mock_open):
mock_channel = MagicMock()
mock_channel.readline.return_value = b'123456:checkinst\n'
mock_open.return_value = mock_channel
self.agent.signal_watch(test=True)
mock_open.assert_called_with(self.agent.config['CHANNEL_PATH'], 'wb+', buffering=0)
mock_channel.readline.assert_called()
mock_parse.assert_called_with(self.agent, channel=mock_channel)
mock_channel.write.assert_called_with('123456:success\n'.encode('utf-8'))
mock_sleep.assert_called_with(1)
@patch('builtins.open')
@patch('agent.parse_instructions')
@patch('agent.sleep')
def test_signal_watch_unknown_signal(self, mock_sleep, mock_parse, mock_open):
mock_channel = MagicMock()
mock_channel.readline.return_value = b'123456:foo\n'
mock_open.return_value = mock_channel
self.agent.signal_watch(test=True)
mock_parse.assert_not_called()
mock_channel.write.assert_not_called()
@patch('builtins.open')
@patch('agent.parse_instructions', return_value=False)
@patch('agent.sleep')
def test_signal_watch_error(self, mock_sleep, mock_parse, mock_open):
mock_channel = MagicMock()
mock_channel.readline.return_value = b'123456:checkinst\n'
mock_open.return_value = mock_channel
self.agent.signal_watch(test=True)
mock_channel.write.assert_called_with('123456:error\n'.encode('utf-8'))
@patch('builtins.open', side_effect=Exception('foo'))
@patch('agent.sleep')
def test_signal_watch_exception(self, mock_sleep, mock_open):
self.agent.signal_watch(attempt=3, test=True)
mock_sleep.assert_called_with(30)
@patch('agent.sleep')
@patch('builtins.open')
def test_signal_watch_empty(self, mock_open, *args):
mock_channel = MagicMock()
mock_channel.readline.return_value = b''
mock_open.return_value = mock_channel
self.agent.signal_watch(test=True)
mock_channel.close.assert_not_called()
@patch('agent.parse_instructions', return_value=True)
@patch('agent.sleep')
@patch('subprocess.run')
@patch('builtins.open')
def test_signal_watch_resize(self, mock_open, mock_run, *args):
rows = 10
cols = 20
mock_channel = MagicMock()
mock_channel.readline.return_value = f'123456:resize_{rows}_{cols}\n'.encode('utf-8')
mock_open.return_value = mock_channel
self.agent.signal_watch(test=True)
mock_for_assert = MagicMock()
mock_for_assert(['stty', '-F', '/dev/ttyS0', 'rows', str(rows)], check=False)
mock_for_assert(['stty', '-F', '/dev/ttyS0', 'cols', str(cols)], check=False)
self.assertEqual(mock_run.mock_calls, mock_for_assert.mock_calls)
@patch('builtins.open')
@patch('time.time', return_value=123456.90)
@patch('os.path.exists', side_effect=[False, True])
@patch('time.sleep')
def test_monitor(self, mock_sleep, mock_exists, mock_time, mock_open):
mock_file = MagicMock()
mock_file.readline.return_value = 'bar\n'
mock_open.return_value.__enter__.return_value = mock_file
mock_channel = MagicMock()
self.agent.monitoring = True
self.agent.monitor(mock_channel, file='/tmp/foo', pattern=r'(bar)', test=True)
mock_channel.write.assert_called_with('123456:bar\n'.encode('utf-8'))
mock_sleep.assert_called_with(0.5)
self.assertEqual(mock_sleep.call_count, 2)
@patch('builtins.open')
@patch('time.time', return_value=123456.90)
@patch('os.path.exists', side_effect=[False, True])
@patch('time.sleep')
def test_monitor_no_match(self, mock_sleep, mock_exists, mock_time, mock_open):
mock_file = MagicMock()
mock_file.readline.return_value = 'foo\n'
mock_open.return_value.__enter__.return_value = mock_file
mock_channel = MagicMock()
self.agent.monitoring = True
self.agent.monitor(mock_channel, file='/tmp/foo', pattern=r'(bar)', test=True)
mock_channel.write.assert_not_called()
self.assertEqual(mock_sleep.call_count, 1)
@patch('builtins.open')
def test_monitor_no_file(self, mock_open):
self.agent.monitor(MagicMock())
mock_open.assert_not_called()
@patch('subprocess.check_output', return_value=b' 10000 seconds since 1969')
@patch('agent.datetime')
def test_clock_jumped_past(self, mock_datetime, mock_sub):
mock_datetime.now = MagicMock(return_value=datetime(2020, 2, 1))
mock_datetime.fromtimestamp = datetime.fromtimestamp
res = self.agent.clock_jumped()
self.assertTrue(res)
@patch('subprocess.check_output', return_value=b' 99999999999999 seconds since 1969')
@patch('agent.datetime')
def test_clock_jumped_future(self, mock_datetime, mock_sub):
mock_datetime.now = MagicMock(return_value=datetime(2020, 4, 1))
mock_datetime.fromtimestamp = datetime.fromtimestamp
res = self.agent.clock_jumped()
self.assertTrue(res)
@patch('subprocess.check_output', return_value=b' 1583038800 seconds since 1969')
@patch('agent.datetime')
def test_clock_jumped_no_jump(self, mock_datetime, mock_sub):
mock_datetime.now = MagicMock(return_value=datetime.fromtimestamp(1583038800))
mock_datetime.fromtimestamp = datetime.fromtimestamp
res = self.agent.clock_jumped()
self.assertFalse(res)
@patch('subprocess.check_output', side_effect=Exception)
@patch('agent.datetime')
@patch('logging.warning')
def test_clock_jumped_exception(self, mock_log, mock_datetime, mock_sub):
mock_datetime.now = MagicMock(return_value=datetime(2020, 3, 1))
mock_datetime.fromtimestamp = datetime.fromtimestamp
res = self.agent.clock_jumped()
self.assertTrue(res)
mock_log.assert_called_with('Something went wrong. Syncing clock to be safe...')
@patch('subprocess.check_output', return_value=b'foo')
@patch('builtins.Exception')
def test_clock_jumped_raised(self, mock_exception, mock_sub):
self.agent.clock_jumped()
mock_exception.assert_called_with('Unable to parse hardware clock')
@patch('subprocess.check_output', return_value=b'hwclock from util-linux 2.34.2')
def test_set_hwclock_switch_new(self, mock_output):
self.agent.set_hwclock_switch()
self.assertEqual(self.agent.hwclock_switch, '--verbose')
@patch('subprocess.check_output', return_value=b'hwclock from util-linux 2.31.1')
@patch('logging.info')
def test_set_hwclock_switch_old(self, mock_log, mock_output):
self.agent.set_hwclock_switch()
self.assertEqual(self.agent.hwclock_switch, '--debug')
mock_log.assert_not_called()
@patch('subprocess.check_output', return_value=b'foo')
@patch('logging.info')
def test_set_hwclock_switch_fallback(self, mock_log, mock_output):
self.agent.set_hwclock_switch()
self.assertEqual(self.agent.hwclock_switch, '--debug')
mock_log.assert_called_with('Failed to detect hwclock switch, falling back to --debug')
@patch('requests.get')
@patch('logging.debug')
def test_auto_update_disabled(self, mock_log, mock_get):
self.agent.auto_update()
mock_get.assert_not_called()
mock_log.assert_called_with('Auto update is disabled')
@patch('requests.get')
@patch('agent.AGENT_VERSION', '1.4.3')
@patch('shutil.rmtree')
@patch('git.Repo.clone_from')
@patch('os.getcwd', return_value='/tmp/foo')
@patch('os.listdir', return_value=['test.txt', 'test.py'])
@patch('shutil.move')
@patch('os.execv')
@patch('agent.parse_instructions')
@patch('agent.fix_clock')
def test_auto_update(self, mock_fix, mock_parse, mock_exec, mock_move, mock_ls, mock_cwd,
mock_clone, mock_rm, mock_get):
mock_get.return_value.text = 'AGENT_VERSION = \'2.0.0\'\n'
testagent = Agent(self.config)
testagent.config['AUTO_UPDATE'] = 'True'
testagent.auto_update()
mock_get.assert_called_with(self.config['VERSION_URL'])
mock_rm.assert_called_with('/tmp/air-agent')
mock_clone.assert_called_with(self.config['GIT_URL'], '/tmp/air-agent',
branch=self.config['GIT_BRANCH'])
mock_move.assert_called_with('/tmp/air-agent/test.py', '/tmp/foo/test.py')
mock_exec.assert_called_with(sys.executable, ['python3'] + sys.argv)
@patch('requests.get')
@patch('agent.AGENT_VERSION', '1.4.3')
@patch('git.Repo.clone_from')
@patch('agent.parse_instructions')
@patch('logging.debug')
@patch('agent.fix_clock')
def test_auto_update_latest(self, mock_fix, mock_log, mock_parse, mock_clone, mock_get):
mock_get.return_value.text = 'AGENT_VERSION = \'1.4.3\'\n'
testagent = Agent(self.config)
testagent.config['AUTO_UPDATE'] = 'True'
testagent.auto_update()
mock_clone.assert_not_called()
mock_log.assert_called_with('Already running the latest version')
@patch('requests.get', side_effect=Exception('foo'))
@patch('agent.AGENT_VERSION', '1.4.3')
@patch('git.Repo.clone_from')
@patch('agent.parse_instructions')
@patch('logging.error')
@patch('agent.fix_clock')
def test_auto_update_check_fail(self, mock_fix, mock_log, mock_parse, mock_clone, mock_get):
testagent = Agent(self.config)
testagent.config['AUTO_UPDATE'] = 'True'
testagent.auto_update()
mock_clone.assert_not_called()
mock_log.assert_called_with('Failed to check for updates: foo')
@patch('requests.get')
@patch('agent.AGENT_VERSION', '1.4.3')
@patch('shutil.rmtree', side_effect=Exception('foo'))
@patch('git.Repo.clone_from')
@patch('os.getcwd', return_value='/tmp/foo')
@patch('os.listdir', return_value=['test.txt', 'test.py'])
@patch('shutil.move')
@patch('os.execv')
@patch('agent.parse_instructions')
@patch('agent.fix_clock')
def test_auto_update_rm_safe(self, mock_fix, mock_parse, mock_exec, mock_move, mock_ls,
mock_cwd, mock_clone, mock_rm, mock_get):
mock_get.return_value.text = 'AGENT_VERSION = \'2.0.0\'\n'
testagent = Agent(self.config)
testagent.config['AUTO_UPDATE'] = 'True'
testagent.auto_update()
mock_get.assert_called_with(self.config['VERSION_URL'])
mock_rm.assert_called_with('/tmp/air-agent')
mock_clone.assert_called_with(self.config['GIT_URL'], '/tmp/air-agent',
branch=self.config['GIT_BRANCH'])
mock_move.assert_called_with('/tmp/air-agent/test.py', '/tmp/foo/test.py')
mock_exec.assert_called_with(sys.executable, ['python3'] + sys.argv)
    @patch('requests.get')
    @patch('agent.AGENT_VERSION', '1.4.3')
    @patch('shutil.rmtree')
    @patch('git.Repo.clone_from', side_effect=Exception('foo'))
    @patch('os.getcwd', return_value='/tmp/foo')
    @patch('os.listdir', return_value=['test.txt', 'test.py'])
    @patch('shutil.move')
    @patch('os.execv')
    @patch('agent.parse_instructions')
    @patch('logging.error')
    @patch('agent.fix_clock')
    def test_auto_update_error(self, mock_fix, mock_log, mock_parse, mock_exec, mock_move, mock_ls,
                               mock_cwd, mock_clone, mock_rm, mock_get):
        """A clone failure is logged and the agent does not re-exec itself."""
        mock_get.return_value.text = 'AGENT_VERSION = \'2.0.0\'\n'
        testagent = Agent(self.config)
        testagent.config['AUTO_UPDATE'] = 'True'
        testagent.auto_update()
        mock_exec.assert_not_called()
        mock_log.assert_called_with('Failed to update agent: foo')
    @patch('agent.Agent.clock_jumped', return_value=True)
    @patch('agent.fix_clock')
    @patch('agent.sleep')
    def test_clock_watch(self, mock_sleep, mock_fix, mock_jump):
        """When a clock jump is detected, fix_clock() runs and the next sleep is extended."""
        self.agent.clock_watch(test=True)
        mock_fix.assert_called()
        # After a fix, the watcher backs off by an extra 300 seconds.
        mock_sleep.assert_called_with(self.agent.config.getint('CHECK_INTERVAL') + 300)
    @patch('agent.Agent.clock_jumped', return_value=False)
    @patch('agent.fix_clock')
    @patch('agent.sleep')
    def test_clock_watch_no_jump(self, mock_sleep, mock_fix, mock_jump):
        """Without a clock jump, fix_clock() is skipped and the normal interval is used."""
        self.agent.clock_watch(test=True)
        mock_fix.assert_not_called()
        mock_sleep.assert_called_with(self.agent.config.getint('CHECK_INTERVAL'))
    def test_unlock(self):
        """unlock() releases a held lock."""
        self.agent.lock.acquire()
        self.agent.unlock()
        self.assertFalse(self.agent.lock.locked())
    def test_unlock_pass(self):
        """unlock() is safe to call when the lock is not held."""
        self.agent.unlock()
        self.assertFalse(self.agent.lock.locked())
class TestAgentFunctions(TestCase):
    """Tests for the module-level functions in agent.py (argument parsing,
    daemon loop, instruction parsing, clock fixes, and device mounting)."""
    class MockConfigParser(dict):
        """Minimal ConfigParser stand-in: a plain dict with a mocked read()."""
        def __init__(self):
            super().__init__()
            self.read = MagicMock()
    def setUp(self):
        # Real config plus a mock parser available for tests that need one.
        self.config = util.load_config()
        self.mock_parse = self.MockConfigParser()
    @patch('argparse.ArgumentParser')
    def test_parse_args(self, mock_argparse):
        """parse_args() wires up -c/--config-file and returns the parsed result."""
        default_config_file = '/mnt/air/agent.ini'
        mock_parser = MagicMock()
        mock_argparse.return_value = mock_parser
        mock_parser.add_argument = MagicMock()
        mock_parser.parse_args.return_value = 'foo'
        res = agent.parse_args()
        year = datetime.now().year
        mock_argparse.assert_called_with(description='Air Agent service ' + \
                                         f'(NVIDIA © {year})')
        mock_parser.add_argument \
            .assert_called_with('-c', '--config-file', default=default_config_file,
                                help='Location of the service\'s config file. ' + \
                                     'Normally this will be injected automatically by the Air platform ' + \
                                     f'(default: {default_config_file})')
        self.assertEqual(res, 'foo')
    @patch('agent.executors')
    @patch('agent.sleep')
    @patch('agent.Agent.get_instructions', return_value=[{'data': 'foo', 'executor': 'shell',
                                                          'monitor': None}])
    @patch('threading.Thread')
    @patch('agent.Agent.auto_update')
    @patch('agent.fix_clock')
    def test_start_daemon(self, mock_fix, mock_update, mock_threading, mock_parse, mock_sleep,
                          mock_exec):
        """start_daemon() executes instructions, starts watcher threads and updates."""
        mock_signal_thread = MagicMock()
        mock_clock_thread = MagicMock()
        # threading.Thread is constructed twice: signal watcher then clock watcher.
        mock_threading.side_effect = [mock_signal_thread, mock_clock_thread]
        mock_exec.EXECUTOR_MAP = {'shell': MagicMock()}
        Agent.get_identity = MagicMock(return_value='123-456')
        agent_obj = Agent(self.config)
        agent_obj.check_identity = MagicMock(return_value=False)
        agent_obj.delete_instructions = MagicMock()
        agent.start_daemon(agent_obj, test=True)
        mock_exec.EXECUTOR_MAP['shell'].assert_called_with('foo')
        agent_obj.delete_instructions.assert_called()
        mock_sleep.assert_called_with(self.config.getint('CHECK_INTERVAL'))
        # Replay the expected Thread(...) construction order for comparison.
        mock_for_assert = MagicMock()
        mock_for_assert(target=agent_obj.clock_watch)
        mock_for_assert(target=agent_obj.signal_watch)
        self.assertEqual(mock_threading.mock_calls, mock_for_assert.mock_calls)
        mock_signal_thread.start.assert_called()
        mock_clock_thread.start.assert_called()
        mock_update.assert_called()
    @patch('agent.executors')
    @patch('agent.sleep')
    @patch('agent.parse_instructions')
    @patch('threading.Thread')
    @patch('agent.fix_clock')
    def test_start_daemon_no_change(self, mock_fix, mock_threading, mock_parse, mock_sleep,
                                    mock_exec):
        """When the identity is unchanged, instructions are not re-fetched."""
        Agent.get_identity = MagicMock(return_value='123-456')
        agent_obj = Agent(self.config)
        agent_obj.get_instructions = MagicMock()
        agent_obj.check_identity = MagicMock(return_value=True)
        agent.start_daemon(agent_obj, test=True)
        agent_obj.get_instructions.assert_not_called()
        mock_parse.assert_called()
    @patch('agent.executors')
    @patch('agent.sleep')
    @patch('agent.Agent.get_instructions', return_value=[{'data': 'foo', 'executor': 'shell',
                                                          'monitor': None}])
    @patch('threading.Thread')
    @patch('agent.fix_clock')
    def test_start_daemon_command_failed(self, mock_fix, mock_threading, mock_parse, mock_sleep,
                                         mock_exec):
        """A failed instruction must not delete instructions or change identity."""
        mock_exec.EXECUTOR_MAP = {'shell': MagicMock(return_value=False)}
        Agent.get_identity = MagicMock(return_value='123-456')
        agent_obj = Agent(self.config)
        agent_obj.check_identity = MagicMock(return_value=False)
        agent_obj.identity = '000-000'
        agent_obj.delete_instructions = MagicMock()
        agent.start_daemon(agent_obj, test=True)
        mock_exec.EXECUTOR_MAP['shell'].assert_called_with('foo')
        agent_obj.delete_instructions.assert_not_called()
        self.assertEqual(agent_obj.identity, '000-000')
    @patch('agent.executors')
    def test_parse_instructions(self, mock_exec):
        """parse_instructions() runs every instruction and refreshes the identity."""
        mock_exec.EXECUTOR_MAP = {'shell': MagicMock(side_effect=[1, 2])}
        mock_agent = MagicMock()
        mock_agent.get_instructions.return_value = [
            {'executor': 'shell', 'data': 'foo', 'monitor': None},
            {'executor': 'shell', 'data': 'bar', 'monitor': None}
        ]
        mock_agent.delete_instructions = MagicMock()
        mock_agent.identity = 'xzy'
        mock_agent.get_identity = MagicMock(return_value='abc')
        agent.parse_instructions(mock_agent)
        mock_agent.delete_instructions.assert_called()
        # Replay the expected executor invocations in order.
        mock_for_assert = MagicMock()
        mock_for_assert('foo')
        mock_for_assert('bar')
        self.assertEqual(mock_exec.EXECUTOR_MAP['shell'].mock_calls, mock_for_assert.mock_calls)
        self.assertEqual(mock_agent.identity, 'abc')
        mock_agent.lock.acquire.assert_called()
        mock_agent.unlock.assert_called()
    @patch('agent.executors')
    @patch('logging.warning')
    def test_parse_instructions_unsupported(self, mock_log, mock_exec):
        """An unknown executor name is logged as a warning and skipped."""
        mock_exec.EXECUTOR_MAP = {'shell': MagicMock(side_effect=[1, 2])}
        mock_agent = MagicMock()
        mock_agent.get_instructions.return_value = [{'executor': 'test', 'data': 'foo',
                                                     'monitor': None}]
        agent.parse_instructions(mock_agent)
        mock_log.assert_called_with('Received unsupported executor test')
    @patch('agent.sleep')
    def test_parse_instructions_retry(self, mock_sleep):
        """A failed instruction fetch is retried after a 30 second sleep."""
        mock_agent = MagicMock()
        mock_agent.get_instructions.side_effect = [False, []]
        res = agent.parse_instructions(mock_agent, attempt=3)
        mock_sleep.assert_called_with(30)
        self.assertEqual(mock_agent.get_instructions.call_count, 2)
        self.assertTrue(res)
        mock_agent.lock.acquire.assert_called()
        mock_agent.unlock.assert_called()
    @patch('agent.sleep')
    def test_parse_instructions_failed(self, mock_sleep):
        """Two consecutive fetch failures make parse_instructions() give up."""
        mock_agent = MagicMock()
        mock_agent.get_instructions.side_effect = [False, False]
        res = agent.parse_instructions(mock_agent, attempt=3)
        mock_sleep.assert_called_with(30)
        self.assertEqual(mock_agent.get_instructions.call_count, 2)
        self.assertFalse(res)
        mock_agent.lock.acquire.assert_called()
        mock_agent.unlock.assert_called()
    @patch('agent.executors')
    @patch('logging.warning')
    @patch('agent.sleep')
    def test_parse_instructions_cmd_failed(self, mock_sleep, mock_log, mock_exec):
        """Failed commands are retried with a 10s/20s backoff until they succeed."""
        mock_exec.EXECUTOR_MAP = {'shell': MagicMock(side_effect=[False, False, True])}
        mock_agent = MagicMock()
        mock_agent.get_instructions.return_value = [{'executor': 'shell', 'data': 'foo',
                                                     'monitor': None}]
        mock_agent.get_identity = MagicMock(return_value='abc')
        agent.parse_instructions(mock_agent)
        # Expected warning sequence for the two failed attempts.
        assert_logs = MagicMock()
        assert_logs.warning('Failed to execute all instructions on attempt #1. ' + \
                            'Retrying in 10 seconds...')
        assert_logs.warning('Failed to execute all instructions on attempt #2. ' + \
                            'Retrying in 20 seconds...')
        self.assertEqual(mock_log.mock_calls, assert_logs.mock_calls)
        # Backoff grows linearly: 10s then 20s.
        assert_sleep = MagicMock()
        assert_sleep(10)
        assert_sleep(20)
        self.assertEqual(mock_sleep.mock_calls, assert_sleep.mock_calls)
        mock_agent.get_identity.assert_called()
        mock_agent.lock.acquire.assert_called()
        mock_agent.unlock.assert_called()
    @patch('agent.executors')
    @patch('logging.error')
    @patch('agent.sleep')
    def test_parse_instructions_all_cmd_failed(self, mock_sleep, mock_log, mock_exec):
        """After three failed attempts (10s/20s/30s backoff) an error is logged."""
        mock_exec.EXECUTOR_MAP = {'shell': MagicMock(return_value=False)}
        mock_agent = MagicMock()
        mock_agent.get_instructions.return_value = [{'executor': 'shell', 'data': 'foo',
                                                     'monitor': None}]
        mock_agent.get_identity = MagicMock(return_value='abc')
        agent.parse_instructions(mock_agent)
        assert_sleep = MagicMock()
        assert_sleep(10)
        assert_sleep(20)
        assert_sleep(30)
        self.assertEqual(mock_sleep.mock_calls, assert_sleep.mock_calls)
        # Identity must not be refreshed when nothing succeeded.
        mock_agent.get_identity.assert_not_called()
        mock_log.assert_called_with('Failed to execute all instructions. Giving up.')
        mock_agent.lock.acquire.assert_called()
        mock_agent.unlock.assert_called()
    @patch('agent.executors')
    @patch('threading.Thread')
    def test_parse_instructions_monitor(self, mock_thread_class, mock_exec):
        """An instruction with a monitor spawns a monitor thread with its JSON kwargs."""
        mock_thread = MagicMock()
        mock_thread_class.return_value = mock_thread
        monitor_str = '{"file": "/tmp/foo", "pattern": "bar"}'
        mock_exec.EXECUTOR_MAP = {'shell': MagicMock(side_effect=[1, 2])}
        mock_agent = MagicMock()
        mock_agent.get_instructions.return_value = [{'executor': 'shell', 'data': 'foo',
                                                     'monitor': monitor_str}]
        mock_channel = MagicMock()
        agent.parse_instructions(mock_agent, channel=mock_channel)
        mock_thread_class.assert_called_with(target=mock_agent.monitor, args=(mock_channel,),
                                             kwargs=json.loads(monitor_str))
        mock_thread.start.assert_called()
        mock_agent.lock.acquire.assert_called()
        mock_agent.unlock.assert_called()
    def test_parse_instructions_none(self):
        """An empty instruction list still succeeds and refreshes the identity."""
        mock_agent = MagicMock()
        mock_agent.identity = 'abc'
        mock_agent.get_instructions = MagicMock(return_value=[])
        mock_agent.get_identity = MagicMock(return_value='foo')
        res = agent.parse_instructions(mock_agent)
        self.assertTrue(res)
        self.assertEqual(mock_agent.identity, 'foo')
        mock_agent.lock.acquire.assert_called()
        mock_agent.unlock.assert_called()
    @patch('agent.executors')
    @patch('logging.debug')
    def test_parse_instructions_os_none(self, mock_log, mock_exec):
        """init instructions are skipped when the agent's OS was not detected."""
        mock_exec.EXECUTOR_MAP = {'init': MagicMock(side_effect=[1, 2])}
        mock_agent = MagicMock()
        mock_agent.get_instructions.return_value = [
            {'executor': 'init', 'data': '{"hostname": "test"}', 'monitor': None}
        ]
        mock_agent.os = None
        agent.parse_instructions(mock_agent)
        mock_log.assert_called_with('Skipping init instructions due to missing os')
    @patch('subprocess.check_output',
           return_value=b'ntp.service\nfoo.service\[email protected]\nchrony.service')
    @patch('subprocess.call')
    def test_restart_ntp(self, mock_call, mock_check):
        """restart_ntp() restarts only NTP/chrony related services."""
        mock_for_assert = MagicMock()
        mock_for_assert('systemctl restart ntp.service', shell=True)
        mock_for_assert('systemctl restart [email protected]', shell=True)
        mock_for_assert('systemctl restart chrony.service', shell=True)
        agent.restart_ntp()
        self.assertEqual(mock_call.mock_calls, mock_for_assert.mock_calls)
    @patch('subprocess.run')
    @patch('agent.restart_ntp')
    @patch('agent.datetime')
    def test_fix_clock(self, mock_datetime, mock_ntp, mock_run):
        """fix_clock() syncs from the hardware clock and restarts NTP."""
        mock_datetime.now = MagicMock(return_value=datetime(2020, 3, 2))
        agent.fix_clock()
        mock_run.assert_called_with('hwclock -s', shell=True)
        mock_ntp.assert_called()
    @patch('subprocess.run', side_effect=Exception)
    @patch('logging.error')
    def test_fix_clock_failed(self, mock_log, mock_run):
        """A failed hwclock invocation is logged, not raised."""
        agent.fix_clock()
        mock_log.assert_called_with('Failed to fix clock')
    @patch('os.path.exists')
    @patch('subprocess.run')
    def test_check_devices(self, mock_run, mock_exists):
        """check_devices() succeeds when the key device exists and mounts cleanly."""
        res = agent.check_devices(self.config)
        self.assertTrue(res)
    @patch('os.path.exists', return_value=False)
    @patch('logging.info')
    def test_check_devices_path_does_not_exist(self, mock_log, mock_exists):
        """A missing key device aborts startup with an informational log."""
        res = agent.check_devices(self.config)
        self.assertFalse(res)
        mock_log.assert_called_with(f'{self.config["key_device"]} does not exist - agent will not be started')
    @patch('os.path.exists')
    @patch('agent.mount_device', return_value=False)
    def test_check_devices_mount_failed(self, mock_mount, mock_exists):
        """A failed mount makes check_devices() return False."""
        res = agent.check_devices(self.config)
        self.assertFalse(res)
    @patch('os.path.exists')
    @patch('agent.mount_device')
    @patch('subprocess.run', side_effect=Exception)
    @patch('logging.info')
    def test_check_devices_ls_failed(self, mock_log, mock_run, mock_mount, mock_exists):
        """A failed listing of the mounted filesystem aborts startup."""
        res = agent.check_devices(self.config)
        self.assertFalse(res)
        mock_log.assert_called_with(f'Failed to find expected files on {self.config["key_device"]} ' + \
                                    'filesystem - agent will not be started')
    @patch('os.path.exists')
    @patch('subprocess.run')
    def test_mount_device(self, mock_run, mock_exists):
        """mount_device() mounts the key device onto the configured directory."""
        res = agent.mount_device(self.config)
        self.assertTrue(res)
        mock_run.assert_called_with(f'mount {self.config["key_device"]} {self.config["key_dir"]} 2>/dev/null',
                                    shell=True)
    @patch('os.path.exists', return_value=False)
    @patch('os.makedirs')
    @patch('subprocess.run')
    @patch('logging.debug')
    def test_mount_device_directory_does_not_exist(self, mock_log, mock_run, mock_dir, mock_exists):
        """A missing mount directory is created before mounting."""
        res = agent.mount_device(self.config)
        self.assertTrue(res)
        mock_log.assert_called_with(f'{self.config["key_dir"]} does not exist, creating')
        mock_dir.assert_called_with(self.config['key_dir'])
    @patch('os.path.exists')
    @patch('subprocess.run')
    def test_mount_device_directory_exists(self, mock_run, mock_exists):
        """An existing mount directory is unmounted first, then remounted."""
        res = agent.mount_device(self.config)
        self.assertTrue(res)
        mock_run.assert_has_calls([call(f'umount {self.config["key_dir"]} 2>/dev/null', shell=True),
                                   call(f'mount {self.config["key_device"]} {self.config["key_dir"]} ' + \
                                        '2>/dev/null', shell=True)])
    @patch('os.path.exists', return_value = False)
    @patch('os.makedirs', side_effect=Exception)
    @patch('logging.error')
    def test_mount_device_directory_create_failed(self, mock_log, mock_dir, mock_exists):
        """A failure creating the mount directory is logged and fails the mount."""
        res = agent.mount_device(self.config)
        self.assertFalse(res)
        mock_log.assert_called_with(f'Failed to create the {self.config["key_dir"]} directory')
    @patch('os.path.exists')
    @patch('subprocess.run')
    @patch('logging.debug')
    def test_mount_device_unmount_failed(self, mock_log, mock_run, mock_exists):
        """A failed umount (nothing mounted) is tolerated and logged at debug level."""
        cmd2 = MagicMock()
        mock_run.side_effect = [subprocess.CalledProcessError(1, 'a'), cmd2]
        res = agent.mount_device(self.config)
        self.assertTrue(res)
        mock_log.assert_called_with(f'{self.config["key_dir"]} exists but is not mounted')
    @patch('os.path.exists')
    @patch('subprocess.run')
    @patch('logging.error')
    def test_mount_device_mount_failed(self, mock_log, mock_run, mock_exists):
        """A failed mount is logged as an error and returns False."""
        cmd1 = MagicMock()
        mock_run.side_effect = [cmd1, subprocess.CalledProcessError(1, 'a')]
        res = agent.mount_device(self.config)
        self.assertFalse(res)
        mock_log.assert_called_with(f'Failed to mount {self.config["key_device"]} to ' + \
                                    f'{self.config["key_dir"]}')
class TestExecutors(TestCase):
    """Tests for the instruction executor functions in executors.py."""
    @patch('subprocess.run')
    def test_shell(self, mock_run):
        """shell() runs each newline-separated command exactly once."""
        res = executors.shell('foo\nbar')
        self.assertTrue(res)
        self.assertEqual(mock_run.call_count, 2)
    @patch('subprocess.run', side_effect=Exception)
    @patch('logging.error')
    def test_shell_failed(self, mock_log, mock_run):
        """shell() returns False and logs the first command that raises."""
        res = executors.shell('foo\nbar\n')
        self.assertFalse(res)
        mock_log.assert_called_with('Command `foo` failed')
    @patch('builtins.open')
    @patch('subprocess.run')
    def test_file(self, mock_run, mock_open):
        """file() writes the payload and runs the post_cmd list entries."""
        outfile = MagicMock()
        outfile.write = MagicMock()
        mock_open.return_value.__enter__.return_value = outfile
        res = executors.file('{"/tmp/foo.txt": "bar", "post_cmd": ["cat /tmp/foo.txt"]}')
        self.assertTrue(res)
        mock_open.assert_called_with('/tmp/foo.txt', 'w')
        outfile.write.assert_called_with('bar')
        mock_run.assert_called_with('cat /tmp/foo.txt', shell=True, check=True)
    @patch('builtins.open')
    @patch('subprocess.run')
    def test_file_cmd_string(self, mock_run, mock_open):
        """file() also accepts post_cmd as a single string instead of a list."""
        outfile = MagicMock()
        outfile.write = MagicMock()
        mock_open.return_value.__enter__.return_value = outfile
        res = executors.file('{"/tmp/foo.txt": "bar", "post_cmd": "cat /tmp/foo.txt"}')
        self.assertTrue(res)
        mock_open.assert_called_with('/tmp/foo.txt', 'w')
        outfile.write.assert_called_with('bar')
        mock_run.assert_called_with('cat /tmp/foo.txt', shell=True, check=True)
    @patch('builtins.open', side_effect=Exception)
    @patch('logging.error')
    @patch('subprocess.run')
    def test_file_write_failed(self, mock_run, mock_log, mock_open):
        """A write failure is logged and fails the instruction."""
        res = executors.file('{"/tmp/foo.txt": "bar", "post_cmd": ["cat /tmp/foo.txt"]}')
        self.assertFalse(res)
        mock_log.assert_called_with('Failed to write /tmp/foo.txt')
    @patch('builtins.open')
    @patch('subprocess.run', side_effect=Exception)
    @patch('logging.error')
    def test_file_cmd_failed(self, mock_log, mock_run, mock_open):
        """A failing post_cmd is logged and fails the instruction."""
        outfile = MagicMock()
        outfile.write = MagicMock()
        mock_open.return_value.__enter__.return_value = outfile
        res = executors.file('{"/tmp/foo.txt": "bar", "post_cmd": ["cat /tmp/foo.txt"]}')
        self.assertFalse(res)
        mock_log.assert_called_with('post_cmd `cat /tmp/foo.txt` failed')
    @patch('logging.error')
    def test_file_json_parse_failed(self, mock_log):
        """Malformed JSON payloads are rejected with a decode error log."""
        data = '{"/tmp/foo.txt": "bar", "post_cmd": [cat /tmp/foo.txt"]}'
        res = executors.file(data)
        self.assertFalse(res)
        mock_log.assert_called_with('Failed to decode instructions as JSON: ' + \
                                    'Expecting value: line 1 column 38 (char 37)')
    @patch('executors.shell')
    def test_init(self, mock_shell):
        """init() accepts a valid JSON payload."""
        res = executors.init('{"hostname": "test"}')
        self.assertTrue(res)
    @patch('logging.error')
    def test_init_json_parse_failed(self, mock_log):
        """init() rejects non-JSON payloads with a decode error log."""
        res = executors.init('string')
        mock_log.assert_called_with('Failed to decode init data as JSON: ' + \
                                    'Expecting value: line 1 column 1 (char 0)')
        self.assertFalse(res)
class TestPlatformDetect(TestCase):
    """Tests for platform_detect.detect(), which shells out to lsb_release."""
    @patch('subprocess.run')
    def test_detect(self, mock_exec):
        """detect() returns (OS, release) parsed from lsb_release output."""
        cmd1 = MagicMock()
        cmd1.stdout = b'Distributor ID:\tUbuntu\n'
        cmd2 = MagicMock()
        cmd2.stdout = b'Release:\t20.04\n'
        mock_exec.side_effect = [cmd1, cmd2]
        res = platform_detect.detect()
        self.assertEqual(res, ('Ubuntu', '20.04'))
        mock_for_assert = MagicMock()
        mock_for_assert(['lsb_release', '-i'], check=True, stdout=subprocess.PIPE)
        mock_for_assert(['lsb_release', '-r'], check=True, stdout=subprocess.PIPE)
        self.assertEqual(mock_exec.mock_calls, mock_for_assert.mock_calls)
    @patch('subprocess.run', side_effect=Exception)
    @patch('logging.warning')
    def test_detect_fail_os(self, mock_log, mock_exec):
        """If the OS probe fails, both tuple members are None."""
        # NOTE: the local name `os` shadows the os module inside this test.
        os, release = platform_detect.detect()
        mock_log.assert_called_with('Platform detection failed to determine OS')
        self.assertIsNone(os)
        self.assertIsNone(release)
    @patch('subprocess.run')
    @patch('logging.warning')
    def test_detect_fail_release(self, mock_log, mock_exec):
        """If only the release probe fails, the OS is still returned."""
        os_str = b'Ubuntu'
        cmd1 = MagicMock()
        cmd1.stdout = b'Distributor ID:\t' + os_str + b'\n'
        mock_exec.side_effect = [cmd1, Exception]
        # NOTE: the local name `os` shadows the os module inside this test.
        os, release = platform_detect.detect()
        mock_log.assert_called_with('Platform detection failed to determine Release')
        self.assertEqual(os, os_str.decode())
        self.assertIsNone(release)
| air_agent-main | tests/tests.py |
# -*- coding: utf-8 -*-
#
# The Linux Kernel documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
import shutil
# helper
# ------
def have_command(cmd):
    """Return True when ``cmd`` resolves to an executable on ``PATH``,
    otherwise False."""
    resolved = shutil.which(cmd)
    return resolved is not None
# Get Sphinx version (used throughout to gate version-specific behavior)
major, minor, patch = sphinx.version_info[:3]

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx'))
from load_config import loadConfig
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['kerneldoc', 'rstFlatTable', 'kernel_include',
              'kfigure', 'sphinx.ext.ifconfig', 'automarkup',
              'maintainers_include', 'sphinx.ext.autosectionlabel',
              'kernel_abi', 'kernel_feat']

if major >= 3:
    # Only Sphinx >= 3.0.2 needs (and supports) c_id_attributes.
    if (major > 3) or (minor > 0 or patch >= 2):
        # Sphinx c function parser is more pedantic with regards to type
        # checking. Due to that, having macros at c:function cause problems.
        # Those needed to be escaped by using c_id_attributes[] array
        c_id_attributes = [
            # GCC Compiler types not parsed by Sphinx:
            "__restrict__",

            # include/linux/compiler_types.h:
            "__iomem",
            "__kernel",
            "noinstr",
            "notrace",
            "__percpu",
            "__rcu",
            "__user",

            # include/linux/compiler_attributes.h:
            "__alias",
            "__aligned",
            "__aligned_largest",
            "__always_inline",
            "__assume_aligned",
            "__cold",
            "__attribute_const__",
            "__copy",
            "__pure",
            "__designated_init",
            "__visible",
            "__printf",
            "__scanf",
            "__gnu_inline",
            "__malloc",
            "__mode",
            "__no_caller_saved_registers",
            "__noclone",
            "__nonstring",
            "__noreturn",
            "__packed",
            # NOTE(review): "__pure" appears twice in this list (harmless).
            "__pure",
            "__section",
            "__always_unused",
            "__maybe_unused",
            "__used",
            "__weak",
            "noinline",
            "__fix_address",

            # include/linux/memblock.h:
            "__init_memblock",
            "__meminit",

            # include/linux/init.h:
            "__init",
            "__ref",

            # include/linux/linkage.h:
            "asmlinkage",
        ]

else:
    # Sphinx 2.x still needs the compatibility C domain extension.
    extensions.append('cdomain')

# Ensure that autosectionlabel will produce unique names
autosectionlabel_prefix_document = True
autosectionlabel_maxdepth = 2
# Load math renderer:
# For html builder, load imgmath only when its dependencies are met.
# mathjax is the default math renderer since Sphinx 1.8.
have_latex = have_command('latex')
have_dvipng = have_command('dvipng')
load_imgmath = have_latex and have_dvipng

# Respect SPHINX_IMGMATH (for html docs only): "yes"/"no" force the choice.
if 'SPHINX_IMGMATH' in os.environ:
    env_sphinx_imgmath = os.environ['SPHINX_IMGMATH']
    if 'yes' in env_sphinx_imgmath:
        load_imgmath = True
    elif 'no' in env_sphinx_imgmath:
        load_imgmath = False
    else:
        # Unrecognized values fall back to the autodetected choice above.
        sys.stderr.write("Unknown env SPHINX_IMGMATH=%s ignored.\n" % env_sphinx_imgmath)

# Always load imgmath for Sphinx <1.8 or for epub docs
load_imgmath = (load_imgmath or (major == 1 and minor < 8)
                or 'epub' in sys.argv)

if load_imgmath:
    extensions.append("sphinx.ext.imgmath")
    math_renderer = 'imgmath'
else:
    math_renderer = 'mathjax'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'The Linux Kernel'
copyright = 'The kernel development community'
author = 'The kernel development community'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# In a normal build, version and release are set to KERNELVERSION and
# KERNELRELEASE, respectively, from the Makefile via Sphinx command line
# arguments.
#
# The following code tries to extract the information by reading the Makefile,
# when Sphinx is run directly (e.g. by Read the Docs).
try:
    makefile_version = None
    makefile_patchlevel = None
    # Use a context manager so the Makefile handle is always closed
    # (previously the file object was leaked).
    with open('../Makefile') as makefile:
        for line in makefile:
            # Split on the first '=' only: the value itself may be empty or
            # contain further '=' characters; the old maxsplit=2 could yield
            # three fields and break the 2-tuple unpack.
            key, val = [x.strip() for x in line.split('=', 1)]
            if key == 'VERSION':
                makefile_version = val
            elif key == 'PATCHLEVEL':
                makefile_patchlevel = val
            if makefile_version and makefile_patchlevel:
                break
except Exception:
    # Lines without '=' raise ValueError and a missing Makefile raises
    # OSError; either way we fall back to "unknown version" below.
    # (Narrowed from a bare `except:` so Ctrl-C is not swallowed.)
    pass
finally:
    if makefile_version and makefile_patchlevel:
        version = release = makefile_version + '.' + makefile_patchlevel
    else:
        version = release = "unknown version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['output']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# Kernel docs are mostly about C code, and most literals are not Python.
primary_domain = 'c'
highlight_language = 'none'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Default theme
# Theme selection: default to the Read the Docs theme, allow an override via
# the DOCS_THEME environment variable, and fall back to 'classic' when the
# RTD theme is not installed.
html_theme = 'sphinx_rtd_theme'
html_css_files = []

if "DOCS_THEME" in os.environ:
    html_theme = os.environ["DOCS_THEME"]

if html_theme == 'sphinx_rtd_theme' or html_theme == 'sphinx_rtd_dark_mode':
    # Read the Docs theme
    try:
        import sphinx_rtd_theme
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

        # Add any paths that contain custom static files (such as style sheets) here,
        # relative to this directory. They are copied after the builtin static files,
        # so a file named "default.css" will overwrite the builtin "default.css".
        html_css_files = [
            'theme_overrides.css',
        ]

        # Read the Docs dark mode override theme
        if html_theme == 'sphinx_rtd_dark_mode':
            try:
                import sphinx_rtd_dark_mode
                extensions.append('sphinx_rtd_dark_mode')
            except ImportError:
                # BUGFIX: this was `html_theme == 'sphinx_rtd_theme'`, a no-op
                # comparison.  Assign so that a missing dark-mode package
                # falls back to the regular RTD theme (and gets its colors
                # appended below).
                html_theme = 'sphinx_rtd_theme'

        if html_theme == 'sphinx_rtd_theme':
            # Add color-specific RTD normal mode
            html_css_files.append('theme_rtd_colors.css')

    except ImportError:
        html_theme = 'classic'
# DOCS_CSS holds a space-separated list of extra CSS files to load.
if "DOCS_CSS" in os.environ:
    css = os.environ["DOCS_CSS"].split(" ")

    for l in css:
        html_css_files.append(l)

# Sphinx < 1.8 has no html_css_files option; emulate it via html_context.
if major <= 1 and minor < 8:
    html_context = {
        'css_files': [],
    }

    for l in html_css_files:
        html_context['css_files'].append('_static/' + l)

if html_theme == 'classic':
    html_theme_options = {
        'rightsidebar':        False,
        'stickysidebar':       True,
        'collapsiblesidebar':  True,
        'externalrefs':        False,

        'footerbgcolor':       "white",
        'footertextcolor':     "white",
        'sidebarbgcolor':      "white",
        'sidebarbtncolor':     "black",
        'sidebartextcolor':    "black",
        'sidebarlinkcolor':    "#686bff",
        'relbarbgcolor':       "#133f52",
        'relbartextcolor':     "white",
        'relbarlinkcolor':     "white",
        'bgcolor':             "white",
        'textcolor':           "black",
        'headbgcolor':         "#f2f2f2",
        'headtextcolor':       "#20435c",
        'headlinkcolor':       "#c60f0f",
        'linkcolor':           "#355f7c",
        'visitedlinkcolor':    "#355f7c",
        'codebgcolor':         "#3f3f3f",
        'codetextcolor':       "white",

        'bodyfont':            "serif",
        'headfont':            "sans-serif",
    }

# Report the final theme choice on stderr so build logs show it.
sys.stderr.write("Using %s theme\n" % html_theme)
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['sphinx-static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Custom sidebar templates, maps document names to template names.
# Note that the RTD theme ignores this.
html_sidebars = { '**': ['searchbox.html', 'localtoc.html', 'sourcelink.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TheLinuxKerneldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    'papersize': 'a4paper',
    # The font size ('10pt', '11pt' or '12pt').
    'pointsize': '11pt',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
    # Don't mangle with UTF-8 chars
    # (empty inputenc/utf8extra because the preamble below selects fonts
    # via fontspec, i.e. a XeLaTeX build).
    'inputenc': '',
    'utf8extra': '',
    # Set document margins
    'sphinxsetup': '''
        hmargin=0.5in, vmargin=1in,
        parsedliteralwraps=true,
        verbatimhintsturnover=false,
    ''',
    # For CJK One-half spacing, need to be in front of hyperref
    'extrapackages': r'\usepackage{setspace}',
    # Additional stuff for the LaTeX preamble.
    'preamble': '''
        % Use some font with UTF-8 support with XeLaTeX
        \\usepackage{fontspec}
        \\setsansfont{DejaVu Sans}
        \\setromanfont{DejaVu Serif}
        \\setmonofont{DejaVu Sans Mono}
    ''',
}
# Fix reference escape troubles with Sphinx 1.4.x
# ("major" is the Sphinx major version, parsed earlier in this file)
if major == 1:
    latex_elements['preamble']  += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
# Load kerneldoc specific LaTeX settings
latex_elements['preamble'] += '''
        % Load kerneldoc specific LaTeX settings
        \\input{kerneldoc-preamble.sty}
'''
# With Sphinx 1.6, it is possible to change the Bg color directly
# by using:
# \definecolor{sphinxnoteBgColor}{RGB}{204,255,255}
# \definecolor{sphinxwarningBgColor}{RGB}{255,204,204}
# \definecolor{sphinxattentionBgColor}{RGB}{255,255,204}
# \definecolor{sphinximportantBgColor}{RGB}{192,255,204}
#
# However, it require to use sphinx heavy box with:
#
# \renewenvironment{sphinxlightbox} {%
# \\begin{sphinxheavybox}
# }
# \\end{sphinxheavybox}
# }
#
# Unfortunately, the implementation is buggy: if a note is inside a
# table, it isn't displayed well. So, for now, let's use boring
# black and white notes.
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# Sorted in alphabetical order
latex_documents = [
]
# Add all other index files from Documentation/ subdirectories
# (each becomes its own "Linux <Subdir> Documentation" manual).
for subdir in os.listdir('.'):
    doc = os.path.join(subdir, "index")
    if not os.path.exists(doc + ".rst"):
        continue
    # Skip entries that are already registered above.
    if any(existing[0] == doc for existing in latex_documents):
        continue
    latex_documents.append((doc, subdir + '.tex',
                            'Linux %s Documentation' % subdir.capitalize(),
                            'The kernel development community',
                            'manual'))
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# Additional LaTeX stuff to be copied to build directory
latex_additional_files = [
    'sphinx/kerneldoc-preamble.sty',
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# master_doc/author are defined earlier in this conf.py.
man_pages = [
    (master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
     author, 'TheLinuxKernel', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
# Reuse the project-wide metadata defined earlier in this conf.py.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
#=======
# rst2pdf
#
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# See the Sphinx chapter of https://ralsina.me/static/manual.pdf
#
# FIXME: Do not add the index file here; the result will be too big. Adding
# multiple PDF files here actually tries to get the cross-referencing right
# *between* PDF files.
pdf_documents = [
    ('kernel-documentation', u'Kernel', u'Kernel', u'J. Random Bozo'),
]
# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
# the Docs). In a normal build, these are supplied from the Makefile via command
# line arguments.
kerneldoc_bin = '../scripts/kernel-doc'
kerneldoc_srctree = '..'
# ------------------------------------------------------------------------------
# Since loadConfig overwrites settings from the global namespace, it has to be
# the last statement in the conf.py file
# ------------------------------------------------------------------------------
# NOTE(review): loadConfig appears to come from the kernel's sphinx helper
# extension imported earlier in this file -- confirm before moving/removing.
loadConfig(globals())
| grace-kernel-grace-kernel-6.1.y | Documentation/conf.py |
#!/usr/bin/env python
# add symbolic names to read_msr / write_msr in trace
# decode_msr msr-index.h < trace
import sys
import re


def build_msr_map(path):
	"""Parse msr-index.h style #defines into a {number: name} map.

	Only lines of the form ``#define MSR_<name> 0x<hex>`` are used.
	"""
	msrs = {}
	with open(path) as src:
		for line in src:
			m = re.match(r'#define (MSR_\w+)\s+(0x[0-9a-fA-F]+)', line)
			if m:
				msrs[int(m.group(2), 16)] = m.group(1)
	return msrs


# MSR families that are defined as ranges rather than individual #defines:
# (printf-style name template, first MSR number, last MSR number).
extra_ranges = (
	( "MSR_LASTBRANCH_%d_FROM_IP", 0x680, 0x69F ),
	( "MSR_LASTBRANCH_%d_TO_IP", 0x6C0, 0x6DF ),
	( "LBR_INFO_%d", 0xdc0, 0xddf ),
)


def annotate(line, msrs):
	"""Return *line* with a read_msr/write_msr number annotated by name.

	The hex number after ``read_msr:``/``write_msr:`` is looked up in
	*msrs*, then in extra_ranges; if found, `` NUM`` becomes
	`` NAME(NUM)``. Lines without a match are returned unchanged.
	"""
	m = re.search(r'(read|write)_msr:\s+([0-9a-f]+)', line)
	if not m:
		return line
	num = int(m.group(2), 16)
	name = msrs.get(num)
	if name is None:
		for tmpl, lo, hi in extra_ranges:
			if lo <= num <= hi:
				name = tmpl % (num - lo,)
				break
	if name:
		line = line.replace(" " + m.group(2), " " + name + "(" + m.group(2) + ")")
	return line


def main(argv=None, stdin=None, stdout=None):
	"""Annotate an ftrace msr trace read from *stdin* onto *stdout*.

	argv[1] names the msr-index.h file (default "msr-index.h").
	The stream parameters default to sys.argv/sys.stdin/sys.stdout and
	exist so the filter can be driven programmatically.
	"""
	argv = sys.argv if argv is None else argv
	stdin = sys.stdin if stdin is None else stdin
	stdout = sys.stdout if stdout is None else stdout
	msrs = build_msr_map(argv[1] if len(argv) > 1 else "msr-index.h")
	for line in stdin:
		# The Python 2 original used "print line,"; write() preserves the
		# line's own newline without softspace artifacts.
		stdout.write(annotate(line, msrs))


if __name__ == "__main__":
	main()
| grace-kernel-grace-kernel-6.1.y | Documentation/trace/postprocess/decode_msr.py |
#!/usr/bin/env python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Module-level state shared between the generation stages below.
tcm_dir = ""                 # kernel tree root; set in main()
fabric_ops = []              # "(*fn)" member lines scanned from target_core_fabric.h
fabric_mod_dir = ""          # output directory of the generated module
fabric_mod_port = ""         # "lport" (FC) or "tport" (SAS/iSCSI); set by the include builders
fabric_mod_init_port = ""    # initiator-side counterpart of fabric_mod_port
def tcm_mod_err(msg):
	"""Print *msg* and abort the generator with exit status 1."""
	# Python 3 print function (the original used the py2 print statement).
	print(msg)
	sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
	"""Create the fabric module output directory.

	Returns 1 if the directory already exists (the caller treats this as
	fatal), None after creating it.
	"""
	if os.path.isdir(fabric_mod_dir_var):
		return 1
	print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
	try:
		os.mkdir(fabric_mod_dir_var)
	except OSError:
		# The original tested os.mkdir()'s return value, which is always
		# None, so its error path was dead code; report failures properly.
		tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
	return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <mod>_base.h for an FC fabric (lport/tpg structures).

	Side effect: records the FC naming convention in the module-level
	fabric_mod_port/fabric_mod_init_port globals used by later stages.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print("Writing file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* FC lport target portal group tag for TCM */\n"
	buf += "	u16 lport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
	buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_lport {\n"
	buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
	buf += "	u64 lport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
	buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
	buf += "	struct se_wwn lport_wwn;\n"
	buf += "};\n"
	# open()/write() raise OSError on failure; the original's "if not p"
	# and "if ret" checks were dead code in py2 and, under py3, write()'s
	# byte count would wrongly trip the error path.
	with open(f, 'w') as p:
		p.write(buf)
	fabric_mod_port = "lport"
	fabric_mod_init_port = "nport"
	return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <mod>_base.h for a SAS fabric (tport/tpg structures).

	Side effect: records the SAS naming convention in the module-level
	fabric_mod_port/fabric_mod_init_port globals used by later stages.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print("Writing file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* SAS port target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
	buf += "	u64 tport_wwpn;\n"
	buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	# See tcm_mod_build_FC_include: rely on exceptions instead of the
	# original's dead (py2) / wrong (py3) return-value checks.
	with open(f, 'w') as p:
		p.write(buf)
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate <mod>_base.h for an iSCSI fabric (tport/tpg structures).

	Side effect: records the iSCSI naming convention in the module-level
	fabric_mod_port/fabric_mod_init_port globals used by later stages.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print("Writing file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* iSCSI target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* ASCII formatted TargetName for IQN */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	# See tcm_mod_build_FC_include: rely on exceptions instead of the
	# original's dead (py2) / wrong (py3) return-value checks.
	with open(f, 'w') as p:
		p.write(buf)
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
	"""Dispatch to the protocol-specific <mod>_base.h generator.

	Exits with status 1 on an unsupported proto_ident.
	"""
	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
	else:
		print("Unsupported proto_ident: " + proto_ident)
		sys.exit(1)
	return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <mod>_configfs.c: tpg/wwn make+drop callbacks, the
	target_core_fabric_ops table and module init/exit boilerplate.

	Uses the module-level fabric_mod_port name set earlier by the
	protocol-specific include builder.
	"""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
	print("Writing file: " + f)
	buf = "#include <linux/module.h>\n"
	buf += "#include <linux/moduleparam.h>\n"
	buf += "#include <linux/version.h>\n"
	buf += "#include <generated/utsrelease.h>\n"
	buf += "#include <linux/utsname.h>\n"
	buf += "#include <linux/init.h>\n"
	buf += "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/configfs.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi_proto.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
	buf += "	struct se_wwn *wwn,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg;\n"
	buf += "	unsigned long tpgt;\n"
	buf += "	int ret;\n\n"
	buf += "	if (strstr(name, \"tpgt_\") != name)\n"
	buf += "		return ERR_PTR(-EINVAL);\n"
	buf += "	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
	buf += "		return ERR_PTR(-EINVAL);\n\n"
	buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
	buf += "	if (!tpg) {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"
	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
	if proto_ident == "FC":
		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
	elif proto_ident == "SAS":
		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
	elif proto_ident == "iSCSI":
		buf += "	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
	buf += "	if (ret < 0) {\n"
	buf += "		kfree(tpg);\n"
	buf += "		return NULL;\n"
	buf += "	}\n"
	buf += "	return &tpg->se_tpg;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
	buf += "			struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
	buf += "	core_tpg_deregister(se_tpg);\n"
	buf += "	kfree(tpg);\n"
	buf += "}\n\n"
	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
	buf += "	struct target_fabric_configfs *tf,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	u64 wwpn = 0;\n\n"
	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += "		return ERR_PTR(-EINVAL); */\n\n"
	buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
	buf += "	if (!" + fabric_mod_port + ") {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
	buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
	buf += "	kfree(" + fabric_mod_port + ");\n"
	buf += "}\n\n"
	buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
	buf += "	.module = THIS_MODULE,\n"
	buf += "	.name = \"" + fabric_mod_name + "\",\n"
	buf += "	.get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
	buf += "	.tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
	buf += "	.tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
	buf += "	.tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
	buf += "	.release_cmd = " + fabric_mod_name + "_release_cmd,\n"
	buf += "	.sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
	buf += "	.sess_get_initiator_sid = NULL,\n"
	buf += "	.write_pending = " + fabric_mod_name + "_write_pending,\n"
	buf += "	.set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
	buf += "	.get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
	buf += "	.queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
	buf += "	.queue_status = " + fabric_mod_name + "_queue_status,\n"
	buf += "	.queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
	buf += "	.aborted_task = " + fabric_mod_name + "_aborted_task,\n"
	buf += "	/*\n"
	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
	buf += "	 */\n"
	buf += "	.fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
	buf += "	.fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
	buf += "	.fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
	buf += "	.fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
	buf += "};\n\n"
	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
	buf += "{\n"
	buf += "	return target_register_template(&" + fabric_mod_name + "_ops);\n"
	buf += "};\n\n"
	buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
	buf += "{\n"
	buf += "	target_unregister_template(&" + fabric_mod_name + "_ops);\n"
	buf += "};\n\n"
	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
	buf += "MODULE_LICENSE(\"GPL\");\n"
	buf += "module_init(" + fabric_mod_name + "_init);\n"
	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
	# open()/write() raise OSError on failure; the original's return-value
	# checks were dead code (and wrong under Python 3).
	with open(f, 'w') as p:
		p.write(buf)
	return
def tcm_mod_scan_fabric_ops(tcm_dir):
	"""Collect function-pointer member lines from target_core_fabric.h.

	Reads include/target/target_core_fabric.h under *tcm_dir* and appends
	every line containing a "(*" function pointer to the module-level
	fabric_ops list, which tcm_mod_dump_fabric_ops() consumes later.

	NOTE(review): process_fo flips to 1 on the first line that is not the
	struct header, so in practice every "(*...)" line in the whole file is
	collected rather than only those inside the struct -- confirm before
	changing this logic.
	"""
	fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0;
	p = open(fabric_ops_api, 'r')
	line = p.readline()
	while line:
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue
		if process_fo == 0:
			process_fo = 1;
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue
			fabric_ops.append(line.rstrip())
			continue
		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue
		fabric_ops.append(line.rstrip())
	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <mod>_fabric.c stubs and the matching <mod>_fabric.h
	prototypes for each fabric op collected by tcm_mod_scan_fabric_ops().
	"""
	buf = ""
	bufi = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
	print("Writing file: " + f)
	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
	print("Writing file: " + fi)
	buf = "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/list.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi_common.h>\n"
	buf += "#include <scsi/scsi_proto.h>\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_fabric.h>\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 1;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 0;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
	# Emit one stub + prototype per scanned function-pointer line.
	for fo in fabric_ops:
		# print("fabric_ops: " + fo)
		if re.search(r'get_fabric_name', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
			buf += "{\n"
			buf += "	return \"" + fabric_mod_name + "\";\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
			continue
		if re.search(r'get_wwn', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
		if re.search(r'get_tag', fo):
			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
		if re.search(r'tpg_get_inst_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
		if re.search(r'\*release_cmd\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
		if re.search(r'sess_get_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
		if re.search(r'write_pending\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
		if re.search(r'set_default_node_attributes\)\(', fo):
			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
		if re.search(r'get_cmd_state\)\(', fo):
			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
		if re.search(r'queue_data_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
		if re.search(r'queue_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
		if re.search(r'queue_tm_rsp\)\(', fo):
			buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
		if re.search(r'aborted_task\)\(', fo):
			buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
	# open()/write() raise OSError on failure; the original's return-value
	# checks were dead code (and wrong under Python 3).
	with open(f, 'w') as p:
		p.write(buf)
	with open(fi, 'w') as pi:
		pi.write(bufi)
	return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
	"""Write the generated module's Makefile (objs list + obj-$(CONFIG))."""
	f = fabric_mod_dir_var + "/Makefile"
	print("Writing file: " + f)
	buf = ""
	buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
	buf += "			   " + fabric_mod_name + "_configfs.o\n"
	buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
	# open()/write() raise OSError on failure; the original's return-value
	# checks were dead code (and wrong under Python 3).
	with open(f, 'w') as p:
		p.write(buf)
	return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
	"""Write the generated module's Kconfig entry (tristate, default n)."""
	f = fabric_mod_dir_var + "/Kconfig"
	print("Writing file: " + f)
	buf = "config " + fabric_mod_name.upper() + "\n"
	buf += "	tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
	buf += "	depends on TARGET_CORE && CONFIGFS_FS\n"
	buf += "	default n\n"
	buf += "	help\n"
	buf += "	  Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
	# open()/write() raise OSError on failure; the original's return-value
	# checks were dead code (and wrong under Python 3).
	with open(f, 'w') as p:
		p.write(buf)
	return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
	"""Append the new module's obj-$(CONFIG_...) rule to drivers/target/Makefile."""
	buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
	kbuild = tcm_dir + "/drivers/target/Makefile"
	with open(kbuild, 'a') as f:
		f.write(buf)
	return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
	"""Append a source line for the new module to drivers/target/Kconfig."""
	buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
	kconfig = tcm_dir + "/drivers/target/Kconfig"
	with open(kconfig, 'a') as f:
		f.write(buf)
	return
def main(modname, proto_ident):
	"""Drive generation of a complete fabric module skeleton under
	drivers/target/<modname>, then optionally wire it into the parent
	Makefile/Kconfig (interactive prompts).
	"""
	# proto_ident = "FC"
	# proto_ident = "SAS"
	# proto_ident = "iSCSI"
	tcm_dir = os.getcwd()
	tcm_dir += "/../../"
	print("tcm_dir: " + tcm_dir)
	fabric_mod_name = modname
	fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
	print("Set fabric_mod_name: " + fabric_mod_name)
	print("Set fabric_mod_dir: " + fabric_mod_dir)
	print("Using proto_ident: " + proto_ident)
	if proto_ident not in ("FC", "SAS", "iSCSI"):
		print("Unsupported proto_ident: " + proto_ident)
		sys.exit(1)
	ret = tcm_mod_create_module_subdir(fabric_mod_dir)
	if ret:
		print("tcm_mod_create_module_subdir() failed because module already exists!")
		sys.exit(1)
	tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_scan_fabric_ops(tcm_dir)
	tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
	tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
	# py3: raw_input() became input(); avoid shadowing the builtin with a
	# local named "input" as the original did.
	reply = input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
	if reply in ("yes", "y"):
		tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
	reply = input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
	if reply in ("yes", "y"):
		tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
	return
# Command-line entry: both options are mandatory.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
		action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
		action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
	if not opts.__dict__[m]:
		print("mandatory option is missing\n")
		parser.print_help()
		exit(-1)
if __name__ == "__main__":
	main(str(opts.modname), opts.protoident)
| grace-kernel-grace-kernel-6.1.y | Documentation/target/tcm_mod_builder.py |
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct

# struct.unpack() needs bytes; on Python 3 sys.stdin is a text stream, so
# read from the underlying binary buffer when it exists (Python 2 file
# objects have no .buffer attribute and already yield bytes).
_stdin = getattr(sys.stdin, "buffer", sys.stdin)

i = 0
while True:
	buf = _stdin.read(4)
	if len(buf) == 0:
		break
	elif len(buf) != 4:
		sys.stdout.write("\n")
		sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
		sys.exit(1)
	if i > 0:
		sys.stdout.write(" ")
	# Emit "index=value" with the index in hex and the le32 value decoded.
	sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
	i += 1
sys.stdout.write("\n")
| grace-kernel-grace-kernel-6.1.y | Documentation/networking/device_drivers/atm/cxacru-cf.py |
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
u"""
kernel-feat
~~~~~~~~~~~
Implementation of the ``kernel-feat`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:copyright: Copyright (C) 2016-2019 Mauro Carvalho Chehab
:maintained-by: Mauro Carvalho Chehab <[email protected]>
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The ``kernel-feat`` (:py:class:`KernelFeat`) directive calls the
scripts/get_feat.pl script to parse the Kernel ABI files.
Overview of directive's argument and options.
.. code-block:: rst
.. kernel-feat:: <ABI directory location>
:debug:
The argument ``<ABI directory location>`` is required. It contains the
location of the ABI files to be parsed.
``debug``
Inserts a code-block with the *raw* reST. Sometimes it is helpful to see
what reST is generated.
"""
import codecs
import os
import re
import subprocess
import sys
from os import path
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from docutils.utils.error_reporting import ErrorString
from sphinx.util.docutils import switch_source_input
__version__ = '1.0'
def setup(app):
    """Register the ``kernel-feat`` directive with the Sphinx application."""
    app.add_directive("kernel-feat", KernelFeat)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
class KernelFeat(Directive):

    u"""KernelFeat (``kernel-feat``) directive.

    Runs scripts/get_feat.pl over the directory given as first argument
    (optionally restricted to the architecture given as second argument)
    and nested-parses the generated reST into the current document.
    """

    required_arguments = 1
    optional_arguments = 2
    has_content = False
    final_argument_whitespace = True

    option_spec = {
        "debug": directives.flag
    }

    def warn(self, message, **replace):
        """Emit a build warning prefixed with the current source file/line."""
        replace["fname"] = self.state.document.current_source
        replace["line_no"] = replace.get("line_no", self.lineno)
        message = ("%(fname)s:%(line_no)s: [kernel-feat WARN] : " + message) % replace
        self.state.document.settings.env.app.warn(message, prefix="")

    def run(self):
        """Run get_feat.pl and return the parsed document nodes."""
        doc = self.state.document
        if not doc.settings.file_insertion_enabled:
            raise self.warning("docutils: file insertion disabled")

        env = doc.settings.env
        cwd = path.dirname(doc.current_source)
        cmd = "get_feat.pl rest --enable-fname --dir "
        cmd += self.arguments[0]

        if len(self.arguments) > 1:
            cmd += " --arch " + self.arguments[1]

        srctree = path.abspath(os.environ["srctree"])

        fname = cmd

        # extend PATH with $(srctree)/scripts
        path_env = os.pathsep.join([
            srctree + os.sep + "scripts",
            os.environ["PATH"]
        ])
        shell_env = os.environ.copy()
        shell_env["PATH"] = path_env
        shell_env["srctree"] = srctree

        lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)

        # FIX: raw string -- "\." and "\S" are invalid escape sequences in
        # a plain string literal (DeprecationWarning, SyntaxWarning on
        # newer Python); the compiled pattern itself is unchanged.
        line_regex = re.compile(r"^\.\. FILE (\S+)$")

        out_lines = ""

        for line in lines.split("\n"):
            match = line_regex.search(line)
            if match:
                fname = match.group(1)

                # Add the file to Sphinx build dependencies
                env.note_dependency(os.path.abspath(fname))
            else:
                out_lines += line + "\n"

        nodeList = self.nestedParse(out_lines, fname)
        return nodeList

    def runCmd(self, cmd, **kwargs):
        u"""Run command ``cmd`` and return its stdout as unicode.

        Raises a severe directive error if the command cannot be started
        or exits with a non-zero status.
        """
        try:
            proc = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                **kwargs
            )
            out, err = proc.communicate()

            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')

            if proc.returncode != 0:
                raise self.severe(
                    u"command '%s' failed with return code %d"
                    % (cmd, proc.returncode)
                )
        except OSError as exc:
            raise self.severe(u"problems with '%s' directive: %s."
                              % (self.name, ErrorString(exc)))
        return out

    def nestedParse(self, lines, fname):
        """Parse the generated reST in *lines*; return the resulting nodes.

        With the ``debug`` option the raw reST is wrapped into a literal
        code-block instead of being rendered.
        """
        content = ViewList()
        node = nodes.section()

        if "debug" in self.options:
            code_block = "\n\n.. code-block:: rst\n    :linenos:\n"
            for l in lines.split("\n"):
                code_block += "\n    " + l
            lines = code_block + "\n\n"

        for c, l in enumerate(lines.split("\n")):
            content.append(l, fname, c)

        # FIX: dropped the unused save of (title_styles, section_level,
        # reporter) -- switch_source_input() handles the parser state, so
        # the old tuple assignment was dead code.
        with switch_source_input(self.state, content):
            self.state.nested_parse(content, 0, node, match_titles=1)

        return node.children
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/kernel_feat.py |
# -*- coding: utf-8; mode: python -*-
# pylint: disable=W0141,C0113,C0103,C0325
u"""
cdomain
~~~~~~~
Replacement for the sphinx c-domain.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
List of customizations:
* Moved the *duplicate C object description* warnings for function
declarations in the nitpicky mode. See Sphinx documentation for
the config values for ``nitpick`` and ``nitpick_ignore``.
* Add option 'name' to the "c:function:" directive. With option 'name' the
ref-name of a function can be modified. E.g.::
.. c:function:: int ioctl( int fd, int request )
:name: VIDIOC_LOG_STATUS
The func-name (e.g. ioctl) remains in the output but the ref-name changed
from 'ioctl' to 'VIDIOC_LOG_STATUS'. The function is referenced by::
* :c:func:`VIDIOC_LOG_STATUS` or
* :any:`VIDIOC_LOG_STATUS` (``:any:`` needs sphinx 1.3)
* Handle signatures of function-like macros well. Don't try to deduce
arguments types of function-like macros.
"""
from docutils import nodes
from docutils.parsers.rst import directives
import sphinx
from sphinx import addnodes
from sphinx.domains.c import c_funcptr_sig_re, c_sig_re
from sphinx.domains.c import CObject as Base_CObject
from sphinx.domains.c import CDomain as Base_CDomain
from itertools import chain
import re
__version__ = '1.1'
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
# Namespace to be prepended to the full name
namespace = None
#
# Handle trivial newer c domain tags that are part of Sphinx 3.1 c domain tags
# - Store the namespace if ".. c:namespace::" tag is found
#
RE_namespace = re.compile(r'^\s*..\s*c:namespace::\s*(\S+)\s*$')
def markup_namespace(match):
    """Record the namespace from a ``.. c:namespace::`` tag and drop the line."""
    global namespace

    namespace = match.group(1)
    # the tag itself produces no output
    return ""
#
# Handle c:macro for function-style declaration
#
RE_macro = re.compile(r'^\s*..\s*c:macro::\s*(\S+)\s+(\S.*)\s*$')
def markup_macro(match):
    """Rewrite a function-style ``.. c:macro::`` as a ``.. c:function::``."""
    return ".. c:function:: {0} {1}".format(match.group(1), match.group(2))
#
# Handle newer c domain tags that are evaluated as .. c:type: for
# backward-compatibility with Sphinx < 3.0
#
RE_ctype = re.compile(r'^\s*..\s*c:(struct|union|enum|enumerator|alias)::\s*(.*)$')
def markup_ctype(match):
    """Map a new-style c-domain object tag onto the legacy ``.. c:type::``."""
    return ".. c:type:: {0}".format(match.group(2))
#
# Handle newer c domain tags that are evaluated as :c:type: for
# backward-compatibility with Sphinx < 3.0
#
RE_ctype_refs = re.compile(r':c:(var|struct|union|enum|enumerator)::`([^\`]+)`')
def markup_ctype_refs(match):
    """Map a new-style c-domain cross-reference onto legacy ``:c:type:``."""
    return ":c:type:`{0}`".format(match.group(2))
#
# Simply convert :c:expr: and :c:texpr: into a literal block.
#
RE_expr = re.compile(r':c:(expr|texpr):`([^\`]+)`')
def markup_c_expr(match):
    """Turn a :c:expr:/:c:texpr: role into a plain inline literal."""
    # "\ " is reST escaped whitespace, keeping the literal glued to the
    # surrounding text.
    return r'\ ``{0}``\ '.format(match.group(2))
#
# Parse Sphinx 3.x C markups, replacing them by backward-compatible ones
#
def c_markups(app, docname, source):
    """``source-read`` hook: translate Sphinx 3.x C markups into the
    backward-compatible forms understood by this extension."""
    markup_func = {
        RE_namespace: markup_namespace,
        RE_expr: markup_c_expr,
        RE_macro: markup_macro,
        RE_ctype: markup_ctype,
        RE_ctype_refs: markup_ctype_refs,
    }

    out = []
    for line in source[0].splitlines(True):
        # Collect the matches of every pattern, ordered by position, and
        # substitute each in turn.
        found = sorted(
            chain(*[regex.finditer(line) for regex in markup_func]),
            key=lambda m: m.start())
        for m in found:
            line = line[:m.start()] + markup_func[m.re](m) + line[m.end():]
        out.append(line)

    source[0] = "".join(out)
#
# Now implements support for the cdomain namespacing logic
#
def setup(app):
    """Install the markup translator hook and the extended C domain."""
    # Handle easy Sphinx 3.1+ simple new tags: :c:expr and .. c:namespace::
    app.connect('source-read', c_markups)

    if (major == 1 and minor < 8):
        # override_domain() was removed in Sphinx 1.8
        app.override_domain(CDomain)
    else:
        app.add_domain(CDomain, override=True)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
class CObject(Base_CObject):

    """
    Description of a C language object.

    Extends the stock sphinx CObject with the ``:name:`` ref-name override
    for functions and with support for function-like macros.
    """
    option_spec = {
        "name" : directives.unchanged
    }

    def handle_func_like_macro(self, sig, signode):
        u"""Handles signatures of function-like macros.

        If the objtype is 'function' and the signature ``sig`` is a
        function-like macro, the name of the macro is returned. Otherwise
        ``False`` is returned.  """
        global namespace

        if not self.objtype == 'function':
            return False

        m = c_funcptr_sig_re.match(sig)
        if m is None:
            m = c_sig_re.match(sig)
            if m is None:
                raise ValueError('no match')

        rettype, fullname, arglist, _const = m.groups()
        arglist = arglist.strip()
        # a macro has no return type and must have an argument list
        if rettype or not arglist:
            return False

        arglist = arglist.replace('`', '').replace('\\ ', '') # remove markup
        arglist = [a.strip() for a in arglist.split(",")]

        # has the first argument a type?
        if len(arglist[0].split(" ")) > 1:
            return False

        # This is a function-like macro, it's arguments are typeless!
        signode += addnodes.desc_name(fullname, fullname)
        paramlist = addnodes.desc_parameterlist()
        signode += paramlist

        for argname in arglist:
            param = addnodes.desc_parameter('', '', noemph=True)
            # separate by non-breaking space in the output
            param += nodes.emphasis(argname, argname)
            paramlist += param

        if namespace:
            fullname = namespace + "." + fullname

        return fullname

    def handle_signature(self, sig, signode):
        """Transform a C signature into RST nodes."""
        global namespace

        fullname = self.handle_func_like_macro(sig, signode)
        if not fullname:
            # not a function-like macro: fall back to the stock parser
            fullname = super(CObject, self).handle_signature(sig, signode)

        if "name" in self.options:
            if self.objtype == 'function':
                # ref-name override, e.g. ioctl defined under several names
                fullname = self.options["name"]
            else:
                # FIXME: handle :name: value of other declaration types?
                pass
        else:
            if namespace:
                fullname = namespace + "." + fullname

        return fullname

    def add_target_and_index(self, name, sig, signode):
        """Register the cross-reference target and the index entry."""
        # for C API items we add a prefix since names are usually not qualified
        # by a module name and so easily clash with e.g. section titles
        targetname = 'c.' + name
        if targetname not in self.state.document.ids:
            signode['names'].append(targetname)
            signode['ids'].append(targetname)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)

            inv = self.env.domaindata['c']['objects']
            # duplicate declarations are only reported in nitpicky mode and
            # can be silenced via nitpick_ignore
            if (name in inv and self.env.config.nitpicky):
                if self.objtype == 'function':
                    if ('c:func', name) not in self.env.config.nitpick_ignore:
                        self.state_machine.reporter.warning(
                            'duplicate C object description of %s, ' % name +
                            'other instance in ' + self.env.doc2path(inv[name][0]),
                            line=self.lineno)
            inv[name] = (self.env.docname, self.objtype)

        indextext = self.get_index_text(name)
        if indextext:
            self.indexnode['entries'].append(
                ('single', indextext, targetname, '', None))
class CDomain(Base_CDomain):

    """C language domain.

    Same as the stock sphinx C domain, but every object kind is handled
    by the customized CObject directive above.
    """
    name = 'c'
    label = 'C'

    directives = {
        'function': CObject,
        'member':   CObject,
        'macro':    CObject,
        'type':     CObject,
        'var':      CObject,
    }
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/cdomain.py |
#!/usr/bin/env python3
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
u"""
kernel-include
~~~~~~~~~~~~~~
Implementation of the ``kernel-include`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see linux/COPYING for details.
The ``kernel-include`` reST-directive is a replacement for the ``include``
directive. The ``kernel-include`` directive expand environment variables in
the path name and allows to include files from arbitrary locations.
.. hint::
Including files from arbitrary locations (e.g. from ``/etc``) is a
security risk for builders. This is why the ``include`` directive from
docutils *prohibit* pathnames pointing to locations *above* the filesystem
tree where the reST document with the include directive is placed.
Substrings of the form $name or ${name} are replaced by the value of
environment variable name. Malformed variable names and references to
non-existing variables are left unchanged.
"""
# ==============================================================================
# imports
# ==============================================================================
import os.path
from docutils import io, nodes, statemachine
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
from docutils.parsers.rst.directives.misc import Include
__version__ = '1.0'
# ==============================================================================
def setup(app):
# ==============================================================================
    """Register the ``kernel-include`` directive with Sphinx."""
    app.add_directive("kernel-include", KernelInclude)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
# ==============================================================================
class KernelInclude(Include):
# ==============================================================================

    u"""KernelInclude (``kernel-include``) directive.

    Like docutils' ``include``, but expands environment variables in the
    path argument and allows absolute paths outside the reST source tree
    (paths below /etc excepted, see run()).
    """

    def run(self):
        """Resolve/validate the path argument, then delegate to _run()."""
        env = self.state.document.settings.env
        # expand $name / ${name} references before resolving the path
        path = os.path.realpath(
            os.path.expandvars(self.arguments[0]))

        # to get a bit security back, prohibit /etc:
        if path.startswith(os.sep + "etc"):
            raise self.severe(
                'Problems with "%s" directive, prohibited path: %s'
                % (self.name, path))

        self.arguments[0] = path

        # rebuild the document when the included file changes
        env.note_dependency(os.path.abspath(path))

        #return super(KernelInclude, self).run() # won't work, see HINTs in _run()
        return self._run()

    def _run(self):
        """Include a file as part of the content of this reST file."""

        # HINT: I had to copy&paste the whole Include.run method. I'm not happy
        # with this, but due to security reasons, the Include.run method does
        # not allow absolute or relative pathnames pointing to locations *above*
        # the filesystem tree where the reST document is placed.

        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)
        source = self.state_machine.input_lines.source(
            self.lineno - self.state_machine.input_offset - 1)
        source_dir = os.path.dirname(os.path.abspath(source))
        path = directives.path(self.arguments[0])
        # <name> refers to docutils' standard include files
        if path.startswith('<') and path.endswith('>'):
            path = os.path.join(self.standard_include_path, path[1:-1])
        path = os.path.normpath(os.path.join(source_dir, path))

        # HINT: this is the only line I had to change / commented out:
        #path = utils.relative_path(None, path)

        path = nodes.reprunicode(path)
        encoding = self.options.get(
            'encoding', self.state.document.settings.input_encoding)
        e_handler=self.state.document.settings.input_encoding_error_handler
        tab_width = self.options.get(
            'tab-width', self.state.document.settings.tab_width)
        try:
            self.state.document.settings.record_dependencies.add(path)
            include_file = io.FileInput(source_path=path,
                                        encoding=encoding,
                                        error_handler=e_handler)
        except UnicodeEncodeError as error:
            raise self.severe('Problems with "%s" directive path:\n'
                              'Cannot encode input file path "%s" '
                              '(wrong locale?).' %
                              (self.name, SafeString(path)))
        except IOError as error:
            raise self.severe('Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))
        # honor the optional start-line/end-line slicing options
        startline = self.options.get('start-line', None)
        endline = self.options.get('end-line', None)
        try:
            if startline or (endline is not None):
                lines = include_file.readlines()
                rawtext = ''.join(lines[startline:endline])
            else:
                rawtext = include_file.read()
        except UnicodeError as error:
            raise self.severe('Problem with "%s" directive:\n%s' %
                              (self.name, ErrorString(error)))
        # start-after/end-before: no restrictions on newlines in match-text,
        # and no restrictions on matching inside lines vs. line boundaries
        after_text = self.options.get('start-after', None)
        if after_text:
            # skip content in rawtext before *and incl.* a matching text
            after_index = rawtext.find(after_text)
            if after_index < 0:
                raise self.severe('Problem with "start-after" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[after_index + len(after_text):]
        before_text = self.options.get('end-before', None)
        if before_text:
            # skip content in rawtext after *and incl.* a matching text
            before_index = rawtext.find(before_text)
            if before_index < 0:
                raise self.severe('Problem with "end-before" option of "%s" '
                                  'directive:\nText not found.' % self.name)
            rawtext = rawtext[:before_index]

        include_lines = statemachine.string2lines(rawtext, tab_width,
                                                  convert_whitespace=True)
        if 'literal' in self.options:
            # render the whole file as one literal block
            # Convert tabs to spaces, if `tab_width` is positive.
            if tab_width >= 0:
                text = rawtext.expandtabs(tab_width)
            else:
                text = rawtext
            literal_block = nodes.literal_block(rawtext, source=path,
                                                classes=self.options.get('class', []))
            literal_block.line = 1
            self.add_name(literal_block)
            if 'number-lines' in self.options:
                try:
                    startline = int(self.options['number-lines'] or 1)
                except ValueError:
                    raise self.error(':number-lines: with non-integer '
                                     'start value')
                endline = startline + len(include_lines)
                if text.endswith('\n'):
                    text = text[:-1]
                tokens = NumberLines([([], text)], startline, endline)
                for classes, value in tokens:
                    if classes:
                        literal_block += nodes.inline(value, value,
                                                      classes=classes)
                    else:
                        literal_block += nodes.Text(value, value)
            else:
                literal_block += nodes.Text(text, text)
            return [literal_block]

        if 'code' in self.options:
            # render as a syntax-highlighted code block
            self.options['source'] = path
            codeblock = CodeBlock(self.name,
                                  [self.options.pop('code')], # arguments
                                  self.options,
                                  include_lines, # content
                                  self.lineno,
                                  self.content_offset,
                                  self.block_text,
                                  self.state,
                                  self.state_machine)
            return codeblock.run()

        # default: feed the included lines into the reST parser
        self.state_machine.insert_input(include_lines, path)
        return []
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/kernel_include.py |
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
u"""
maintainers-include
~~~~~~~~~~~~~~~~~~~
Implementation of the ``maintainers-include`` reST-directive.
:copyright: Copyright (C) 2019 Kees Cook <[email protected]>
:license: GPL Version 2, June 1991 see linux/COPYING for details.
The ``maintainers-include`` reST-directive performs extensive parsing
specific to the Linux kernel's standard "MAINTAINERS" file, in an
effort to avoid needing to heavily mark up the original plain text.
"""
import sys
import re
import os.path
from docutils import statemachine
from docutils.utils.error_reporting import ErrorString
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives.misc import Include
__version__ = '1.0'
def setup(app):
    """Register the ``maintainers-include`` directive with Sphinx."""
    app.add_directive("maintainers-include", MaintainersInclude)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
class MaintainersInclude(Include):
    u"""MaintainersInclude (``maintainers-include``) directive.

    Locates the kernel's top-level MAINTAINERS file relative to the
    document being built, renders it as reST and feeds the result into
    the current document.
    """
    required_arguments = 0

    def parse_maintainers(self, path):
        """Parse all the MAINTAINERS lines into ReST for human-readability.

        The rendered text is pushed straight into the state machine; this
        method returns nothing.
        """
        result = list()
        result.append(".. _maintainers:")
        result.append("")

        # Poor man's state machine.
        descriptions = False
        maintainers = False
        subsystems = False

        # Field letter to field name mapping.
        field_letter = None
        fields = dict()

        prev = None
        field_prev = ""
        field_content = ""

        # FIX: use a context manager so the file handle is closed instead
        # of leaking until garbage collection.
        with open(path) as maintainers_file:
            for line in maintainers_file:
                # Have we reached the end of the preformatted Descriptions text?
                if descriptions and line.startswith('Maintainers'):
                    descriptions = False
                    # Ensure a blank line following the last "|"-prefixed line.
                    result.append("")

                # Start subsystem processing? This is to skip processing the text
                # between the Maintainers heading and the first subsystem name.
                if maintainers and not subsystems:
                    if re.search('^[A-Z0-9]', line):
                        subsystems = True

                # Drop needless input whitespace.
                line = line.rstrip()

                # Linkify all non-wildcard refs to ReST files in Documentation/.
                # FIX: raw strings throughout -- "\s", "\?", "\*" and "\."
                # are invalid escape sequences in plain string literals and
                # warn on recent Python; the compiled patterns are unchanged.
                pat = r'(Documentation/([^\s\?\*]*)\.rst)'
                m = re.search(pat, line)
                if m:
                    # maintainers.rst is in a subdirectory, so include "../".
                    line = re.sub(pat, ':doc:`%s <../%s>`' % (m.group(2), m.group(2)), line)

                # Check state machine for output rendering behavior.
                output = None
                if descriptions:
                    # Escape the escapes in preformatted text.
                    output = "| %s" % (line.replace("\\", "\\\\"))
                    # Look for and record field letter to field name mappings:
                    #   R: Designated *reviewer*: FullName <address@domain>
                    m = re.search(r"\s(\S):\s", line)
                    if m:
                        field_letter = m.group(1)
                    if field_letter and not field_letter in fields:
                        m = re.search(r"\*([^\*]+)\*", line)
                        if m:
                            fields[field_letter] = m.group(1)
                elif subsystems:
                    # Skip empty lines: subsystem parser adds them as needed.
                    if len(line) == 0:
                        continue
                    # Subsystem fields are batched into "field_content"
                    if line[1] != ':':
                        # Render a subsystem entry as:
                        #   SUBSYSTEM NAME
                        #   ~~~~~~~~~~~~~~

                        # Flush pending field content.
                        output = field_content + "\n\n"
                        field_content = ""

                        # Collapse whitespace in subsystem name.
                        heading = re.sub(r"\s+", " ", line)
                        output = output + "%s\n%s" % (heading, "~" * len(heading))
                        field_prev = ""
                    else:
                        # Render a subsystem field as:
                        #   :Field: entry
                        #           entry...
                        field, details = line.split(':', 1)
                        details = details.strip()

                        # Mark paths (and regexes) as literal text for improved
                        # readability and to escape any escapes.
                        if field in ['F', 'N', 'X', 'K']:
                            # But only if not already marked :)
                            if not ':doc:' in details:
                                details = '``%s``' % (details)

                        # Comma separate email field continuations.
                        if field == field_prev and field_prev in ['M', 'R', 'L']:
                            field_content = field_content + ","

                        # Do not repeat field names, so that field entries
                        # will be collapsed together.
                        if field != field_prev:
                            output = field_content + "\n"
                            field_content = ":%s:" % (fields.get(field, field))
                        field_content = field_content + "\n\t%s" % (details)
                        field_prev = field
                else:
                    output = line

                # Re-split on any added newlines in any above parsing.
                if output != None:
                    for separated in output.split('\n'):
                        result.append(separated)

                # Update the state machine when we find heading separators.
                if line.startswith('----------'):
                    if prev.startswith('Descriptions'):
                        descriptions = True
                    if prev.startswith('Maintainers'):
                        maintainers = True

                # Retain previous line for state machine transitions.
                prev = line

        # Flush pending field contents.
        if field_content != "":
            for separated in field_content.split('\n'):
                result.append(separated)

        output = "\n".join(result)
        # For debugging the pre-rendered results...
        #print(output, file=open("/tmp/MAINTAINERS.rst", "w"))

        self.state_machine.insert_input(
            statemachine.string2lines(output), path)

    def run(self):
        """Include the MAINTAINERS file as part of this reST file."""
        if not self.state.document.settings.file_insertion_enabled:
            raise self.warning('"%s" directive disabled.' % self.name)

        # Walk up source path directories to find Documentation/../
        path = self.state_machine.document.attributes['source']
        path = os.path.realpath(path)
        tail = path
        while tail != "Documentation" and tail != "":
            (path, tail) = os.path.split(path)

        # Append "MAINTAINERS"
        path = os.path.join(path, "MAINTAINERS")

        try:
            self.state.document.settings.record_dependencies.add(path)
            # FIX: parse_maintainers() feeds its output directly into the
            # state machine and returns None -- the old "lines =" binding
            # was dead and misleading.
            self.parse_maintainers(path)
        except IOError as error:
            raise self.severe('Problems with "%s" directive path:\n%s.' %
                              (self.name, ErrorString(error)))

        return []
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/maintainers_include.py |
# -*- coding: utf-8; mode: python -*-
# coding=utf-8
# SPDX-License-Identifier: GPL-2.0
#
u"""
kernel-abi
~~~~~~~~~~
Implementation of the ``kernel-abi`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:copyright: Copyright (C) 2016-2020 Mauro Carvalho Chehab
:maintained-by: Mauro Carvalho Chehab <[email protected]>
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The ``kernel-abi`` (:py:class:`KernelCmd`) directive calls the
scripts/get_abi.pl script to parse the Kernel ABI files.
Overview of directive's argument and options.
.. code-block:: rst
.. kernel-abi:: <ABI directory location>
:debug:
The argument ``<ABI directory location>`` is required. It contains the
location of the ABI files to be parsed.
``debug``
Inserts a code-block with the *raw* reST. Sometimes it is helpful to see
what reST is generated.
"""
import codecs
import os
import subprocess
import sys
import re
import kernellog
from os import path
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
from docutils.utils.error_reporting import ErrorString
from sphinx.util.docutils import switch_source_input
__version__ = '1.0'
def setup(app):
    """Register the ``kernel-abi`` directive with the Sphinx application."""
    app.add_directive("kernel-abi", KernelCmd)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
class KernelCmd(Directive):

    u"""KernelABI (``kernel-abi``) directive.

    Runs scripts/get_abi.pl over the ABI directory given as the first
    argument and nested-parses its reST output, tracking per-input-file
    line numbers for accurate error reporting.
    """

    required_arguments = 1
    optional_arguments = 2
    has_content = False
    final_argument_whitespace = True

    option_spec = {
        "debug": directives.flag,
        "rst": directives.unchanged
    }

    def run(self):
        """Run get_abi.pl and return the parsed document nodes."""
        doc = self.state.document
        if not doc.settings.file_insertion_enabled:
            raise self.warning("docutils: file insertion disabled")

        env = doc.settings.env
        cwd = path.dirname(doc.current_source)
        cmd = "get_abi.pl rest --enable-lineno --dir "
        cmd += self.arguments[0]
        if 'rst' in self.options:
            cmd += " --rst-source"

        srctree = path.abspath(os.environ["srctree"])

        fname = cmd

        # extend PATH with $(srctree)/scripts
        path_env = os.pathsep.join([
            srctree + os.sep + "scripts",
            os.environ["PATH"]
        ])
        shell_env = os.environ.copy()
        shell_env["PATH"] = path_env
        shell_env["srctree"] = srctree

        lines = self.runCmd(cmd, shell=True, cwd=cwd, env=shell_env)
        nodeList = self.nestedParse(lines, self.arguments[0])
        return nodeList

    def runCmd(self, cmd, **kwargs):
        u"""Run command ``cmd`` and return its stdout as unicode.

        Raises a severe directive error if the command cannot be started
        or exits with a non-zero status.
        """
        try:
            proc = subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                **kwargs
            )
            out, err = proc.communicate()

            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')

            if proc.returncode != 0:
                raise self.severe(
                    u"command '%s' failed with return code %d"
                    % (cmd, proc.returncode)
                )
        except OSError as exc:
            raise self.severe(u"problems with '%s' directive: %s."
                              % (self.name, ErrorString(exc)))
        return out

    def nestedParse(self, lines, fname):
        """Parse the generated reST, honoring ``.. LINENO`` annotations.

        The annotations emitted by get_abi.pl map each chunk back to its
        originating ABI file and line, so diagnostics point at the right
        place; each input file is parsed separately.
        """
        env = self.state.document.settings.env
        content = ViewList()
        node = nodes.section()

        if "debug" in self.options:
            code_block = "\n\n.. code-block:: rst\n    :linenos:\n"
            for l in lines.split("\n"):
                code_block += "\n    " + l
            lines = code_block + "\n\n"

        # FIX: raw string -- "\.", "\S" and "\#" are invalid escape
        # sequences in a plain string literal (DeprecationWarning,
        # SyntaxWarning on newer Python); the pattern itself is unchanged.
        line_regex = re.compile(r"^\.\. LINENO (\S+)\#([0-9]+)$")
        ln = 0
        n = 0
        f = fname

        for line in lines.split("\n"):
            n = n + 1
            match = line_regex.search(line)
            if match:
                new_f = match.group(1)

                # Sphinx parser is lazy: it stops parsing contents in the
                # middle, if it is too big. So, handle it per input file
                if new_f != f and content:
                    self.do_parse(content, node)
                    content = ViewList()

                    # Add the file to Sphinx build dependencies
                    env.note_dependency(os.path.abspath(f))

                f = new_f

                # sphinx counts lines from 0
                ln = int(match.group(2)) - 1
            else:
                content.append(line, f, ln)

        kernellog.info(self.state.document.settings.env.app,
                       "%s: parsed %i lines" % (fname, n))

        if content:
            self.do_parse(content, node)

        return node.children

    def do_parse(self, content, node):
        """Nested-parse *content* into *node* with proper source mapping."""
        with switch_source_input(self.state, content):
            self.state.nested_parse(content, 0, node, match_titles=1)
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/kernel_abi.py |
#!/usr/bin/env python3
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0330, R0903, R0912
u"""
flat-table
~~~~~~~~~~
Implementation of the ``flat-table`` reST-directive.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see linux/COPYING for details.
The ``flat-table`` (:py:class:`FlatTable`) is a double-stage list similar to
the ``list-table`` with some additional features:
* *column-span*: with the role ``cspan`` a cell can be extended through
additional columns
* *row-span*: with the role ``rspan`` a cell can be extended through
additional rows
* *auto span* rightmost cell of a table row over the missing cells on the
right side of that table-row. With Option ``:fill-cells:`` this behavior
can be changed from *auto span* to *auto fill*, which automatically inserts
(empty) cells instead of spanning the last cell.
Options:
* header-rows: [int] count of header rows
* stub-columns: [int] count of stub columns
* widths: [[int] [int] ... ] widths of columns
* fill-cells: instead of autospanning missing cells, insert missing cells
roles:
* cspan: [int] additional columns (*morecols*)
* rspan: [int] additional rows (*morerows*)
"""
# ==============================================================================
# imports
# ==============================================================================
from docutils import nodes
from docutils.parsers.rst import directives, roles
from docutils.parsers.rst.directives.tables import Table
from docutils.utils import SystemMessagePropagation
# ==============================================================================
# common globals
# ==============================================================================
__version__ = '1.0'
# ==============================================================================
def setup(app):
# ==============================================================================
    """Register the ``flat-table`` directive and the cspan/rspan roles."""
    app.add_directive("flat-table", FlatTable)
    roles.register_local_role('cspan', c_span)
    roles.register_local_role('rspan', r_span)

    return {
        "version": __version__,
        "parallel_read_safe": True,
        "parallel_write_safe": True,
    }
# ==============================================================================
def c_span(name, rawtext, text, lineno, inliner, options=None, content=None):
# ==============================================================================
    # pylint: disable=W0613
    u"""Role ``cspan``: wrap *text* (a column count) into a colSpan marker."""
    if options is None:
        options = {}
    if content is None:
        content = []
    # no system messages to report
    return [colSpan(span=int(text))], []
# ==============================================================================
def r_span(name, rawtext, text, lineno, inliner, options=None, content=None):
# ==============================================================================
    # pylint: disable=W0613
    u"""Role ``rspan``: wrap *text* (a row count) into a rowSpan marker."""
    if options is None:
        options = {}
    if content is None:
        content = []
    # no system messages to report
    return [rowSpan(span=int(text))], []
# ==============================================================================
# Intermediate marker nodes emitted by the rspan/cspan roles; the table
# builder consumes them when computing an entry's morerows/morecols values.
class rowSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
class colSpan(nodes.General, nodes.Element): pass # pylint: disable=C0103,C0321
# ==============================================================================
# ==============================================================================
class FlatTable(Table):
# ==============================================================================

    u"""FlatTable (``flat-table``) directive.

    Parses the directive body (a double-stage bullet list) through
    ListTableBuilder and emits the resulting docutils table node.
    """

    option_spec = {
        'name': directives.unchanged
        , 'class': directives.class_option
        , 'header-rows': directives.nonnegative_int
        , 'stub-columns': directives.nonnegative_int
        , 'widths': directives.positive_int_list
        , 'fill-cells' : directives.flag }

    def run(self):
        """Build and return the table node (plus any title messages)."""

        if not self.content:
            error = self.state_machine.reporter.error(
                'The "%s" directive is empty; content required.' % self.name,
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            return [error]

        title, messages = self.make_title()
        node = nodes.Element() # anonymous container for parsing
        self.state.nested_parse(self.content, self.content_offset, node)

        tableBuilder = ListTableBuilder(self)
        tableBuilder.parseFlatTableNode(node)
        tableNode = tableBuilder.buildTableNode()
        # SDK.CONSOLE() # print --> tableNode.asdom().toprettyxml()
        if title:
            tableNode.insert(0, title)
        return [tableNode] + messages
# ==============================================================================
class ListTableBuilder(object):
# ==============================================================================
u"""Builds a table from a double-stage list"""
    def __init__(self, directive):
        """Remember the calling directive; rows are filled in later by
        parseFlatTableNode()."""
        self.directive = directive
        # each row is a list of cells; a cell is (cspan, rspan, nodes) or None
        self.rows = []
        # widest row (in columns, spans included) seen while parsing
        self.max_cols = 0
    def buildTableNode(self):
        """Assemble and return the docutils table node from the parsed rows."""

        colwidths = self.directive.get_column_widths(self.max_cols)
        if isinstance(colwidths, tuple):
            # Since docutils 0.13, get_column_widths returns a (widths,
            # colwidths) tuple, where widths is a string (i.e. 'auto').
            # See https://sourceforge.net/p/docutils/patches/120/.
            colwidths = colwidths[1]
        stub_columns = self.directive.options.get('stub-columns', 0)
        header_rows = self.directive.options.get('header-rows', 0)

        table = nodes.table()
        tgroup = nodes.tgroup(cols=len(colwidths))
        table += tgroup

        for colwidth in colwidths:
            colspec = nodes.colspec(colwidth=colwidth)
            # FIXME: It seems, that the stub method only works well in the
            # absence of rowspan (observed by the html builder, the docutils-xml
            # build seems OK). This is not extraordinary, because there exists
            # no table directive (except *this* flat-table) which allows to
            # define coexistent of rowspan and stubs (there was no use-case
            # before flat-table). This should be reviewed (later).
            if stub_columns:
                colspec.attributes['stub'] = 1
                stub_columns -= 1
            tgroup += colspec
        stub_columns = self.directive.options.get('stub-columns', 0)

        if header_rows:
            # leading rows become the table head
            thead = nodes.thead()
            tgroup += thead
            for row in self.rows[:header_rows]:
                thead += self.buildTableRowNode(row)

        tbody = nodes.tbody()
        tgroup += tbody

        for row in self.rows[header_rows:]:
            tbody += self.buildTableRowNode(row)
        return table
def buildTableRowNode(self, row_data, classes=None):
classes = [] if classes is None else classes
row = nodes.row()
for cell in row_data:
if cell is None:
continue
cspan, rspan, cellElements = cell
attributes = {"classes" : classes}
if rspan:
attributes['morerows'] = rspan
if cspan:
attributes['morecols'] = cspan
entry = nodes.entry(**attributes)
entry.extend(cellElements)
row += entry
return row
def raiseError(self, msg):
error = self.directive.state_machine.reporter.error(
msg
, nodes.literal_block(self.directive.block_text
, self.directive.block_text)
, line = self.directive.lineno )
raise SystemMessagePropagation(error)
def parseFlatTableNode(self, node):
u"""parses the node from a :py:class:`FlatTable` directive's body"""
if len(node) != 1 or not isinstance(node[0], nodes.bullet_list):
self.raiseError(
'Error parsing content block for the "%s" directive: '
'exactly one bullet list expected.' % self.directive.name )
for rowNum, rowItem in enumerate(node[0]):
row = self.parseRowItem(rowItem, rowNum)
self.rows.append(row)
self.roundOffTableDefinition()
def roundOffTableDefinition(self):
u"""Round off the table definition.
This method rounds off the table definition in :py:member:`rows`.
* This method inserts the needed ``None`` values for the missing cells
arising from spanning cells over rows and/or columns.
* recount the :py:member:`max_cols`
* Autospan or fill (option ``fill-cells``) missing cells on the right
side of the table-row
"""
y = 0
while y < len(self.rows):
x = 0
while x < len(self.rows[y]):
cell = self.rows[y][x]
if cell is None:
x += 1
continue
cspan, rspan = cell[:2]
# handle colspan in current row
for c in range(cspan):
try:
self.rows[y].insert(x+c+1, None)
except: # pylint: disable=W0702
# the user sets ambiguous rowspans
pass # SDK.CONSOLE()
# handle colspan in spanned rows
for r in range(rspan):
for c in range(cspan + 1):
try:
self.rows[y+r+1].insert(x+c, None)
except: # pylint: disable=W0702
# the user sets ambiguous rowspans
pass # SDK.CONSOLE()
x += 1
y += 1
# Insert the missing cells on the right side. For this, first
# re-calculate the max columns.
for row in self.rows:
if self.max_cols < len(row):
self.max_cols = len(row)
# fill with empty cells or cellspan?
fill_cells = False
if 'fill-cells' in self.directive.options:
fill_cells = True
for row in self.rows:
x = self.max_cols - len(row)
if x and not fill_cells:
if row[-1] is None:
row.append( ( x - 1, 0, []) )
else:
cspan, rspan, content = row[-1]
row[-1] = (cspan + x, rspan, content)
elif x and fill_cells:
for i in range(x):
row.append( (0, 0, nodes.comment()) )
def pprint(self):
# for debugging
retVal = "[ "
for row in self.rows:
retVal += "[ "
for col in row:
if col is None:
retVal += ('%r' % col)
retVal += "\n , "
else:
content = col[2][0].astext()
if len (content) > 30:
content = content[:30] + "..."
retVal += ('(cspan=%s, rspan=%s, %r)'
% (col[0], col[1], content))
retVal += "]\n , "
retVal = retVal[:-2]
retVal += "]\n , "
retVal = retVal[:-2]
return retVal + "]"
def parseRowItem(self, rowItem, rowNum):
row = []
childNo = 0
error = False
cell = None
target = None
for child in rowItem:
if (isinstance(child , nodes.comment)
or isinstance(child, nodes.system_message)):
pass
elif isinstance(child , nodes.target):
target = child
elif isinstance(child, nodes.bullet_list):
childNo += 1
cell = child
else:
error = True
break
if childNo != 1 or error:
self.raiseError(
'Error parsing content block for the "%s" directive: '
'two-level bullet list expected, but row %s does not '
'contain a second-level bullet list.'
% (self.directive.name, rowNum + 1))
for cellItem in cell:
cspan, rspan, cellElements = self.parseCellItem(cellItem)
if target is not None:
cellElements.insert(0, target)
row.append( (cspan, rspan, cellElements) )
return row
def parseCellItem(self, cellItem):
# search and remove cspan, rspan colspec from the first element in
# this listItem (field).
cspan = rspan = 0
if not len(cellItem):
return cspan, rspan, []
for elem in cellItem[0]:
if isinstance(elem, colSpan):
cspan = elem.get("span")
elem.parent.remove(elem)
continue
if isinstance(elem, rowSpan):
rspan = elem.get("span")
elem.parent.remove(elem)
continue
return cspan, rspan, cellItem[:]
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/rstFlatTable.py |
# SPDX-License-Identifier: GPL-2.0
#
# Sphinx has deprecated its older logging interface, but the replacement
# only goes back to 1.6. So here's a wrapper layer to keep around for
# as long as we support 1.4.
#
# We don't support 1.4 anymore, but we'll keep the wrappers around until
# we change all the code to not use them anymore :)
#
import sphinx
from sphinx.util import logging
logger = logging.getLogger('kerneldoc')
def warn(app, message):
    """Log *message* as a warning via the 'kerneldoc' logger.

    *app* is unused; it is kept so callers written against the old
    application-level logging API (see header comment) need not change.
    """
    logger.warning(message)
def verbose(app, message):
    """Log *message* at verbose level via the 'kerneldoc' logger.

    *app* is unused; kept only for backwards API compatibility.
    """
    logger.verbose(message)
def info(app, message):
    """Log *message* at info level via the 'kerneldoc' logger.

    *app* is unused; kept only for backwards API compatibility.
    """
    logger.info(message)
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/kernellog.py |
# -*- coding: utf-8; mode: python -*-
# pylint: disable=R0903, C0330, R0914, R0912, E0401
import os
import sys
from sphinx.util.osutil import fs_encoding
# ------------------------------------------------------------------------------
def loadConfig(namespace):
# ------------------------------------------------------------------------------

    u"""Load an additional configuration file into *namespace*.

    The name of the configuration file is taken from the environment
    ``SPHINX_CONF``. The external configuration file extends (or overwrites) the
    configuration values from the origin ``conf.py``.  With this you are able to
    maintain *build themes*.

    When ``SPHINX_CONF`` is unset, or points at the ``conf.py`` already being
    processed, *namespace* is left untouched.
    """

    config_file = os.environ.get("SPHINX_CONF", None)
    if (config_file is not None
        and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ):
        config_file = os.path.abspath(config_file)

        # Let's avoid one conf.py file just due to latex_documents
        start = config_file.find('Documentation/')
        if start >= 0:
            start = config_file.find('/', start + 1)

        end = config_file.rfind('/')
        if start >= 0 and end > 0:
            # Sub-directory of Documentation/ this conf.py belongs to,
            # e.g. "media".  (Renamed from 'dir' to avoid shadowing the
            # builtin; the dead 'has = True' flag was dropped.)
            src_dir = config_file[start + 1:end]

            print("source directory: %s" % src_dir)
            new_latex_docs = []
            latex_documents = namespace['latex_documents']

            # Keep only the first latex_documents entry belonging to this
            # sub-directory, with the directory prefix stripped.
            for entry in latex_documents:
                if entry[0].find(src_dir + '/') == 0:
                    fn = entry[0][len(src_dir) + 1:]
                    new_latex_docs.append((fn, entry[1], entry[2], entry[3], entry[4]))
                    break

            namespace['latex_documents'] = new_latex_docs

        # If there is an extra conf.py file, load it
        if os.path.isfile(config_file):
            sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
            config = namespace.copy()
            config['__file__'] = config_file
            with open(config_file, 'rb') as f:
                code = compile(f.read(), fs_encoding, 'exec')
                exec(code, config)
            del config['__file__']
            namespace.update(config)
        else:
            # No extra conf.py: just tag the build as a sub-project.
            config = namespace.copy()
            config['tags'].add("subproject")
            namespace.update(config)
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/load_config.py |
# SPDX-License-Identifier: GPL-2.0
# Copyright 2019 Jonathan Corbet <[email protected]>
#
# Apply kernel-specific tweaks after the initial document processing
# has been done.
#
from docutils import nodes
import sphinx
from sphinx import addnodes
if sphinx.version_info[0] < 2 or \
sphinx.version_info[0] == 2 and sphinx.version_info[1] < 1:
from sphinx.environment import NoUri
else:
from sphinx.errors import NoUri
import re
from itertools import chain
#
# Python 2 lacks re.ASCII...
#
try:
ascii_p3 = re.ASCII
except AttributeError:
ascii_p3 = 0
#
# Regex nastiness. Of course.
# Try to identify "function()" that's not already marked up some
# other way. Sphinx doesn't like a lot of stuff right after a
# :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last
# bit tries to restrict matches to things that won't create trouble.
#
RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3)
#
# Sphinx 2 uses the same :c:type role for struct, union, enum and typedef
#
RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)',
flags=ascii_p3)
#
# Sphinx 3 uses a different C role for each one of struct, union, enum and
# typedef
#
RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3)
#
# Detects a reference to a documentation page of the form Documentation/... with
# an optional extension
#
RE_doc = re.compile(r'(\bDocumentation/)?((\.\./)*[\w\-/]+)\.(rst|txt)')

# Matches a ".. c:namespace:: <name>" directive line.  The two leading dots
# are escaped: the previous pattern used a bare '..', which matches ANY two
# characters and could fire on unrelated lines containing "c:namespace::".
RE_namespace = re.compile(r'^\s*\.\.\s*c:namespace::\s*(\S+)\s*$')
#
# Reserved C words that we should skip when cross-referencing
#
Skipnames = [ 'for', 'if', 'register', 'sizeof', 'struct', 'unsigned' ]
#
# Many places in the docs refer to common system calls. It is
# pointless to try to cross-reference them and, as has been known
# to happen, somebody defining a function by these names can lead
# to the creation of incorrect and confusing cross references. So
# just don't even try with these names.
#
Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap',
'select', 'poll', 'fork', 'execve', 'clone', 'ioctl',
'socket' ]
c_namespace = ''
def markup_refs(docname, app, node):
    """Split the text of *node* into plain-text pieces and cross-reference
    nodes, returning the resulting replacement node list."""
    text = node.astext()

    #
    # Associate each regex with the function that will markup its matches;
    # the mapping differs between the Sphinx 2 and Sphinx 3+ C domains.
    #
    if sphinx.version_info[0] >= 3:
        markup_func = {RE_doc: markup_doc_ref,
                       RE_function: markup_func_ref_sphinx3,
                       RE_struct: markup_c_ref,
                       RE_union: markup_c_ref,
                       RE_enum: markup_c_ref,
                       RE_typedef: markup_c_ref}
    else:
        markup_func = {RE_doc: markup_doc_ref,
                       RE_function: markup_c_ref,
                       RE_generic_type: markup_c_ref}

    #
    # Collect every match of every regex and walk them in text order.
    #
    all_matches = chain(*[regex.finditer(text) for regex in markup_func])

    repl = []
    pos = 0
    for m in sorted(all_matches, key=lambda m: m.start()):
        # Any text between the previous match and this one stays plain.
        if m.start() > pos:
            repl.append(nodes.Text(text[pos:m.start()]))
        # Dispatch on the regex that produced the match.
        repl.append(markup_func[m.re](docname, app, m))
        pos = m.end()
    if pos < len(text):
        repl.append(nodes.Text(text[pos:]))
    return repl
#
# Keep track of cross-reference lookups that failed so we don't have to
# do them again.
#
# Cache of cross-reference targets whose lookup already failed, so the
# same (expensive) lookup is not repeated for every occurrence.
failed_lookups = {}

def failure_seen(target):
    """Return True if a previous lookup of *target* is known to have failed."""
    return target in failed_lookups

def note_failure(target):
    """Record that the cross-reference lookup of *target* failed."""
    failed_lookups[target] = True
#
# In sphinx3 we can cross-reference to C macro and function, each one with its
# own C role, but both match the same regex, so we try both.
#
def markup_func_ref_sphinx3(docname, app, match):
    """Convert a "name()" match into a C-domain cross reference (Sphinx >= 3).

    A namespaced target (``<c_namespace>.<name>``) is tried before the plain
    one; the first xref that resolves is returned, otherwise the original
    plain text.  Failed lookups are cached via note_failure() so they are
    not repeated.
    """
    cdom = app.env.domains['c']
    #
    # Go through the dance of getting an xref out of the C domain
    #
    base_target = match.group(2)
    target_text = nodes.Text(match.group(0))
    xref = None
    possible_targets = [base_target]
    # Check if this document has a namespace, and if so, try
    # cross-referencing inside it first.
    if c_namespace:
        possible_targets.insert(0, c_namespace + "." + base_target)

    if base_target not in Skipnames:
        for target in possible_targets:
            # Syscall-like names (Skipfuncs) and known-failed targets are
            # never looked up.
            if (target not in Skipfuncs) and not failure_seen(target):
                lit_text = nodes.literal(classes=['xref', 'c', 'c-func'])
                lit_text += target_text
                pxref = addnodes.pending_xref('', refdomain = 'c',
                                              reftype = 'function',
                                              reftarget = target,
                                              modname = None,
                                              classname = None)
                #
                # XXX The Latex builder will throw NoUri exceptions here,
                # work around that by ignoring them.
                #
                try:
                    xref = cdom.resolve_xref(app.env, docname, app.builder,
                                             'function', target, pxref,
                                             lit_text)
                except NoUri:
                    xref = None

                if xref:
                    return xref
                # Remember the miss so later occurrences are cheap.
                note_failure(target)

    return target_text
def markup_c_ref(docname, app, match):
    """Convert a matched C construct into a C-domain cross reference.

    The CSS class and C-domain role are selected from the regex that
    produced *match* (function/type on Sphinx 2; struct/union/enum/typedef
    on Sphinx 3+).  Returns the plain text when no xref resolves.
    """
    class_str = {# Sphinx 2 only
                 RE_function: 'c-func',
                 RE_generic_type: 'c-type',
                 # Sphinx 3+ only
                 RE_struct: 'c-struct',
                 RE_union: 'c-union',
                 RE_enum: 'c-enum',
                 RE_typedef: 'c-type',
                 }
    reftype_str = {# Sphinx 2 only
                   RE_function: 'function',
                   RE_generic_type: 'type',
                   # Sphinx 3+ only
                   RE_struct: 'struct',
                   RE_union: 'union',
                   RE_enum: 'enum',
                   RE_typedef: 'type',
                   }

    cdom = app.env.domains['c']
    #
    # Go through the dance of getting an xref out of the C domain
    #
    base_target = match.group(2)
    target_text = nodes.Text(match.group(0))
    xref = None
    possible_targets = [base_target]
    # Check if this document has a namespace, and if so, try
    # cross-referencing inside it first.
    if c_namespace:
        possible_targets.insert(0, c_namespace + "." + base_target)

    if base_target not in Skipnames:
        for target in possible_targets:
            # Common syscall names are never cross-referenced as functions.
            if not (match.re == RE_function and target in Skipfuncs):
                lit_text = nodes.literal(classes=['xref', 'c', class_str[match.re]])
                lit_text += target_text
                pxref = addnodes.pending_xref('', refdomain = 'c',
                                              reftype = reftype_str[match.re],
                                              reftarget = target, modname = None,
                                              classname = None)
                #
                # XXX The Latex builder will throw NoUri exceptions here,
                # work around that by ignoring them.
                #
                try:
                    xref = cdom.resolve_xref(app.env, docname, app.builder,
                                             reftype_str[match.re], target, pxref,
                                             lit_text)
                except NoUri:
                    xref = None

                if xref:
                    return xref

    return target_text
#
# Try to replace a documentation reference of the form Documentation/... with a
# cross reference to that page
#
def markup_doc_ref(docname, app, match):
    """Try to replace a Documentation/... reference with a cross reference
    to the corresponding page; fall back to the original plain text."""
    stddom = app.env.domains['std']
    #
    # Go through the dance of getting an xref out of the std domain.  An
    # absolute reference (with the leading "Documentation/") becomes a
    # "/<path>" target.
    #
    target = match.group(2)
    if match.group(1):
        target = "/" + target

    pxref = addnodes.pending_xref('', refdomain = 'std', reftype = 'doc',
                                  reftarget = target, modname = None,
                                  classname = None, refexplicit = False)
    #
    # XXX The Latex builder will throw NoUri exceptions here,
    # work around that by ignoring them.
    #
    try:
        xref = stddom.resolve_xref(app.env, docname, app.builder, 'doc',
                                   target, pxref, None)
    except NoUri:
        xref = None
    #
    # Return the xref if we got it; otherwise just return the plain text.
    #
    return xref if xref else nodes.Text(match.group(0))
def get_c_namespace(app, docname):
    """Return the C namespace declared by *docname*, or '' if none.

    Scans the reST source for the first ".. c:namespace:: <name>"
    directive (RE_namespace) and returns its name.
    """
    source = app.env.doc2path(docname)
    # reST sources in the kernel tree are UTF-8; do not depend on the
    # locale encoding (which may be ASCII in minimal build environments).
    with open(source, encoding="utf-8") as f:
        for line in f:
            match = RE_namespace.search(line)
            if match:
                # First declaration wins; the rest of the file is skipped.
                return match.group(1)
    return ''
def auto_markup(app, doctree, name):
    """Walk the resolved doctree and replace plain-text mentions of C
    constructs and Documentation/ pages with cross references."""
    global c_namespace
    c_namespace = get_c_namespace(app, name)

    def is_plain_text(node):
        # Only bare Text nodes qualify.  A ``literal`` parent means the
        # text was explicitly marked up (e.g. with :c:func:) already.
        if not isinstance(node, nodes.Text):
            return False
        if isinstance(node.parent, nodes.literal):
            return False
        # Text living anywhere under a reference node is already linked.
        ancestor = node.parent
        while ancestor:
            if isinstance(ancestor, nodes.Referential):
                return False
            ancestor = ancestor.parent
        return True

    #
    # This loop could eventually be improved on.  Someday maybe we
    # want a proper tree traversal with a lot of awareness of which
    # kinds of nodes to prune.  But this works well for now.
    #
    for para in doctree.traverse(nodes.paragraph):
        for txt in para.traverse(condition=is_plain_text):
            txt.parent.replace(txt, markup_refs(name, app, txt))
def setup(app):
    """Register the auto-markup pass with Sphinx."""
    app.connect('doctree-resolved', auto_markup)
    return dict(
        parallel_read_safe = True,
        parallel_write_safe = True,
    )
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/automarkup.py |
# coding=utf-8
#
# Copyright © 2016 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Jani Nikula <[email protected]>
#
# Please make sure this works on both python2 and python3.
#
import codecs
import os
import subprocess
import sys
import re
import glob
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, Directive
import sphinx
from sphinx.util.docutils import switch_source_input
import kernellog
__version__ = '1.0'
class KernelDocDirective(Directive):
    """Extract kernel-doc comments from the specified file.

    Runs the external ``kernel-doc`` script (``kerneldoc_bin`` config value)
    over the file given as the directive argument and parses its reST output
    into the document, preserving source line information via the
    ``.. LINENO`` markers the script emits.
    """
    # Fixed typo: the docutils attribute is 'required_arguments' (plural);
    # the old 'required_argument' was ignored, so a missing filename was
    # only caught later as an IndexError in run().
    required_arguments = 1
    optional_arguments = 4
    option_spec = {
        'doc': directives.unchanged_required,
        'export': directives.unchanged,
        'internal': directives.unchanged,
        'identifiers': directives.unchanged,
        'no-identifiers': directives.unchanged,
        'functions': directives.unchanged,
    }
    has_content = False

    def run(self):
        """Invoke kernel-doc and return the parsed section's children."""
        env = self.state.document.settings.env
        cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']

        # Pass the version string to kernel-doc, as it needs to use a different
        # dialect, depending what the C domain supports for each specific
        # Sphinx versions
        cmd += ['-sphinx-version', sphinx.__version__]

        filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
        export_file_patterns = []

        # Tell sphinx of the dependency
        env.note_dependency(os.path.abspath(filename))

        tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)

        # 'function' is an alias of 'identifiers'
        if 'functions' in self.options:
            self.options['identifiers'] = self.options.get('functions')

        # FIXME: make this nicer and more robust against errors
        if 'export' in self.options:
            cmd += ['-export']
            export_file_patterns = str(self.options.get('export')).split()
        elif 'internal' in self.options:
            cmd += ['-internal']
            export_file_patterns = str(self.options.get('internal')).split()
        elif 'doc' in self.options:
            cmd += ['-function', str(self.options.get('doc'))]
        elif 'identifiers' in self.options:
            identifiers = self.options.get('identifiers').split()
            if identifiers:
                for i in identifiers:
                    cmd += ['-function', i]
            else:
                cmd += ['-no-doc-sections']

        if 'no-identifiers' in self.options:
            no_identifiers = self.options.get('no-identifiers').split()
            if no_identifiers:
                for i in no_identifiers:
                    cmd += ['-nosymbol', i]

        for pattern in export_file_patterns:
            for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
                env.note_dependency(os.path.abspath(f))
                cmd += ['-export-file', f]

        cmd += [filename]

        try:
            kernellog.verbose(env.app,
                              'calling kernel-doc \'%s\'' % (" ".join(cmd)))

            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            out, err = p.communicate()

            out, err = codecs.decode(out, 'utf-8'), codecs.decode(err, 'utf-8')

            if p.returncode != 0:
                sys.stderr.write(err)

                kernellog.warn(env.app,
                               'kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
                return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
            elif env.config.kerneldoc_verbosity > 0:
                sys.stderr.write(err)

            lines = statemachine.string2lines(out, tab_width, convert_whitespace=True)
            result = ViewList()

            lineoffset = 0
            # Raw string: '\.' in a plain string is an invalid escape
            # sequence (DeprecationWarning on recent Pythons).
            line_regex = re.compile(r"^\.\. LINENO ([0-9]+)$")
            for line in lines:
                match = line_regex.search(line)
                if match:
                    # sphinx counts lines from 0
                    lineoffset = int(match.group(1)) - 1
                    # we must eat our comments since they upset the markup
                else:
                    doc = env.srcdir + "/" + env.docname + ":" + str(self.lineno)
                    result.append(line, doc + ": " + filename, lineoffset)
                    lineoffset += 1

            node = nodes.section()
            self.do_parse(result, node)

            return node.children

        except Exception as e:  # pylint: disable=W0703
            kernellog.warn(env.app, 'kernel-doc \'%s\' processing failed with: %s' %
                           (" ".join(cmd), str(e)))
            return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]

    def do_parse(self, result, node):
        """Parse the generated reST (a ViewList) into *node*."""
        with switch_source_input(self.state, result):
            self.state.nested_parse(result, 0, node, match_titles=1)
def setup(app):
    """Register the kernel-doc directive and its configuration values."""
    app.add_config_value('kerneldoc_bin', None, 'env')
    app.add_config_value('kerneldoc_srctree', None, 'env')
    app.add_config_value('kerneldoc_verbosity', 1, 'env')

    app.add_directive('kernel-doc', KernelDocDirective)

    return {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/kerneldoc.py |
# -*- coding: utf-8; mode: python -*-
# pylint: disable=C0103, R0903, R0912, R0915
u"""
scalable figure and image handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sphinx extension which implements scalable image handling.
:copyright: Copyright (C) 2016 Markus Heiser
:license: GPL Version 2, June 1991 see Linux/COPYING for details.
The build for image formats depend on image's source format and output's
destination format. This extension implement methods to simplify image
handling from the author's POV. Directives like ``kernel-figure`` implement
methods *to* always get the best output-format even if some tools are not
installed. For more details take a look at ``convert_image(...)`` which is
the core of all conversions.
* ``.. kernel-image``: for image handling / a ``.. image::`` replacement
* ``.. kernel-figure``: for figure handling / a ``.. figure::`` replacement
* ``.. kernel-render``: for render markup / a concept to embed *render*
markups (or languages). Supported markups (see ``RENDER_MARKUP_EXT``)
- ``DOT``: render embedded Graphviz's **DOC**
- ``SVG``: render embedded Scalable Vector Graphics (**SVG**)
- ... *developable*
Used tools:
* ``dot(1)``: Graphviz (https://www.graphviz.org). If Graphviz is not
available, the DOT language is inserted as literal-block.
For conversion to PDF, ``rsvg-convert(1)`` of librsvg
(https://gitlab.gnome.org/GNOME/librsvg) is used when available.
* SVG to PDF: To generate PDF, you need at least one of this tools:
- ``convert(1)``: ImageMagick (https://www.imagemagick.org)
- ``inkscape(1)``: Inkscape (https://inkscape.org/)
List of customizations:
* generate PDF from SVG / used by PDF (LaTeX) builder
* generate SVG (html-builder) and PDF (latex-builder) from DOT files.
DOT: see https://www.graphviz.org/content/dot-language
"""
import os
from os import path
import subprocess
from hashlib import sha1
import re
from docutils import nodes
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import images
import sphinx
from sphinx.util.nodes import clean_astext
import kernellog
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
if major == 1 and minor > 3:
# patches.Figure only landed in Sphinx 1.4
from sphinx.directives.patches import Figure # pylint: disable=C0413
else:
Figure = images.Figure
__version__ = '1.0.0'
# simple helper
# -------------
def which(cmd):
    """Searches the ``cmd`` in the ``PATH`` environment.

    This *which* searches the PATH for executable ``cmd`` . First match is
    returned, if nothing is found, ``None`` is returned.
    """
    folders = (os.environ.get('PATH', None) or os.defpath).split(os.pathsep)
    candidates = (folder + os.sep + cmd for folder in folders)
    # Note: like the classic implementation, only existence as a regular
    # file is checked, not the executable bit.
    return next((c for c in candidates if path.isfile(c)), None)
def mkdir(folder, mode=0o775):
    """Create *folder* (including parents) if it does not yet exist.

    Uses ``exist_ok`` instead of a check-then-create pair, so a concurrent
    creation of the same folder (e.g. by a parallel build step) cannot
    race.  An existing non-directory at *folder* still raises
    FileExistsError, matching the old behaviour.
    """
    os.makedirs(folder, mode, exist_ok=True)
def file2literal(fname):
    """Read *fname* and return its content wrapped in a literal_block node.

    Used as the fallback when an image (DOT/SVG source) cannot be
    converted: the raw source text is embedded into the document instead.
    """
    # Sources in the kernel tree are UTF-8; don't depend on the locale
    # encoding (which may be ASCII in minimal build environments).
    with open(fname, "r", encoding="utf-8") as src:
        data = src.read()
        node = nodes.literal_block(data, data)
    return node
def isNewer(path1, path2):
    """Returns True if ``path1`` is newer than ``path2``

    If ``path1`` exists and is newer than ``path2`` the function returns
    ``True`` is returned otherwise ``False``
    """
    if not path.exists(path1):
        return False
    # NOTE(review): comparison uses st_ctime (inode change time), not
    # st_mtime -- kept as-is to preserve the original behaviour.
    return os.stat(path1).st_ctime > os.stat(path2).st_ctime
def pass_handle(self, node):               # pylint: disable=W0613
    """No-op depart handler used for node types needing no special action."""
    pass
# setup conversion tools and sphinx extension
# -------------------------------------------
# Graphviz's dot(1) support
dot_cmd = None
# dot(1) -Tpdf should be used
dot_Tpdf = False
# ImageMagick' convert(1) support
convert_cmd = None
# librsvg's rsvg-convert(1) support
rsvg_convert_cmd = None
# Inkscape's inkscape(1) support
inkscape_cmd = None
# Inkscape prior to 1.0 uses different command options
inkscape_ver_one = False
def setup(app):
    """Wire up the kernel image/figure/render directives and node types."""
    # check toolchain first
    app.connect('builder-inited', setupTools)

    # Each directive gets its node type registered with the same
    # (visit, depart) pair for every output format.
    registrations = (
        ("kernel-image",  KernelImage,  kernel_image,  visit_kernel_image),
        ("kernel-figure", KernelFigure, kernel_figure, visit_kernel_figure),
        ("kernel-render", KernelRender, kernel_render, visit_kernel_render),
    )
    for directive_name, directive_cls, node_cls, visitor in registrations:
        app.add_directive(directive_name, directive_cls)
        handlers = (visitor, pass_handle)
        app.add_node(node_cls,
                     html    = handlers,
                     latex   = handlers,
                     texinfo = handlers,
                     text    = handlers,
                     man     = handlers)

    app.connect('doctree-read', add_kernel_figure_to_std_domain)

    return dict(
        version = __version__,
        parallel_read_safe = True,
        parallel_write_safe = True
    )
def setupTools(app):
    u"""
    Check available build tools and log some *verbose* messages.

    This function is called once, when the builder is initiated.  It probes
    the PATH for dot(1), convert(1), rsvg-convert(1) and inkscape(1) and
    records the results in the module-level globals that drive the
    conversion functions below.
    """
    global dot_cmd, dot_Tpdf, convert_cmd, rsvg_convert_cmd # pylint: disable=W0603
    global inkscape_cmd, inkscape_ver_one # pylint: disable=W0603
    kernellog.verbose(app, "kfigure: check installed tools ...")

    dot_cmd = which('dot')
    convert_cmd = which('convert')
    rsvg_convert_cmd = which('rsvg-convert')
    inkscape_cmd = which('inkscape')

    if dot_cmd:
        kernellog.verbose(app, "use dot(1) from: " + dot_cmd)

        # Ask dot for its supported output formats to see whether it can
        # produce PDF directly.
        try:
            dot_Thelp_list = subprocess.check_output([dot_cmd, '-Thelp'],
                                                     stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # dot -Thelp exits non-zero but still prints the format list.
            dot_Thelp_list = err.output
            pass

        dot_Tpdf_ptn = b'pdf'
        dot_Tpdf = re.search(dot_Tpdf_ptn, dot_Thelp_list)
    else:
        kernellog.warn(app, "dot(1) not found, for better output quality install "
                       "graphviz from https://www.graphviz.org")
    if inkscape_cmd:
        kernellog.verbose(app, "use inkscape(1) from: " + inkscape_cmd)
        inkscape_ver = subprocess.check_output([inkscape_cmd, '--version'],
                                               stderr=subprocess.DEVNULL)
        ver_one_ptn = b'Inkscape 1'
        inkscape_ver_one = re.search(ver_one_ptn, inkscape_ver)
        # When Inkscape is available it is used exclusively for SVG->PDF;
        # the other converters are disabled.
        convert_cmd = None
        rsvg_convert_cmd = None
        dot_Tpdf = False

    else:
        if convert_cmd:
            kernellog.verbose(app, "use convert(1) from: " + convert_cmd)
        else:
            kernellog.verbose(app,
                "Neither inkscape(1) nor convert(1) found.\n"
                "For SVG to PDF conversion, "
                "install either Inkscape (https://inkscape.org/) (preferred) or\n"
                "ImageMagick (https://www.imagemagick.org)")

        if rsvg_convert_cmd:
            kernellog.verbose(app, "use rsvg-convert(1) from: " + rsvg_convert_cmd)
            kernellog.verbose(app, "use 'dot -Tsvg' and rsvg-convert(1) for DOT -> PDF conversion")
            dot_Tpdf = False
        else:
            kernellog.verbose(app,
                "rsvg-convert(1) not found.\n"
                " SVG rendering of convert(1) is done by ImageMagick-native renderer.")

        if dot_Tpdf:
            kernellog.verbose(app, "use 'dot -Tpdf' for DOT -> PDF conversion")
        else:
            kernellog.verbose(app, "use 'dot -Tsvg' and convert(1) for DOT -> PDF conversion")
# integrate conversion tools
# --------------------------
RENDER_MARKUP_EXT = {
# The '.ext' must be handled by convert_image(..) function's *in_ext* input.
# <name> : <.ext>
'DOT' : '.dot',
'SVG' : '.svg'
}
def convert_image(img_node, translator, src_fname=None):
    """Convert an image node for the builder.

    Different builder prefer different image formats, e.g. *latex* builder
    prefer PDF while *html* builder prefer SVG format for images.

    This function handles output image formats in dependence of source the
    format (of the image) and the translator's output format.  When no
    suitable converter is available the raw source is embedded as a
    literal block instead (file2literal).
    """
    app = translator.builder.app

    fname, in_ext = path.splitext(path.basename(img_node['uri']))
    if src_fname is None:
        # Prefer the source tree copy; fall back to the build output dir.
        src_fname = path.join(translator.builder.srcdir, img_node['uri'])
        if not path.exists(src_fname):
            src_fname = path.join(translator.builder.outdir, img_node['uri'])

    dst_fname = None

    # in kernel builds, use 'make SPHINXOPTS=-v' to see verbose messages
    kernellog.verbose(app, 'assert best format for: ' + img_node['uri'])

    if in_ext == '.dot':

        if not dot_cmd:
            kernellog.verbose(app,
                              "dot from graphviz not available / include DOT raw.")
            img_node.replace_self(file2literal(src_fname))

        elif translator.builder.format == 'latex':
            dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
            img_node['uri'] = fname + '.pdf'
            img_node['candidates'] = {'*': fname + '.pdf'}

        elif translator.builder.format == 'html':
            dst_fname = path.join(
                translator.builder.outdir,
                translator.builder.imagedir,
                fname + '.svg')
            img_node['uri'] = path.join(
                translator.builder.imgpath, fname + '.svg')
            img_node['candidates'] = {
                '*': path.join(translator.builder.imgpath, fname + '.svg')}

        else:
            # all other builder formats will include DOT as raw
            img_node.replace_self(file2literal(src_fname))

    elif in_ext == '.svg':

        if translator.builder.format == 'latex':
            if not inkscape_cmd and convert_cmd is None:
                kernellog.warn(app,
                                  "no SVG to PDF conversion available / include SVG raw."
                                  "\nIncluding large raw SVGs can cause xelatex error."
                                  "\nInstall Inkscape (preferred) or ImageMagick.")
                img_node.replace_self(file2literal(src_fname))
            else:
                dst_fname = path.join(translator.builder.outdir, fname + '.pdf')
                img_node['uri'] = fname + '.pdf'
                img_node['candidates'] = {'*': fname + '.pdf'}

    if dst_fname:
        # the builder needs not to copy one more time, so pop it if exists.
        translator.builder.images.pop(img_node['uri'], None)
        _name = dst_fname[len(translator.builder.outdir) + 1:]

        if isNewer(dst_fname, src_fname):
            # Conversion result is up to date; skip re-running the tools.
            kernellog.verbose(app,
                              "convert: {out}/%s already exists and is newer" % _name)

        else:
            ok = False
            mkdir(path.dirname(dst_fname))

            if in_ext == '.dot':
                kernellog.verbose(app, 'convert DOT to: {out}/' + _name)
                if translator.builder.format == 'latex' and not dot_Tpdf:
                    # Two-step DOT -> SVG -> PDF when dot cannot emit PDF.
                    svg_fname = path.join(translator.builder.outdir, fname + '.svg')
                    ok1 = dot2format(app, src_fname, svg_fname)
                    ok2 = svg2pdf_by_rsvg(app, svg_fname, dst_fname)
                    ok = ok1 and ok2

                else:
                    ok = dot2format(app, src_fname, dst_fname)

            elif in_ext == '.svg':
                kernellog.verbose(app, 'convert SVG to: {out}/' + _name)
                ok = svg2pdf(app, src_fname, dst_fname)

            if not ok:
                # Conversion failed: embed the raw source instead.
                img_node.replace_self(file2literal(src_fname))
def dot2format(app, dot_fname, out_fname):
    """Converts DOT file to ``out_fname`` using ``dot(1)``.

    * ``dot_fname`` pathname of the input DOT file, including extension ``.dot``
    * ``out_fname`` pathname of the output file, including format extension

    The *format extension* depends on the ``dot`` command (see ``man dot``
    option ``-Txxx``). Normally you will use one of the following extensions:

    - ``.ps`` for PostScript,
    - ``.svg`` or ``svgz`` for Structured Vector Graphics,
    - ``.fig`` for XFIG graphics and
    - ``.png`` or ``gif`` for common bitmap graphics.
    """
    # The output format is dot's -T argument, taken from the extension.
    out_format = path.splitext(out_fname)[1][1:]
    cmd = [dot_cmd, '-T%s' % out_format, dot_fname]
    with open(out_fname, "w") as out:
        exit_code = subprocess.call(cmd, stdout = out)
    if exit_code != 0:
        kernellog.warn(app,
                      "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
    return exit_code == 0
def svg2pdf(app, svg_fname, pdf_fname):
    """Converts SVG to PDF with ``inkscape(1)`` or ``convert(1)`` command.

    Uses ``inkscape(1)`` from Inkscape (https://inkscape.org/) or ``convert(1)``
    from ImageMagick (https://www.imagemagick.org) for conversion.
    Returns ``True`` on success and ``False`` if an error occurred.

    * ``svg_fname`` pathname of the input SVG file with extension (``.svg``)
    * ``pdf_name`` pathname of the output PDF file with extension (``.pdf``)

    """
    # Default to ImageMagick; replaced by Inkscape below when available.
    cmd = [convert_cmd, svg_fname, pdf_fname]
    cmd_name = 'convert(1)'

    if inkscape_cmd:
        cmd_name = 'inkscape(1)'
        # Inkscape 1.x changed the export command line options.
        if inkscape_ver_one:
            cmd = [inkscape_cmd, '-o', pdf_fname, svg_fname]
        else:
            cmd = [inkscape_cmd, '-z', '--export-pdf=%s' % pdf_fname, svg_fname]

    try:
        warning_msg = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        exit_code = 0
    except subprocess.CalledProcessError as err:
        # Keep the tool's output for the log message below.
        warning_msg = err.output
        exit_code = err.returncode
        pass

    if exit_code != 0:
        kernellog.warn(app, "Error #%d when calling: %s" % (exit_code, " ".join(cmd)))
        if warning_msg:
            kernellog.warn(app, "Warning msg from %s: %s"
                           % (cmd_name, str(warning_msg, 'utf-8')))
    elif warning_msg:
        # The tool succeeded but printed something; log it at low priority.
        kernellog.verbose(app, "Warning msg from %s (likely harmless):\n%s"
                          % (cmd_name, str(warning_msg, 'utf-8')))

    return bool(exit_code == 0)
def svg2pdf_by_rsvg(app, svg_fname, pdf_fname):
    """Convert SVG to PDF with ``rsvg-convert(1)`` command.

    * ``svg_fname`` pathname of input SVG file, including extension ``.svg``
    * ``pdf_fname`` pathname of output PDF file, including extension ``.pdf``

    Input SVG file should be the one generated by ``dot2format()``.
    SVG -> PDF conversion is done by ``rsvg-convert(1)``.

    If ``rsvg-convert(1)`` is unavailable, fall back to ``svg2pdf()``.
    """
    if rsvg_convert_cmd is None:
        # rsvg-convert(1) not installed, delegate to inkscape/convert
        return svg2pdf(app, svg_fname, pdf_fname)
    cmd = [rsvg_convert_cmd, '--format=pdf', '-o', pdf_fname, svg_fname]
    # use stdout and stderr from parent
    rc = subprocess.call(cmd)
    if rc != 0:
        kernellog.warn(app, "Error #%d when calling: %s" % (rc, " ".join(cmd)))
    return rc == 0
# image handling
# ---------------------
def visit_kernel_image(self, node):    # pylint: disable=W0613
    """Visitor of the ``kernel_image`` Node.

    Handles the ``image`` child-node with the ``convert_image(...)``.
    """
    # a kernel_image node wraps exactly one image child
    convert_image(node[0], self)
# marker subclass: behaves exactly like a plain image node, but is
# dispatched to visit_kernel_image so its content can be converted
class kernel_image(nodes.image):
    """Node for ``kernel-image`` directive."""
    pass
class KernelImage(images.Image):
    u"""KernelImage directive

    Earns everything from ``.. image::`` directive, except *remote URI* and
    *glob* pattern. The KernelImage wraps a image node into a
    kernel_image node. See ``visit_kernel_image``.
    """

    def run(self):
        uri = self.arguments[0]
        # remote images and glob patterns can't be converted locally
        if '://' in uri or uri.endswith('.*'):
            raise self.severe(
                'Error in "%s: %s": glob pattern and remote images are not allowed'
                % (self.name, uri))
        result = images.Image.run(self)
        if len(result) == 2 or isinstance(result[0], nodes.system_message):
            return result
        (image_node,) = result
        # wrap image node into a kernel_image node / see visitors
        return [kernel_image('', image_node)]
# figure handling
# ---------------------
def visit_kernel_figure(self, node):   # pylint: disable=W0613
    """Visitor of the ``kernel_figure`` Node.

    Handles the ``image`` child-node with the ``convert_image(...)``.
    """
    # the image sits one level deeper: figure -> image
    convert_image(node[0][0], self)
# marker subclass: identical to a figure node, but dispatched to
# visit_kernel_figure so its image child can be converted
class kernel_figure(nodes.figure):
    """Node for ``kernel-figure`` directive."""
class KernelFigure(Figure):
    u"""KernelFigure directive

    Earns everything from ``.. figure::`` directive, except *remote URI* and
    *glob* pattern. The KernelFigure wraps a figure node into a kernel_figure
    node. See ``visit_kernel_figure``.
    """

    def run(self):
        uri = self.arguments[0]
        # remote figures and glob patterns can't be converted locally
        if uri.endswith('.*') or uri.find('://') != -1:
            raise self.severe(
                'Error in "%s: %s":'
                ' glob pattern and remote images are not allowed'
                % (self.name, uri))
        result = Figure.run(self)
        # a 2-element result or a leading system_message means the base
        # directive reported a problem; pass it through unchanged
        if len(result) == 2 or isinstance(result[0], nodes.system_message):
            return result
        (figure_node,) = result
        # wrap figure node into a kernel_figure node / see visitors
        node = kernel_figure('', figure_node)
        return [node]
# render handling
# ---------------------
def visit_kernel_render(self, node):
    """Visitor of the ``kernel_render`` Node.

    If rendering tools available, save the markup of the ``literal_block`` child
    node into a file and replace the ``literal_block`` node with a new created
    ``image`` node, pointing to the saved markup file. Afterwards, handle the
    image child-node with the ``convert_image(...)``.
    """
    app = self.builder.app
    srclang = node.get('srclang')
    kernellog.verbose(app, 'visit kernel-render node lang: "%s"' % (srclang))

    tmp_ext = RENDER_MARKUP_EXT.get(srclang, None)
    if tmp_ext is None:
        # unknown source language: keep the raw literal_block in place
        kernellog.warn(app, 'kernel-render: "%s" unknown / include raw.' % (srclang))
        return

    if not dot_cmd and tmp_ext == '.dot':
        kernellog.verbose(app, "dot from graphviz not available / include raw.")
        return

    literal_block = node[0]

    code = literal_block.astext()
    # file name is content-addressed so identical markup is rendered once
    hashobj = code.encode('utf-8') #  str(node.attributes)
    fname = path.join('%s-%s' % (srclang, sha1(hashobj).hexdigest()))

    tmp_fname = path.join(
        self.builder.outdir, self.builder.imagedir, fname + tmp_ext)

    if not path.isfile(tmp_fname):
        mkdir(path.dirname(tmp_fname))
        with open(tmp_fname, "w") as out:
            out.write(code)

    # swap the literal_block for an image node pointing at the saved markup
    img_node = nodes.image(node.rawsource, **node.attributes)
    img_node['uri'] = path.join(self.builder.imgpath, fname + tmp_ext)
    img_node['candidates'] = {
        '*': path.join(self.builder.imgpath, fname + tmp_ext)}
    literal_block.replace_self(img_node)
    convert_image(img_node, self, tmp_fname)
# container node holding the raw markup (as a literal_block child)
# until visit_kernel_render converts it to an image at build time
class kernel_render(nodes.General, nodes.Inline, nodes.Element):
    """Node for ``kernel-render`` directive."""
    pass
class KernelRender(Figure):
    u"""KernelRender directive

    Render content by external tool.  Has all the options known from the
    *figure*  directive, plus option ``caption``.  If ``caption`` has a
    value, a figure node with the *caption* is inserted. If not, a image node is
    inserted.

    The KernelRender directive wraps the text of the directive into a
    literal_block node and wraps it into a kernel_render node. See
    ``visit_kernel_render``.
    """
    has_content = True
    required_arguments = 1
    optional_arguments = 0
    final_argument_whitespace = False

    # earn options from 'figure'
    option_spec = Figure.option_spec.copy()
    option_spec['caption'] = directives.unchanged

    def run(self):
        # build_node() returns a single node; wrap it once here
        return [self.build_node()]

    def build_node(self):
        """Build and return the node to insert.

        Returns a ``kernel_render`` node (wrapped in a figure node when the
        ``caption`` option is set), or a system-message node when the source
        language is unknown or the directive has no content.
        """
        srclang = self.arguments[0].strip()
        if srclang not in RENDER_MARKUP_EXT.keys():
            # fix: return the warning node itself, not a list -- run()
            # already wraps the result, which previously produced a
            # nested [[system_message]]
            return self.state_machine.reporter.warning(
                'Unknown source language "%s", use one of: %s.' % (
                    srclang, ",".join(RENDER_MARKUP_EXT.keys())),
                line=self.lineno)

        code = '\n'.join(self.content)
        if not code.strip():
            return self.state_machine.reporter.warning(
                'Ignoring "%s" directive without content.' % (
                    self.name),
                line=self.lineno)

        node = kernel_render()
        node['alt'] = self.options.get('alt','')
        node['srclang'] = srclang
        literal_node = nodes.literal_block(code, code)
        node += literal_node

        caption = self.options.get('caption')
        if caption:
            # parse caption's content
            parsed = nodes.Element()
            self.state.nested_parse(
                ViewList([caption], source=''), self.content_offset, parsed)
            caption_node = nodes.caption(
                parsed[0].rawsource, '', *parsed[0].children)
            caption_node.source = parsed[0].source
            caption_node.line = parsed[0].line

            # a caption was given: promote to a figure carrying the caption
            figure_node = nodes.figure('', node)
            for k,v in self.options.items():
                figure_node[k] = v
            figure_node += caption_node
            node = figure_node

        return node
def add_kernel_figure_to_std_domain(app, doctree):
    """Add kernel-figure anchors to 'std' domain.

    The ``StandardDomain.process_doc(..)`` method does not know how to resolve
    the caption (label) of ``kernel-figure`` directive (it only knows about
    standard nodes, e.g. table, figure etc.). Without any additional handling
    this will result in a 'undefined label' for kernel-figures.

    This handle adds labels of kernel-figure to the 'std' domain labels.
    """
    std = app.env.domains["std"]
    docname = app.env.docname
    labels = std.data["labels"]

    for name, explicit in doctree.nametypes.items():
        # only explicit (user-written) labels are registered
        if not explicit:
            continue
        labelid = doctree.nameids[name]
        if labelid is None:
            continue
        node = doctree.ids[labelid]

        if node.tagname == 'kernel_figure':
            # use the first caption found below the figure as the
            # label's section name
            for n in node.next_node():
                if n.tagname == 'caption':
                    sectname = clean_astext(n)
                    # add label to std domain
                    labels[name] = docname, labelid, sectname
                    break
| grace-kernel-grace-kernel-6.1.y | Documentation/sphinx/kfigure.py |
# -*- coding: utf-8; mode: python -*-
# SPDX-License-Identifier: GPL-2.0

project = 'Linux Media Subsystem Documentation'

# It is possible to run Sphinx in nickpick mode with:
nitpicky = True

# within nit-picking build, do not refer to any intersphinx object
intersphinx_mapping = {}

# In nickpick mode, it will complain about lots of missing references that
#
# 1) are just typedefs like: bool, __u32, etc;
# 2) It will complain for things like: enum, NULL;
# 3) It will complain for symbols that should be on different
#    books (but currently aren't ported to ReST)
#
# The list below has a list of such symbols to be ignored in nitpick mode
# (each entry must appear only once; duplicates are dead weight)
#
nitpick_ignore = [
    ("c:func", "clock_gettime"),
    ("c:func", "close"),
    ("c:func", "container_of"),
    ("c:func", "copy_from_user"),
    ("c:func", "copy_to_user"),
    ("c:func", "determine_valid_ioctls"),
    ("c:func", "ERR_PTR"),
    ("c:func", "i2c_new_client_device"),
    ("c:func", "ioctl"),
    ("c:func", "IS_ERR"),
    ("c:func", "KERNEL_VERSION"),
    ("c:func", "mmap"),
    ("c:func", "open"),
    ("c:func", "pci_name"),
    ("c:func", "poll"),
    ("c:func", "PTR_ERR"),
    ("c:func", "read"),
    ("c:func", "release"),
    ("c:func", "set"),
    ("c:func", "struct fd_set"),
    ("c:func", "struct pollfd"),
    ("c:func", "usb_make_path"),
    ("c:func", "wait_finish"),
    ("c:func", "wait_prepare"),
    ("c:func", "write"),

    ("c:type", "atomic_t"),
    ("c:type", "bool"),
    ("c:type", "boolean"),
    ("c:type", "buf_queue"),
    ("c:type", "device"),
    ("c:type", "device_driver"),
    ("c:type", "device_node"),
    ("c:type", "enum"),
    ("c:type", "fd"),
    ("c:type", "fd_set"),
    ("c:type", "file"),
    ("c:type", "i2c_adapter"),
    ("c:type", "i2c_board_info"),
    ("c:type", "i2c_client"),
    ("c:type", "int16_t"),
    ("c:type", "ktime_t"),
    ("c:type", "led_classdev_flash"),
    ("c:type", "list_head"),
    ("c:type", "lock_class_key"),
    ("c:type", "module"),
    ("c:type", "mutex"),
    ("c:type", "NULL"),
    ("c:type", "off_t"),
    ("c:type", "pci_dev"),
    ("c:type", "pdvbdev"),
    ("c:type", "poll_table"),
    ("c:type", "platform_device"),
    ("c:type", "pollfd"),
    ("c:type", "poll_table_struct"),
    ("c:type", "s32"),
    ("c:type", "s64"),
    ("c:type", "sd"),
    ("c:type", "size_t"),
    ("c:type", "spi_board_info"),
    ("c:type", "spi_device"),
    ("c:type", "spi_master"),
    ("c:type", "ssize_t"),
    ("c:type", "fb_fix_screeninfo"),
    ("c:type", "timeval"),
    ("c:type", "video_capability"),
    ("c:type", "__u16"),
    ("c:type", "u16"),
    ("c:type", "__u32"),
    ("c:type", "u32"),
    ("c:type", "__u64"),
    ("c:type", "u64"),
    ("c:type", "u8"),
    ("c:type", "uint16_t"),
    ("c:type", "uint32_t"),
    ("c:type", "union"),
    ("c:type", "__user"),
    ("c:type", "usb_device"),
    ("c:type", "usb_interface"),
    ("c:type", "v4l2_std_id"),
    ("c:type", "video_system_t"),
    ("c:type", "vm_area_struct"),

    # Opaque structures
    ("c:type", "v4l2_m2m_dev"),
]
| grace-kernel-grace-kernel-6.1.y | Documentation/userspace-api/media/conf_nitpick.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Tool for analyzing boot timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <[email protected]>
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's boot time. It creates an html representation of
# the kernel boot timeline up to the start of the init process.
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
import shutil
from datetime import datetime, timedelta
from subprocess import call, Popen, PIPE
import sleepgraph as aslib
def pprint(msg):
	"""Print *msg* and flush stdout immediately (useful under cron/pipes)."""
	print(msg)
	sys.stdout.flush()
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues(aslib.SystemValues):
	"""Global, single-instance container used to store system values and
	test parameters (shared via the module-level ``sysvals`` object)."""
	title = 'BootGraph'
	version = '2.2'
	hostname = 'localhost'
	testtime = ''
	kernel = ''
	dmesgfile = ''         # input dmesg log (empty = read live dmesg)
	ftracefile = ''        # input ftrace log
	htmlfile = 'bootgraph.html'
	testdir = ''
	kparams = ''
	result = ''
	useftrace = False
	usecallgraph = False
	suspendmode = 'boot'
	max_graph_depth = 2
	graph_filter = 'do_one_initcall'
	reboot = False
	manual = False
	iscronjob = False
	timeformat = '%.6f'
	bootloader = 'grub'
	blexec = []            # boot-loader update command, set by getBootLoader()
	def __init__(self):
		self.kernel, self.hostname = 'unknown', platform.node()
		self.testtime = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
		if os.path.exists('/proc/version'):
			fp = open('/proc/version', 'r')
			self.kernel = self.kernelVersion(fp.read().strip())
			fp.close()
		self.testdir = datetime.now().strftime('boot-%y%m%d-%H%M%S')
	def kernelVersion(self, msg):
		"""Extract the version string from a 'Linux version ...' banner."""
		m = re.match('^[Ll]inux *[Vv]ersion *(?P<v>\S*) .*', msg)
		if m:
			return m.group('v')
		return 'unknown'
	def checkFtraceKernelVersion(self):
		# boot-time ftrace kernel parameters need kernel >= 4.10.0
		m = re.match('^(?P<x>[0-9]*)\.(?P<y>[0-9]*)\.(?P<z>[0-9]*).*', self.kernel)
		if m:
			val = tuple(map(int, m.groups()))
			if val >= (4, 10, 0):
				return True
		return False
	def kernelParams(self):
		"""Build the kernel command-line additions for the next boot."""
		cmdline = 'initcall_debug log_buf_len=32M'
		if self.useftrace:
			# size the per-cpu trace buffer from available memory
			# (cpucount/memtotal come from the aslib base class)
			if self.cpucount > 0:
				bs = min(self.memtotal // 2, 2*1024*1024) // self.cpucount
			else:
				bs = 131072
			cmdline += ' trace_buf_size=%dK trace_clock=global '\
			'trace_options=nooverwrite,funcgraph-abstime,funcgraph-cpu,'\
			'funcgraph-duration,funcgraph-proc,funcgraph-tail,'\
			'nofuncgraph-overhead,context-info,graph-time '\
			'ftrace=function_graph '\
			'ftrace_graph_max_depth=%d '\
			'ftrace_graph_filter=%s' % \
				(bs, self.max_graph_depth, self.graph_filter)
		return cmdline
	def setGraphFilter(self, val):
		"""Validate a comma-separated function list against the kernel's
		available_filter_functions and store it as the graph filter."""
		master = self.getBootFtraceFilterFunctions()
		fs = ''
		for i in val.split(','):
			func = i.strip()
			if func == '':
				doError('badly formatted filter function string')
			if '[' in func or ']' in func:
				doError('loadable module functions not allowed - "%s"' % func)
			if ' ' in func:
				doError('spaces found in filter functions - "%s"' % func)
			if func not in master:
				doError('function "%s" not available for ftrace' % func)
			if not fs:
				fs = func
			else:
				fs += ','+func
		if not fs:
			doError('badly formatted filter function string')
		self.graph_filter = fs
	def getBootFtraceFilterFunctions(self):
		"""Return the list of functions ftrace can filter on (root only)."""
		self.rootCheck(True)
		fp = open(self.tpath+'available_filter_functions')
		fulllist = fp.read().split('\n')
		fp.close()
		list = []
		for i in fulllist:
			# skip blank lines and module functions ("name [module]")
			if not i or ' ' in i or '[' in i or ']' in i:
				continue
			list.append(i)
		return list
	def myCronJob(self, line):
		# return True if this crontab line was installed by this tool
		if '@reboot' not in line:
			return False
		if 'bootgraph' in line or 'analyze_boot.py' in line or '-cronjob' in line:
			return True
		return False
	def cronjobCmdString(self):
		"""Rebuild the current command line for a @reboot cron job,
		dropping one-shot arguments and absolutizing file paths."""
		cmdline = '%s -cronjob' % os.path.abspath(sys.argv[0])
		args = iter(sys.argv[1:])
		for arg in args:
			if arg in ['-h', '-v', '-cronjob', '-reboot', '-verbose']:
				continue
			elif arg in ['-o', '-dmesg', '-ftrace', '-func']:
				# these take a value; skip it too (re-added below)
				next(args)
				continue
			elif arg == '-result':
				cmdline += ' %s "%s"' % (arg, os.path.abspath(next(args)))
				continue
			elif arg == '-cgskip':
				file = self.configFile(next(args))
				cmdline += ' %s "%s"' % (arg, os.path.abspath(file))
				continue
			cmdline += ' '+arg
		if self.graph_filter != 'do_one_initcall':
			cmdline += ' -func "%s"' % self.graph_filter
		cmdline += ' -o "%s"' % os.path.abspath(self.testdir)
		return cmdline
	def manualRebootRequired(self):
		# print manual setup instructions and exit the tool
		cmdline = self.kernelParams()
		pprint('To generate a new timeline manually, follow these steps:\n\n'\
			'1. Add the CMDLINE string to your kernel command line.\n'\
			'2. Reboot the system.\n'\
			'3. After reboot, re-run this tool with the same arguments but no command (w/o -reboot or -manual).\n\n'\
			'CMDLINE="%s"' % cmdline)
		sys.exit()
	def blGrub(self):
		"""Locate the grub update command and its config file."""
		blcmd = ''
		for cmd in ['update-grub', 'grub-mkconfig', 'grub2-mkconfig']:
			if blcmd:
				break
			blcmd = self.getExec(cmd)
		if not blcmd:
			doError('[GRUB] missing update command')
		if not os.path.exists('/etc/default/grub'):
			doError('[GRUB] missing /etc/default/grub')
		if 'grub2' in blcmd:
			cfg = '/boot/grub2/grub.cfg'
		else:
			cfg = '/boot/grub/grub.cfg'
		if not os.path.exists(cfg):
			doError('[GRUB] missing %s' % cfg)
		if 'update-grub' in blcmd:
			self.blexec = [blcmd]
		else:
			self.blexec = [blcmd, '-o', cfg]
	def getBootLoader(self):
		"""Dispatch boot-loader setup; only grub is supported."""
		if self.bootloader == 'grub':
			self.blGrub()
		else:
			doError('unknown boot loader: %s' % self.bootloader)
	def writeDatafileHeader(self, filename):
		# stamp the output file with test metadata and kernel params
		self.kparams = open('/proc/cmdline', 'r').read().strip()
		fp = open(filename, 'w')
		fp.write(self.teststamp+'\n')
		fp.write(self.sysstamp+'\n')
		fp.write('# command | %s\n' % self.cmdline)
		fp.write('# kparams | %s\n' % self.kparams)
		fp.close()
sysvals = SystemValues()  # the single global instance used throughout
# Class: Data
# Description:
# The primary container for test data.
class Data(aslib.Data):
	"""Primary container for test data.

	Holds the initcall timeline parsed from dmesg, split into the
	'kernel' and 'user' phases, plus the raw dmesg text and any
	ftrace callgraphs matched to initcalls.
	"""
	dmesg = {}          # root data structure
	start = 0.0         # test start
	end = 0.0           # test end
	dmesgtext = []      # dmesg text file in memory
	testnumber = 0
	idstr = ''
	html_device_id = 0
	valid = False
	tUserMode = 0.0     # time the kernel hands off to init (user mode)
	boottime = ''
	phases = ['kernel', 'user']
	do_one_initcall = False
	def __init__(self, num):
		self.testnumber = num
		self.idstr = 'a'
		self.dmesgtext = []
		self.dmesg = {
			'kernel': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0,
				'order': 0, 'color': 'linear-gradient(to bottom, #fff, #bcf)'},
			'user': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0,
				'order': 1, 'color': '#fff'}
		}
	def deviceTopology(self):
		# boot timelines have no device topology
		return ''
	def newAction(self, phase, name, pid, start, end, ret, ulen):
		"""Add a new initcall callback entry to *phase*.

		Duplicate names get a "[N]" suffix so every key is unique.
		Returns the (possibly suffixed) name used as the key.
		"""
		self.html_device_id += 1
		devid = '%s%d' % (self.idstr, self.html_device_id)
		list = self.dmesg[phase]['list']
		length = -1.0
		if(start >= 0 and end >= 0):
			length = end - start
		i = 2
		origname = name
		while(name in list):
			name = '%s[%d]' % (origname, i)
			i += 1
		list[name] = {'name': name, 'start': start, 'end': end,
			'pid': pid, 'length': length, 'row': 0, 'id': devid,
			'ret': ret, 'ulen': ulen }
		return name
	def deviceMatch(self, pid, cg):
		"""Attach callgraph *cg* to the initcall entry it belongs to.

		A 'do_one_initcall' graph that fully covers an entry becomes its
		main 'ftrace' graph; any other graph fully inside an entry is
		appended to its 'ftraces' list. Returns the matched device name,
		or '' if no match was found.
		"""
		if cg.end - cg.start == 0:
			return ''
		# fix: iterate our own phases rather than the module-global
		# 'data', which may not be defined when this method runs
		for p in self.phases:
			list = self.dmesg[p]['list']
			for devname in list:
				dev = list[devname]
				if pid != dev['pid']:
					continue
				if cg.name == 'do_one_initcall':
					if(cg.start <= dev['start'] and cg.end >= dev['end'] and dev['length'] > 0):
						dev['ftrace'] = cg
						self.do_one_initcall = True
						return devname
				else:
					if(cg.start > dev['start'] and cg.end < dev['end']):
						if 'ftraces' not in dev:
							dev['ftraces'] = []
						dev['ftraces'].append(cg)
						return devname
		return ''
	def printDetails(self):
		"""Emit a verbose summary of the parsed timeline."""
		sysvals.vprint('Timeline Details:')
		sysvals.vprint('          Host: %s' % sysvals.hostname)
		sysvals.vprint('        Kernel: %s' % sysvals.kernel)
		sysvals.vprint('     Test time: %s' % sysvals.testtime)
		sysvals.vprint('     Boot time: %s' % self.boottime)
		for phase in self.phases:
			dc = len(self.dmesg[phase]['list'])
			sysvals.vprint('%9s mode: %.3f - %.3f (%d initcalls)' % (phase,
				self.dmesg[phase]['start']*1000,
				self.dmesg[phase]['end']*1000, dc))
# ----------------- FUNCTIONS --------------------
# Function: parseKernelLog
# Description:
# parse a kernel log for boot data
def parseKernelLog():
	"""Parse a kernel log for boot data.

	Reads sysvals.dmesgfile (or live `dmesg` output when unset) and
	builds a Data object with one entry per initcall, split into the
	'kernel' and 'user' phases. Returns the populated Data object.
	"""
	sysvals.vprint('Analyzing the dmesg data (%s)...' % \
		os.path.basename(sysvals.dmesgfile))
	phase = 'kernel'
	data = Data(0)
	data.dmesg['kernel']['start'] = data.start = ktime = 0.0
	sysvals.stamp = {
		'time': datetime.now().strftime('%B %d %Y, %I:%M:%S %p'),
		'host': sysvals.hostname,
		'mode': 'boot', 'kernel': ''}

	tp = aslib.TestProps()
	devtemp = dict()  # initcall name -> (start ktime, pid), awaiting "returned"
	if(sysvals.dmesgfile):
		lf = open(sysvals.dmesgfile, 'rb')
	else:
		lf = Popen('dmesg', stdout=PIPE).stdout
	for line in lf:
		line = aslib.ascii(line).replace('\r\n', '')
		# grab the stamp and sysinfo
		if re.match(tp.stampfmt, line):
			tp.stamp = line
			continue
		elif re.match(tp.sysinfofmt, line):
			tp.sysinfo = line
			continue
		elif re.match(tp.cmdlinefmt, line):
			tp.cmdline = line
			continue
		elif re.match(tp.kparamsfmt, line):
			tp.kparams = line
			continue
		# strip any prefix before the "[ time]" timestamp
		idx = line.find('[')
		if idx > 1:
			line = line[idx:]
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if(not m):
			continue
		ktime = float(m.group('ktime'))
		if(ktime > 120):
			# boot is assumed finished well before 120s; stop scanning
			break
		msg = m.group('msg')
		data.dmesgtext.append(line)
		if(ktime == 0.0 and re.match('^Linux version .*', msg)):
			if(not sysvals.stamp['kernel']):
				sysvals.stamp['kernel'] = sysvals.kernelVersion(msg)
			continue
		# derive the wall-clock boot time from the clock-set message
		m = re.match('.* setting system clock to (?P<d>[0-9\-]*)[ A-Z](?P<t>[0-9:]*) UTC.*', msg)
		if(m):
			bt = datetime.strptime(m.group('d')+' '+m.group('t'), '%Y-%m-%d %H:%M:%S')
			bt = bt - timedelta(seconds=int(ktime))
			data.boottime = bt.strftime('%Y-%m-%d_%H:%M:%S')
			sysvals.stamp['time'] = bt.strftime('%B %d %Y, %I:%M:%S %p')
			continue
		# "calling fn+0x0/0x0 @ pid" opens an initcall
		m = re.match('^calling *(?P<f>.*)\+.* @ (?P<p>[0-9]*)', msg)
		if(m):
			func = m.group('f')
			pid = int(m.group('p'))
			devtemp[func] = (ktime, pid)
			continue
		# "initcall fn+... returned R after T usecs" closes it
		m = re.match('^initcall *(?P<f>.*)\+.* returned (?P<r>.*) after (?P<t>.*) usecs', msg)
		if(m):
			data.valid = True
			data.end = ktime
			f, r, t = m.group('f', 'r', 't')
			if(f in devtemp):
				start, pid = devtemp[f]
				data.newAction(phase, f, pid, start, ktime, int(r), int(t))
				del devtemp[f]
			continue
		# kernel -> user mode handoff
		if(re.match('^Freeing unused kernel .*', msg)):
			data.tUserMode = ktime
			data.dmesg['kernel']['end'] = ktime
			data.dmesg['user']['start'] = ktime
			phase = 'user'

	# NOTE(review): stamp is reset to 0 before parseStamp repopulates it
	# from the captured stamp line -- confirm against aslib.TestProps
	if tp.stamp:
		sysvals.stamp = 0
		tp.parseStamp(data, sysvals)
	data.dmesg['user']['end'] = data.end
	lf.close()
	return data
# Function: parseTraceLog
# Description:
# Check if trace is available and copy to a temp file
def parseTraceLog(data):
	"""Parse the ftrace log and attach callgraphs to *data*'s initcalls.

	Builds FTraceCallGraph objects per (proc, pid) from the
	function_graph trace, then matches each finished graph to the
	initcall it covers via data.deviceMatch().
	"""
	sysvals.vprint('Analyzing the ftrace data (%s)...' % \
		os.path.basename(sysvals.ftracefile))

	# if available, calculate cgfilter allowable ranges
	cgfilter = []
	if len(sysvals.cgfilter) > 0:
		for p in data.phases:
			list = data.dmesg[p]['list']
			for i in sysvals.cgfilter:
				if i in list:
					# pad the window slightly on both sides
					cgfilter.append([list[i]['start']-0.0001,
						list[i]['end']+0.0001])
	# parse the trace log
	ftemp = dict()  # (proc, pid) -> list of in-progress callgraphs
	tp = aslib.TestProps()
	tp.setTracerType('function_graph')
	tf = open(sysvals.ftracefile, 'r')
	for line in tf:
		if line[0] == '#':
			continue
		m = re.match(tp.ftrace_line_fmt, line.strip())
		if(not m):
			continue
		m_time, m_proc, m_pid, m_msg, m_dur = \
			m.group('time', 'proc', 'pid', 'msg', 'dur')
		t = float(m_time)
		# drop lines outside the allowed callgraph windows
		if len(cgfilter) > 0:
			allow = False
			for r in cgfilter:
				if t >= r[0] and t < r[1]:
					allow = True
					break
			if not allow:
				continue
		if t > data.end:
			break
		# NOTE: 't' is reused below as an FTraceLine object
		if(m_time and m_pid and m_msg):
			t = aslib.FTraceLine(m_time, m_msg, m_dur)
			pid = int(m_pid)
		else:
			continue
		# only parse the ftrace data during boot
		if t.fevent or t.fkprobe:
			continue
		key = (m_proc, pid)
		if(key not in ftemp):
			ftemp[key] = []
			ftemp[key].append(aslib.FTraceCallGraph(pid, sysvals))
		cg = ftemp[key][-1]
		res = cg.addLine(t)
		# nonzero result: current graph is complete, start a new one
		if(res != 0):
			ftemp[key].append(aslib.FTraceCallGraph(pid, sysvals))
		if(res == -1):
			# the line belongs to the new graph, not the finished one
			ftemp[key][-1].addLine(t)
	tf.close()

	# add the callgraph data to the device hierarchy
	for key in ftemp:
		proc, pid = key
		for cg in ftemp[key]:
			if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
				continue
			if(not cg.postProcess()):
				pprint('Sanity check failed for %s-%d' % (proc, pid))
				continue
			# match cg data to devices
			devname = data.deviceMatch(pid, cg)
			if not devname:
				kind = 'Orphan'
				if cg.partial:
					kind = 'Partial'
				sysvals.vprint('%s callgraph found for %s %s-%d [%f - %f]' %\
					(kind, cg.name, proc, pid, cg.start, cg.end))
			elif len(cg.list) > 1000000:
				pprint('WARNING: the callgraph found for %s is massive! (%d lines)' %\
					(devname, len(cg.list)))
# Function: retrieveLogs
# Description:
# Create copies of dmesg and/or ftrace for later processing
def retrieveLogs():
	"""Create copies of dmesg and/or ftrace for later processing."""
	# check ftrace is configured first
	if sysvals.useftrace:
		tracer = sysvals.fgetVal('current_tracer').strip()
		if tracer != 'function_graph':
			doError('ftrace not configured for a boot callgraph')
	# create the folder and get dmesg
	sysvals.systemInfo(aslib.dmidecode(sysvals.mempath))
	sysvals.initTestOutput('boot')
	sysvals.writeDatafileHeader(sysvals.dmesgfile)
	# NOTE: shell=True with tool-generated paths, not user input
	call('dmesg >> '+sysvals.dmesgfile, shell=True)
	if not sysvals.useftrace:
		return
	# get ftrace
	sysvals.writeDatafileHeader(sysvals.ftracefile)
	call('cat '+sysvals.tpath+'trace >> '+sysvals.ftracefile, shell=True)
# Function: colorForName
# Description:
# Generate a repeatable color from a list for a given name
def colorForName(name):
	"""Generate a repeatable color from a list for a given name.

	The name's character ordinals are summed and used to index a fixed
	palette, so the same name always maps to the same
	(css class, color) pair.  Returns a (class, color) tuple.
	"""
	palette = [
		('c1', '#ec9999'),
		('c2', '#ffc1a6'),
		('c3', '#fff0a6'),
		('c4', '#adf199'),
		('c5', '#9fadea'),
		('c6', '#a699c1'),
		('c7', '#ad99b4'),
		('c8', '#eaffea'),
		('c9', '#dcecfb'),
		('c10', '#ffffea')
	]
	# sum() replaces the manual index loop; 'palette' avoids shadowing
	# the builtin 'list'
	total = sum(ord(c) for c in name)
	return palette[total % len(palette)]
def cgOverview(cg, minlen):
	"""Summarize the depth-1 function calls of callgraph *cg*.

	Returns (large, stats) where *large* lists the depth-1 calls whose
	length is at least *minlen*, and *stats* maps each depth-1 function
	name to [accumulated time in ms, call count].
	"""
	stats = dict()
	large = []
	for entry in cg.list:
		# only top-level function entries are counted
		if not (entry.fcall and entry.depth == 1):
			continue
		if entry.length >= minlen:
			large.append(entry)
		if entry.name not in stats:
			stats[entry.name] = [0, 0.0]
		stats[entry.name][0] += (entry.length * 1000.0)
		stats[entry.name][1] += 1
	return (large, stats)
# Function: createBootGraph
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createBootGraph(data):
	"""Create the output html file from the resident test data.

	Arguments:
		data: Data object from parseKernelLog or parseTraceLog
	Output:
		True if the html file was created, False if it failed
	"""
	# html function templates
	html_srccall = '<div id={6} title="{5}" class="srccall" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;">{0}</div>\n'
	html_timetotal = '<table class="time1">\n<tr>'\
		'<td class="blue">Init process starts @ <b>{0} ms</b></td>'\
		'<td class="blue">Last initcall ends @ <b>{1} ms</b></td>'\
		'</tr>\n</table>\n'

	# device timeline
	devtl = aslib.Timeline(100, 20)

	# write the test title and general info header
	devtl.createHeader(sysvals, sysvals.stamp)

	# Generate the header for this timeline
	t0 = data.start
	tMax = data.end
	tTotal = tMax - t0
	if(tTotal == 0):
		pprint('ERROR: No timeline data')
		return False
	user_mode = '%.0f'%(data.tUserMode*1000)
	last_init = '%.0f'%(tTotal*1000)
	devtl.html += html_timetotal.format(user_mode, last_init)

	# determine the maximum number of rows we need to draw
	devlist = []
	for p in data.phases:
		list = data.dmesg[p]['list']
		for devname in list:
			d = aslib.DevItem(0, p, list[devname])
			devlist.append(d)
		devtl.getPhaseRows(devlist, 0, 'start')
	devtl.calcTotalRows()

	# draw the timeline background
	devtl.createZoomBox()
	devtl.html += devtl.html_tblock.format('boot', '0', '100', devtl.scaleH)
	for p in data.phases:
		phase = data.dmesg[p]
		length = phase['end']-phase['start']
		left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
		width = '%.3f' % ((length*100.0)/tTotal)
		devtl.html += devtl.html_phase.format(left, width, \
			'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
			phase['color'], '')

	# draw the device timeline
	num = 0
	devstats = dict()
	for phase in data.phases:
		list = data.dmesg[phase]['list']
		for devname in sorted(list):
			cls, color = colorForName(devname)
			dev = list[devname]
			info = '@|%.3f|%.3f|%.3f|%d' % (dev['start']*1000.0, dev['end']*1000.0,
				dev['ulen']/1000.0, dev['ret'])
			devstats[dev['id']] = {'info':info}
			dev['color'] = color
			height = devtl.phaseRowHeight(0, phase, dev['row'])
			top = '%.6f' % ((dev['row']*height) + devtl.scaleH)
			left = '%.6f' % (((dev['start']-t0)*100)/tTotal)
			width = '%.6f' % (((dev['end']-dev['start'])*100)/tTotal)
			length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
			devtl.html += devtl.html_device.format(dev['id'],
				devname+length+phase+'_mode', left, top, '%.3f'%height,
				width, devname, ' '+cls, '')
			rowtop = devtl.phaseRowTop(0, phase, dev['row'])
			height = '%.6f' % (devtl.rowH / 2)
			top = '%.6f' % (rowtop + devtl.scaleH + (devtl.rowH / 2))
			if data.do_one_initcall:
				# one main callgraph per initcall: draw its largest calls
				if('ftrace' not in dev):
					continue
				cg = dev['ftrace']
				large, stats = cgOverview(cg, 0.001)
				devstats[dev['id']]['fstat'] = stats
				for l in large:
					left = '%f' % (((l.time-t0)*100)/tTotal)
					width = '%f' % (l.length*100/tTotal)
					title = '%s (%0.3fms)' % (l.name, l.length * 1000.0)
					devtl.html += html_srccall.format(l.name, left,
						top, height, width, title, 'x%d'%num)
					num += 1
				continue
			# otherwise draw every attached callgraph as a source call
			if('ftraces' not in dev):
				continue
			for cg in dev['ftraces']:
				left = '%f' % (((cg.start-t0)*100)/tTotal)
				width = '%f' % ((cg.end-cg.start)*100/tTotal)
				cglen = (cg.end - cg.start) * 1000.0
				title = '%s (%0.3fms)' % (cg.name, cglen)
				cg.id = 'x%d' % num
				devtl.html += html_srccall.format(cg.name, left,
					top, height, width, title, dev['id']+cg.id)
				num += 1

	# draw the time scale, try to make the number of labels readable
	devtl.createTimeScale(t0, tMax, tTotal, 'boot')
	devtl.html += '</div>\n'

	# timeline is finished
	devtl.html += '</div>\n</div>\n'

	# draw a legend which describes the phases by color
	devtl.html += '<div class="legend">\n'
	pdelta = 20.0
	pmargin = 36.0
	for phase in data.phases:
		order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin)
		devtl.html += devtl.html_legend.format(order, \
			data.dmesg[phase]['color'], phase+'_mode', phase[0])
	devtl.html += '</div>\n'

	hf = open(sysvals.htmlfile, 'w')

	# add the css
	extra = '\
	.c1 {background:rgba(209,0,0,0.4);}\n\
	.c2 {background:rgba(255,102,34,0.4);}\n\
	.c3 {background:rgba(255,218,33,0.4);}\n\
	.c4 {background:rgba(51,221,0,0.4);}\n\
	.c5 {background:rgba(17,51,204,0.4);}\n\
	.c6 {background:rgba(34,0,102,0.4);}\n\
	.c7 {background:rgba(51,0,68,0.4);}\n\
	.c8 {background:rgba(204,255,204,0.4);}\n\
	.c9 {background:rgba(169,208,245,0.4);}\n\
	.c10 {background:rgba(255,255,204,0.4);}\n\
	.vt {transform:rotate(-60deg);transform-origin:0 0;}\n\
	table.fstat {table-layout:fixed;padding:150px 15px 0 0;font-size:10px;column-width:30px;}\n\
	.fstat th {width:55px;}\n\
	.fstat td {text-align:left;width:35px;}\n\
	.srccall {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
	.srccall:hover {color:white;font-weight:bold;border:1px solid white;}\n'
	aslib.addCSS(hf, sysvals, 1, False, extra)

	# write the device timeline
	hf.write(devtl.html)

	# add boot specific html
	statinfo = 'var devstats = {\n'
	for n in sorted(devstats):
		statinfo += '\t"%s": [\n\t\t"%s",\n' % (n, devstats[n]['info'])
		if 'fstat' in devstats[n]:
			funcs = devstats[n]['fstat']
			for f in sorted(funcs, key=lambda k:(funcs[k], k), reverse=True):
				if funcs[f][0] < 0.01 and len(funcs) > 10:
					break
				statinfo += '\t\t"%f|%s|%d",\n' % (funcs[f][0], f, funcs[f][1])
		statinfo += '\t],\n'
	statinfo += '};\n'
	html = \
		'<div id="devicedetailtitle"></div>\n'\
		'<div id="devicedetail" style="display:none;">\n'\
		'<div id="devicedetail0">\n'
	for p in data.phases:
		phase = data.dmesg[p]
		html += devtl.html_phaselet.format(p+'_mode', '0', '100', phase['color'])
	html += '</div>\n</div>\n'\
		'<script type="text/javascript">\n'+statinfo+\
		'</script>\n'
	hf.write(html)

	# add the callgraph html
	if(sysvals.usecallgraph):
		aslib.addCallgraphs(sysvals, hf, data)

	# add the test log as a hidden div
	if sysvals.testlog and sysvals.logmsg:
		hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
	# add the dmesg log as a hidden div
	if sysvals.dmesglog:
		hf.write('<div id="dmesglog" style="display:none;">\n')
		for line in data.dmesgtext:
			# fix: escape the raw dmesg text for HTML embedding
			# (the previous replace('<', '<') was a no-op)
			line = line.replace('<', '&lt;').replace('>', '&gt;')
			hf.write(line)
		hf.write('</div>\n')

	# write the footer and close
	aslib.addScriptCode(hf, [data])
	hf.write('</body>\n</html>\n')
	hf.close()
	return True
# Function: updateCron
# Description:
# (restore=False) Set the tool to run automatically on reboot
# (restore=True) Restore the original crontab
def updateCron(restore=False):
	# Install (restore=False) or remove (restore=True) a root crontab entry
	# that re-runs this tool at the next reboot.  Mutates the system crontab
	# via the crontab(1) binary; installing requires root.
	if not restore:
		sysvals.rootUser(True)
	crondir = '/var/spool/cron/crontabs/'
	if not os.path.exists(crondir):
		# fall back to the non-Debian spool layout
		crondir = '/var/spool/cron/'
		if not os.path.exists(crondir):
			doError('%s not found' % crondir)
	cronfile = crondir+'root'
	backfile = crondir+'root-analyze_boot-backup'
	cmd = sysvals.getExec('crontab')
	if not cmd:
		doError('crontab not found')
	# on restore: move the backup cron back into place
	if restore:
		if os.path.exists(backfile):
			shutil.move(backfile, cronfile)
			call([cmd, cronfile])
		return
	# backup current cron and install new one with reboot
	if os.path.exists(cronfile):
		shutil.move(cronfile, backfile)
	else:
		# no existing crontab: create an empty backup to copy from
		fp = open(backfile, 'w')
		fp.close()
	res = -1
	try:
		fp = open(backfile, 'r')
		op = open(cronfile, 'w')
		# copy every existing entry except our own job line
		for line in fp:
			if not sysvals.myCronJob(line):
				op.write(line)
				continue
		fp.close()
		op.write('@reboot python %s\n' % sysvals.cronjobCmdString())
		op.close()
		res = call([cmd, cronfile])
	except Exception as e:
		# on any failure, put the original crontab back
		pprint('Exception: %s' % str(e))
		shutil.move(backfile, cronfile)
		res = -1
	if res != 0:
		doError('crontab failed')
# Function: updateGrub
# Description:
# update grub.cfg for all kernels with our parameters
def updateGrub(restore=False):
	# Append this tool's kernel parameters to GRUB_CMDLINE_LINUX_DEFAULT in
	# /etc/default/grub (restore=False), or just regenerate grub.cfg from
	# the already-restored file (restore=True).  Requires root.
	# call update-grub on restore
	if restore:
		try:
			call(sysvals.blexec, stderr=PIPE, stdout=PIPE,
				env={'PATH': '.:/sbin:/usr/sbin:/usr/bin:/sbin:/bin'})
		except Exception as e:
			pprint('Exception: %s\n' % str(e))
		return
	# extract the option and create a grub config without it
	sysvals.rootUser(True)
	tgtopt = 'GRUB_CMDLINE_LINUX_DEFAULT'
	cmdline = ''
	grubfile = '/etc/default/grub'
	tempfile = '/etc/default/grub.analyze_boot'
	shutil.move(grubfile, tempfile)
	res = -1
	try:
		fp = open(tempfile, 'r')
		op = open(grubfile, 'w')
		cont = False
		for line in fp:
			line = line.strip()
			if len(line) == 0 or line[0] == '#':
				continue
			opt = line.split('=')[0].strip()
			if opt == tgtopt:
				# capture the target option's value (may span lines
				# via trailing backslashes)
				cmdline = line.split('=', 1)[1].strip('\\')
				if line[-1] == '\\':
					cont = True
			elif cont:
				cmdline += line.strip('\\')
				if line[-1] != '\\':
					cont = False
			else:
				# unrelated options are copied through unchanged
				op.write('%s\n' % line)
		fp.close()
		# if the target option value is in quotes, strip them
		sp = '"'
		val = cmdline.strip()
		if val and (val[0] == '\'' or val[0] == '"'):
			sp = val[0]
			val = val.strip(sp)
		cmdline = val
		# append our cmd line options
		if len(cmdline) > 0:
			cmdline += ' '
		cmdline += sysvals.kernelParams()
		# write out the updated target option
		op.write('\n%s=%s%s%s\n' % (tgtopt, sp, cmdline, sp))
		op.close()
		res = call(sysvals.blexec)
		os.remove(grubfile)
	except Exception as e:
		pprint('Exception: %s' % str(e))
		res = -1
	# cleanup
	shutil.move(tempfile, grubfile)
	if res != 0:
		doError('update grub failed')
# Function: updateKernelParams
# Description:
# update boot conf for all kernels with our parameters
def updateKernelParams(restore=False):
	# Detect the active boot loader, then delegate the kernel-parameter
	# update (or restore) to the handler for that loader.
	sysvals.getBootLoader()
	if sysvals.bootloader != 'grub':
		return
	updateGrub(restore)
# Function: doError Description:
# generic error function for catastrphic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
	# Report a fatal error and exit: optionally print the help text first,
	# then echo the message, export it to the result file, and quit.
	# Fix: compare truthiness idiomatically instead of '== True' (PEP 8);
	# behavior is unchanged for all existing boolean callers.
	if help:
		printHelp()
	pprint('ERROR: %s\n' % msg)
	sysvals.outputResult({'error':msg})
	sys.exit()
# Function: printHelp
# Description:
# print out the help text
def printHelp():
	# Print the command-line usage/help text for bootgraph and return True.
	pprint('\n%s v%s\n'\
	'Usage: bootgraph <options> <command>\n'\
	'\n'\
	'Description:\n'\
	' This tool reads in a dmesg log of linux kernel boot and\n'\
	' creates an html representation of the boot timeline up to\n'\
	' the start of the init process.\n'\
	'\n'\
	' If no specific command is given the tool reads the current dmesg\n'\
	' and/or ftrace log and creates a timeline\n'\
	'\n'\
	' Generates output files in subdirectory: boot-yymmdd-HHMMSS\n'\
	' HTML output: <hostname>_boot.html\n'\
	' raw dmesg output: <hostname>_boot_dmesg.txt\n'\
	' raw ftrace output: <hostname>_boot_ftrace.txt\n'\
	'\n'\
	'Options:\n'\
	' -h Print this help text\n'\
	' -v Print the current tool version\n'\
	' -verbose Print extra information during execution and analysis\n'\
	' -addlogs Add the dmesg log to the html output\n'\
	' -result fn Export a results table to a text file for parsing.\n'\
	' -o name Overrides the output subdirectory name when running a new test\n'\
	' default: boot-{date}-{time}\n'\
	' [advanced]\n'\
	' -fstat Use ftrace to add function detail and statistics (default: disabled)\n'\
	' -f/-callgraph Add callgraph detail, can be very large (default: disabled)\n'\
	' -maxdepth N limit the callgraph data to N call levels (default: 2)\n'\
	' -mincg ms Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)\n'\
	' -timeprec N Number of significant digits in timestamps (0:S, 3:ms, [6:us])\n'\
	' -expandcg pre-expand the callgraph data in the html output (default: disabled)\n'\
	' -func list Limit ftrace to comma-delimited list of functions (default: do_one_initcall)\n'\
	' -cgfilter S Filter the callgraph output in the timeline\n'\
	' -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\
	' -bl name Use the following boot loader for kernel params (default: grub)\n'\
	' -reboot Reboot the machine automatically and generate a new timeline\n'\
	' -manual Show the steps to generate a new timeline manually (used with -reboot)\n'\
	'\n'\
	'Other commands:\n'\
	' -flistall Print all functions capable of being captured in ftrace\n'\
	' -sysinfo Print out system info extracted from BIOS\n'\
	' -which exec Print an executable path, should function even without PATH\n'\
	' [redo]\n'\
	' -dmesg file Create HTML output using dmesg input (used with -ftrace)\n'\
	' -ftrace file Create HTML output using ftrace input (used with -dmesg)\n'\
	'' % (sysvals.title, sysvals.version))
	return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
	# Command-line entry point: parse the arguments, run any utility
	# command, optionally set up a reboot/cron test run, then parse the
	# dmesg/ftrace logs and generate the html timeline.
	# loop through the command line arguments
	cmd = ''
	testrun = True
	switchoff = ['disable', 'off', 'false', '0']
	simplecmds = ['-sysinfo', '-kpupdate', '-flistall', '-checkbl']
	cgskip = ''
	if '-f' in sys.argv:
		cgskip = sysvals.configFile('cgskip.txt')
	args = iter(sys.argv[1:])
	mdset = False
	for arg in args:
		if(arg == '-h'):
			printHelp()
			sys.exit()
		elif(arg == '-v'):
			pprint("Version %s" % sysvals.version)
			sys.exit()
		elif(arg == '-verbose'):
			sysvals.verbose = True
		elif(arg in simplecmds):
			cmd = arg[1:]
		elif(arg == '-fstat'):
			sysvals.useftrace = True
		elif(arg == '-callgraph' or arg == '-f'):
			sysvals.useftrace = True
			sysvals.usecallgraph = True
		elif(arg == '-cgdump'):
			sysvals.cgdump = True
		elif(arg == '-mincg'):
			sysvals.mincglen = aslib.getArgFloat('-mincg', args, 0.0, 10000.0)
		elif(arg == '-cgfilter'):
			try:
				val = next(args)
			except:
				doError('No callgraph functions supplied', True)
			sysvals.setCallgraphFilter(val)
		elif(arg == '-cgskip'):
			try:
				val = next(args)
			except:
				doError('No file supplied', True)
			if val.lower() in switchoff:
				cgskip = ''
			else:
				cgskip = sysvals.configFile(val)
				if(not cgskip):
					doError('%s does not exist' % cgskip)
		elif(arg == '-bl'):
			try:
				val = next(args)
			except:
				doError('No boot loader name supplied', True)
			if val.lower() not in ['grub']:
				doError('Unknown boot loader: %s' % val, True)
			sysvals.bootloader = val.lower()
		elif(arg == '-timeprec'):
			sysvals.setPrecision(aslib.getArgInt('-timeprec', args, 0, 6))
		elif(arg == '-maxdepth'):
			mdset = True
			sysvals.max_graph_depth = aslib.getArgInt('-maxdepth', args, 0, 1000)
		elif(arg == '-func'):
			try:
				val = next(args)
			except:
				doError('No filter functions supplied', True)
			# a function filter implies ftrace + callgraph mode
			sysvals.useftrace = True
			sysvals.usecallgraph = True
			sysvals.rootCheck(True)
			sysvals.setGraphFilter(val)
		elif(arg == '-ftrace'):
			try:
				val = next(args)
			except:
				doError('No ftrace file supplied', True)
			if(os.path.exists(val) == False):
				doError('%s does not exist' % val)
			testrun = False
			sysvals.ftracefile = val
		elif(arg == '-addlogs'):
			sysvals.dmesglog = True
		elif(arg == '-expandcg'):
			sysvals.cgexp = True
		elif(arg == '-dmesg'):
			try:
				val = next(args)
			except:
				doError('No dmesg file supplied', True)
			if(os.path.exists(val) == False):
				doError('%s does not exist' % val)
			testrun = False
			sysvals.dmesgfile = val
		elif(arg == '-o'):
			try:
				val = next(args)
			except:
				doError('No subdirectory name supplied', True)
			sysvals.testdir = sysvals.setOutputFolder(val)
		elif(arg == '-result'):
			try:
				val = next(args)
			except:
				doError('No result file supplied', True)
			sysvals.result = val
		elif(arg == '-reboot'):
			sysvals.reboot = True
		elif(arg == '-manual'):
			sysvals.reboot = True
			sysvals.manual = True
		# remaining options are only for cron job use
		elif(arg == '-cronjob'):
			sysvals.iscronjob = True
		elif(arg == '-which'):
			try:
				val = next(args)
			except:
				doError('No executable supplied', True)
			out = sysvals.getExec(val)
			if not out:
				print('%s not found' % val)
				sys.exit(1)
			print(out)
			sys.exit(0)
		else:
			doError('Invalid argument: '+arg, True)
	# compatibility errors and access checks
	if(sysvals.iscronjob and (sysvals.reboot or \
		sysvals.dmesgfile or sysvals.ftracefile or cmd)):
		doError('-cronjob is meant for batch purposes only')
	if(sysvals.reboot and (sysvals.dmesgfile or sysvals.ftracefile)):
		doError('-reboot and -dmesg/-ftrace are incompatible')
	if cmd or sysvals.reboot or sysvals.iscronjob or testrun:
		sysvals.rootCheck(True)
	if (testrun and sysvals.useftrace) or cmd == 'flistall':
		if not sysvals.verifyFtrace():
			doError('Ftrace is not properly enabled')
	# run utility commands
	sysvals.cpuInfo()
	if cmd != '':
		if cmd == 'kpupdate':
			updateKernelParams()
		elif cmd == 'flistall':
			for f in sysvals.getBootFtraceFilterFunctions():
				print(f)
		elif cmd == 'checkbl':
			sysvals.getBootLoader()
			pprint('Boot Loader: %s\n%s' % (sysvals.bootloader, sysvals.blexec))
		elif(cmd == 'sysinfo'):
			sysvals.printSystemInfo(True)
		sys.exit()
	# reboot: update grub, setup a cronjob, and reboot
	if sysvals.reboot:
		if (sysvals.useftrace or sysvals.usecallgraph) and \
			not sysvals.checkFtraceKernelVersion():
			doError('Ftrace functionality requires kernel v4.10 or newer')
		if not sysvals.manual:
			updateKernelParams()
			updateCron()
			call('reboot')
		else:
			sysvals.manualRebootRequired()
		sys.exit()
	if sysvals.usecallgraph and cgskip:
		sysvals.vprint('Using cgskip file: %s' % cgskip)
		sysvals.setCallgraphBlacklist(cgskip)
	# cronjob: remove the cronjob, grub changes, and disable ftrace
	if sysvals.iscronjob:
		updateCron(True)
		updateKernelParams(True)
		try:
			sysvals.fsetVal('0', 'tracing_on')
		except:
			pass
	# testrun: generate copies of the logs
	if testrun:
		retrieveLogs()
	else:
		sysvals.setOutputFile()
	# process the log data
	if sysvals.dmesgfile:
		if not mdset:
			sysvals.max_graph_depth = 0
		data = parseKernelLog()
		if(not data.valid):
			doError('No initcall data found in %s' % sysvals.dmesgfile)
		if sysvals.useftrace and sysvals.ftracefile:
			parseTraceLog(data)
		if sysvals.cgdump:
			data.debugPrint()
			sys.exit()
	else:
		doError('dmesg file required')
	sysvals.vprint('Creating the html timeline (%s)...' % sysvals.htmlfile)
	sysvals.vprint('Command:\n %s' % sysvals.cmdline)
	sysvals.vprint('Kernel parameters:\n %s' % sysvals.kparams)
	data.printDetails()
	createBootGraph(data)
	# if running as root, change output dir owner to sudo_user
	if testrun and os.path.isdir(sysvals.testdir) and \
		os.getuid() == 0 and 'SUDO_USER' in os.environ:
		cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
		call(cmd.format(os.environ['SUDO_USER'], sysvals.testdir), shell=True)
	sysvals.stamp['boot'] = (data.tUserMode - data.start) * 1000
	sysvals.stamp['lastinit'] = data.end * 1000
	sysvals.outputResult(sysvals.stamp)
| grace-kernel-grace-kernel-6.1.y | tools/power/pm-graph/bootgraph.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# Authors:
# Todd Brandt <[email protected]>
#
# Links:
# Home Page
# https://01.org/pm-graph
# Source repo
# [email protected]:intel/pm-graph
#
# Description:
# This tool is designed to assist kernel and OS developers in optimizing
# their linux stack's suspend/resume time. Using a kernel image built
# with a few extra options enabled, the tool will execute a suspend and
# will capture dmesg and ftrace data until resume is complete. This data
# is transformed into a device timeline and a callgraph to give a quick
# and detailed view of which devices and callbacks are taking the most
# time in suspend/resume. The output is a single html file which can be
# viewed in firefox or chrome.
#
# The following kernel build options are required:
# CONFIG_DEVMEM=y
# CONFIG_PM_DEBUG=y
# CONFIG_PM_SLEEP_DEBUG=y
# CONFIG_FTRACE=y
# CONFIG_FUNCTION_TRACER=y
# CONFIG_FUNCTION_GRAPH_TRACER=y
# CONFIG_KPROBES=y
# CONFIG_KPROBES_ON_FTRACE=y
#
# For kernel versions older than 3.15:
# The following additional kernel parameters are required:
# (e.g. in file /etc/default/grub)
# GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#
# ----------------- LIBRARIES --------------------
import sys
import time
import os
import string
import re
import platform
import signal
import codecs
from datetime import datetime, timedelta
import struct
import configparser
import gzip
from threading import Thread
from subprocess import call, Popen, PIPE
import base64
# when True, pprint() prefixes each line with seconds elapsed since start
debugtiming = False
# tool start time; reference point for the pprint() timing prefix
mystarttime = time.time()
def pprint(msg):
	# Print a message, optionally prefixed with the elapsed tool runtime,
	# then flush stdout so output interleaves correctly with other logs.
	if not debugtiming:
		print(msg)
	else:
		elapsed = time.time() - mystarttime
		print('[%09.3f] %s' % (elapsed, msg))
	sys.stdout.flush()
def ascii(text):
	# Decode a byte string to str, silently dropping non-ASCII bytes.
	return text.decode(encoding='ascii', errors='ignore')
# ----------------- CLASSES --------------------
# Class: SystemValues
# Description:
# A global, single-instance container used to
# store system values and test parameters
class SystemValues:
title = 'SleepGraph'
version = '5.10'
ansi = False
rs = 0
display = ''
gzip = False
sync = False
wifi = False
netfix = False
verbose = False
testlog = True
dmesglog = True
ftracelog = False
acpidebug = True
tstat = True
wifitrace = False
mindevlen = 0.0001
mincglen = 0.0
cgphase = ''
cgtest = -1
cgskip = ''
maxfail = 0
multitest = {'run': False, 'count': 1000000, 'delay': 0}
max_graph_depth = 0
callloopmaxgap = 0.0001
callloopmaxlen = 0.005
bufsize = 0
cpucount = 0
memtotal = 204800
memfree = 204800
osversion = ''
srgap = 0
cgexp = False
testdir = ''
outdir = ''
tpath = '/sys/kernel/debug/tracing/'
fpdtpath = '/sys/firmware/acpi/tables/FPDT'
epath = '/sys/kernel/debug/tracing/events/power/'
pmdpath = '/sys/power/pm_debug_messages'
s0ixpath = '/sys/module/intel_pmc_core/parameters/warn_on_s0ix_failures'
s0ixres = '/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us'
acpipath='/sys/module/acpi/parameters/debug_level'
traceevents = [
'suspend_resume',
'wakeup_source_activate',
'wakeup_source_deactivate',
'device_pm_callback_end',
'device_pm_callback_start'
]
logmsg = ''
testcommand = ''
mempath = '/dev/mem'
powerfile = '/sys/power/state'
mempowerfile = '/sys/power/mem_sleep'
diskpowerfile = '/sys/power/disk'
suspendmode = 'mem'
memmode = ''
diskmode = ''
hostname = 'localhost'
prefix = 'test'
teststamp = ''
sysstamp = ''
dmesgstart = 0.0
dmesgfile = ''
ftracefile = ''
htmlfile = 'output.html'
result = ''
rtcwake = True
rtcwaketime = 15
rtcpath = ''
devicefilter = []
cgfilter = []
stamp = 0
execcount = 1
x2delay = 0
skiphtml = False
usecallgraph = False
ftopfunc = 'pm_suspend'
ftop = False
usetraceevents = False
usetracemarkers = True
useftrace = True
usekprobes = True
usedevsrc = False
useprocmon = False
notestrun = False
cgdump = False
devdump = False
mixedphaseheight = True
devprops = dict()
cfgdef = dict()
platinfo = []
predelay = 0
postdelay = 0
tmstart = 'SUSPEND START %Y%m%d-%H:%M:%S.%f'
tmend = 'RESUME COMPLETE %Y%m%d-%H:%M:%S.%f'
tracefuncs = {
'async_synchronize_full': {},
'sys_sync': {},
'ksys_sync': {},
'__pm_notifier_call_chain': {},
'pm_prepare_console': {},
'pm_notifier_call_chain': {},
'freeze_processes': {},
'freeze_kernel_threads': {},
'pm_restrict_gfp_mask': {},
'acpi_suspend_begin': {},
'acpi_hibernation_begin': {},
'acpi_hibernation_enter': {},
'acpi_hibernation_leave': {},
'acpi_pm_freeze': {},
'acpi_pm_thaw': {},
'acpi_s2idle_end': {},
'acpi_s2idle_sync': {},
'acpi_s2idle_begin': {},
'acpi_s2idle_prepare': {},
'acpi_s2idle_prepare_late': {},
'acpi_s2idle_wake': {},
'acpi_s2idle_wakeup': {},
'acpi_s2idle_restore': {},
'acpi_s2idle_restore_early': {},
'hibernate_preallocate_memory': {},
'create_basic_memory_bitmaps': {},
'swsusp_write': {},
'suspend_console': {},
'acpi_pm_prepare': {},
'syscore_suspend': {},
'arch_enable_nonboot_cpus_end': {},
'syscore_resume': {},
'acpi_pm_finish': {},
'resume_console': {},
'acpi_pm_end': {},
'pm_restore_gfp_mask': {},
'thaw_processes': {},
'pm_restore_console': {},
'CPU_OFF': {
'func':'_cpu_down',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_OFF[{cpu}]'
},
'CPU_ON': {
'func':'_cpu_up',
'args_x86_64': {'cpu':'%di:s32'},
'format': 'CPU_ON[{cpu}]'
},
}
dev_tracefuncs = {
# general wait/delay/sleep
'msleep': { 'args_x86_64': {'time':'%di:s32'}, 'ub': 1 },
'schedule_timeout': { 'args_x86_64': {'timeout':'%di:s32'}, 'ub': 1 },
'udelay': { 'func':'__const_udelay', 'args_x86_64': {'loops':'%di:s32'}, 'ub': 1 },
'usleep_range': { 'args_x86_64': {'min':'%di:s32', 'max':'%si:s32'}, 'ub': 1 },
'mutex_lock_slowpath': { 'func':'__mutex_lock_slowpath', 'ub': 1 },
'acpi_os_stall': {'ub': 1},
'rt_mutex_slowlock': {'ub': 1},
# ACPI
'acpi_resume_power_resources': {},
'acpi_ps_execute_method': { 'args_x86_64': {
'fullpath':'+0(+40(%di)):string',
}},
# mei_me
'mei_reset': {},
# filesystem
'ext4_sync_fs': {},
# 80211
'ath10k_bmi_read_memory': { 'args_x86_64': {'length':'%cx:s32'} },
'ath10k_bmi_write_memory': { 'args_x86_64': {'length':'%cx:s32'} },
'ath10k_bmi_fast_download': { 'args_x86_64': {'length':'%cx:s32'} },
'iwlagn_mac_start': {},
'iwlagn_alloc_bcast_station': {},
'iwl_trans_pcie_start_hw': {},
'iwl_trans_pcie_start_fw': {},
'iwl_run_init_ucode': {},
'iwl_load_ucode_wait_alive': {},
'iwl_alive_start': {},
'iwlagn_mac_stop': {},
'iwlagn_mac_suspend': {},
'iwlagn_mac_resume': {},
'iwlagn_mac_add_interface': {},
'iwlagn_mac_remove_interface': {},
'iwlagn_mac_change_interface': {},
'iwlagn_mac_config': {},
'iwlagn_configure_filter': {},
'iwlagn_mac_hw_scan': {},
'iwlagn_bss_info_changed': {},
'iwlagn_mac_channel_switch': {},
'iwlagn_mac_flush': {},
# ATA
'ata_eh_recover': { 'args_x86_64': {'port':'+36(%di):s32'} },
# i915
'i915_gem_resume': {},
'i915_restore_state': {},
'intel_opregion_setup': {},
'g4x_pre_enable_dp': {},
'vlv_pre_enable_dp': {},
'chv_pre_enable_dp': {},
'g4x_enable_dp': {},
'vlv_enable_dp': {},
'intel_hpd_init': {},
'intel_opregion_register': {},
'intel_dp_detect': {},
'intel_hdmi_detect': {},
'intel_opregion_init': {},
'intel_fbdev_set_suspend': {},
}
infocmds = [
[0, 'sysinfo', 'uname', '-a'],
[0, 'cpuinfo', 'head', '-7', '/proc/cpuinfo'],
[0, 'kparams', 'cat', '/proc/cmdline'],
[0, 'mcelog', 'mcelog'],
[0, 'pcidevices', 'lspci', '-tv'],
[0, 'usbdevices', 'lsusb', '-tv'],
[0, 'acpidevices', 'sh', '-c', 'ls -l /sys/bus/acpi/devices/*/physical_node'],
[0, 's0ix_require', 'cat', '/sys/kernel/debug/pmc_core/substate_requirements'],
[0, 's0ix_debug', 'cat', '/sys/kernel/debug/pmc_core/slp_s0_debug_status'],
[1, 's0ix_residency', 'cat', '/sys/kernel/debug/pmc_core/slp_s0_residency_usec'],
[1, 'interrupts', 'cat', '/proc/interrupts'],
[1, 'wakeups', 'cat', '/sys/kernel/debug/wakeup_sources'],
[2, 'gpecounts', 'sh', '-c', 'grep -v invalid /sys/firmware/acpi/interrupts/*'],
[2, 'suspendstats', 'sh', '-c', 'grep -v invalid /sys/power/suspend_stats/*'],
[2, 'cpuidle', 'sh', '-c', 'grep -v invalid /sys/devices/system/cpu/cpu*/cpuidle/state*/s2idle/*'],
[2, 'battery', 'sh', '-c', 'grep -v invalid /sys/class/power_supply/*/*'],
[2, 'thermal', 'sh', '-c', 'grep . /sys/class/thermal/thermal_zone*/temp'],
]
cgblacklist = []
kprobes = dict()
timeformat = '%.3f'
cmdline = '%s %s' % \
(os.path.basename(sys.argv[0]), ' '.join(sys.argv[1:]))
sudouser = ''
	def __init__(self):
		# Probe the running system for per-host defaults: the arch-specific
		# kprobe args key, hostname, the rtc sysfs path (only if it exposes
		# date/time/wakealarm), ANSI color support, the default output dir
		# name, and the invoking sudo user (for output ownership).
		self.archargs = 'args_'+platform.machine()
		self.hostname = platform.node()
		if(self.hostname == ''):
			self.hostname = 'localhost'
		rtc = "rtc0"
		if os.path.exists('/dev/rtc'):
			rtc = os.readlink('/dev/rtc')
		rtc = '/sys/class/rtc/'+rtc
		if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
			os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
			self.rtcpath = rtc
		if (hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()):
			self.ansi = True
		self.testdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S')
		if os.getuid() == 0 and 'SUDO_USER' in os.environ and \
			os.environ['SUDO_USER']:
			self.sudouser = os.environ['SUDO_USER']
def resetlog(self):
self.logmsg = ''
self.platinfo = []
def vprint(self, msg):
self.logmsg += msg+'\n'
if self.verbose or msg.startswith('WARNING:'):
pprint(msg)
def signalHandler(self, signum, frame):
if not self.result:
return
signame = self.signames[signum] if signum in self.signames else 'UNKNOWN'
msg = 'Signal %s caused a tool exit, line %d' % (signame, frame.f_lineno)
self.outputResult({'error':msg})
sys.exit(3)
def signalHandlerInit(self):
capture = ['BUS', 'SYS', 'XCPU', 'XFSZ', 'PWR', 'HUP', 'INT', 'QUIT',
'ILL', 'ABRT', 'FPE', 'SEGV', 'TERM']
self.signames = dict()
for i in capture:
s = 'SIG'+i
try:
signum = getattr(signal, s)
signal.signal(signum, self.signalHandler)
except:
continue
self.signames[signum] = s
def rootCheck(self, fatal=True):
if(os.access(self.powerfile, os.W_OK)):
return True
if fatal:
msg = 'This command requires sysfs mount and root access'
pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
sys.exit(1)
return False
def rootUser(self, fatal=False):
if 'USER' in os.environ and os.environ['USER'] == 'root':
return True
if fatal:
msg = 'This command must be run as root'
pprint('ERROR: %s\n' % msg)
self.outputResult({'error':msg})
sys.exit(1)
return False
def usable(self, file, ishtml=False):
if not os.path.exists(file) or os.path.getsize(file) < 1:
return False
if ishtml:
try:
fp = open(file, 'r')
res = fp.read(1000)
fp.close()
except:
return False
if '<html>' not in res:
return False
return True
def getExec(self, cmd):
try:
fp = Popen(['which', cmd], stdout=PIPE, stderr=PIPE).stdout
out = ascii(fp.read()).strip()
fp.close()
except:
out = ''
if out:
return out
for path in ['/sbin', '/bin', '/usr/sbin', '/usr/bin',
'/usr/local/sbin', '/usr/local/bin']:
cmdfull = os.path.join(path, cmd)
if os.path.exists(cmdfull):
return cmdfull
return out
def setPrecision(self, num):
if num < 0 or num > 6:
return
self.timeformat = '%.{0}f'.format(num)
def setOutputFolder(self, value):
args = dict()
n = datetime.now()
args['date'] = n.strftime('%y%m%d')
args['time'] = n.strftime('%H%M%S')
args['hostname'] = args['host'] = self.hostname
args['mode'] = self.suspendmode
return value.format(**args)
def setOutputFile(self):
if self.dmesgfile != '':
m = re.match('(?P<name>.*)_dmesg\.txt.*', self.dmesgfile)
if(m):
self.htmlfile = m.group('name')+'.html'
if self.ftracefile != '':
m = re.match('(?P<name>.*)_ftrace\.txt.*', self.ftracefile)
if(m):
self.htmlfile = m.group('name')+'.html'
def systemInfo(self, info):
p = m = ''
if 'baseboard-manufacturer' in info:
m = info['baseboard-manufacturer']
elif 'system-manufacturer' in info:
m = info['system-manufacturer']
if 'system-product-name' in info:
p = info['system-product-name']
elif 'baseboard-product-name' in info:
p = info['baseboard-product-name']
if m[:5].lower() == 'intel' and 'baseboard-product-name' in info:
p = info['baseboard-product-name']
c = info['processor-version'] if 'processor-version' in info else ''
b = info['bios-version'] if 'bios-version' in info else ''
r = info['bios-release-date'] if 'bios-release-date' in info else ''
self.sysstamp = '# sysinfo | man:%s | plat:%s | cpu:%s | bios:%s | biosdate:%s | numcpu:%d | memsz:%d | memfr:%d' % \
(m, p, c, b, r, self.cpucount, self.memtotal, self.memfree)
if self.osversion:
self.sysstamp += ' | os:%s' % self.osversion
def printSystemInfo(self, fatal=False):
self.rootCheck(True)
out = dmidecode(self.mempath, fatal)
if len(out) < 1:
return
fmt = '%-24s: %s'
if self.osversion:
print(fmt % ('os-version', self.osversion))
for name in sorted(out):
print(fmt % (name, out[name]))
print(fmt % ('cpucount', ('%d' % self.cpucount)))
print(fmt % ('memtotal', ('%d kB' % self.memtotal)))
print(fmt % ('memfree', ('%d kB' % self.memfree)))
def cpuInfo(self):
self.cpucount = 0
if os.path.exists('/proc/cpuinfo'):
with open('/proc/cpuinfo', 'r') as fp:
for line in fp:
if re.match('^processor[ \t]*:[ \t]*[0-9]*', line):
self.cpucount += 1
if os.path.exists('/proc/meminfo'):
with open('/proc/meminfo', 'r') as fp:
for line in fp:
m = re.match('^MemTotal:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memtotal = int(m.group('sz'))
m = re.match('^MemFree:[ \t]*(?P<sz>[0-9]*) *kB', line)
if m:
self.memfree = int(m.group('sz'))
if os.path.exists('/etc/os-release'):
with open('/etc/os-release', 'r') as fp:
for line in fp:
if line.startswith('PRETTY_NAME='):
self.osversion = line[12:].strip().replace('"', '')
	def initTestOutput(self, name):
		# Compose the per-test output file names (dmesg/ftrace/html) from
		# hostname, suspend mode, kernel version and a timestamp, then
		# create the output directory (chowned to the sudo user if any).
		self.prefix = self.hostname
		v = open('/proc/version', 'r').read().strip()
		kver = v.split()[2]
		fmt = name+'-%m%d%y-%H%M%S'
		testtime = datetime.now().strftime(fmt)
		self.teststamp = \
			'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
		ext = ''
		if self.gzip:
			ext = '.gz'
		self.dmesgfile = \
			self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'+ext
		self.ftracefile = \
			self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'+ext
		self.htmlfile = \
			self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
		if not os.path.isdir(self.testdir):
			os.makedirs(self.testdir)
			self.sudoUserchown(self.testdir)
def getValueList(self, value):
out = []
for i in value.split(','):
if i.strip():
out.append(i.strip())
return out
def setDeviceFilter(self, value):
self.devicefilter = self.getValueList(value)
def setCallgraphFilter(self, value):
self.cgfilter = self.getValueList(value)
def skipKprobes(self, value):
for k in self.getValueList(value):
if k in self.tracefuncs:
del self.tracefuncs[k]
if k in self.dev_tracefuncs:
del self.dev_tracefuncs[k]
	def setCallgraphBlacklist(self, file):
		# Load the list of function names whose callgraphs are discarded.
		self.cgblacklist = self.listFromFile(file)
	def rtcWakeAlarmOn(self):
		# Arm the rtc wakealarm to fire rtcwaketime seconds from now,
		# clearing any previously armed alarm first.  Writes the sysfs
		# wakealarm file via a shell echo (requires write access).
		call('echo 0 > '+self.rtcpath+'/wakealarm', shell=True)
		nowtime = open(self.rtcpath+'/since_epoch', 'r').read().strip()
		if nowtime:
			nowtime = int(nowtime)
		else:
			# if hardware time fails, use the software time
			nowtime = int(datetime.now().strftime('%s'))
		alarm = nowtime + self.rtcwaketime
		call('echo %d > %s/wakealarm' % (alarm, self.rtcpath), shell=True)
	def rtcWakeAlarmOff(self):
		# Disarm any pending rtc wakealarm.
		call('echo 0 > %s/wakealarm' % self.rtcpath, shell=True)
def initdmesg(self):
# get the latest time stamp from the dmesg log
lines = Popen('dmesg', stdout=PIPE).stdout.readlines()
ktime = '0'
for line in reversed(lines):
line = ascii(line).replace('\r\n', '')
idx = line.find('[')
if idx > 1:
line = line[idx:]
m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
if(m):
ktime = m.group('ktime')
break
self.dmesgstart = float(ktime)
	def getdmesg(self, testdata):
		# Write the dmesg log file for this test: the data file header
		# plus every dmesg line newer than the timestamp captured by
		# initdmesg().
		op = self.writeDatafileHeader(self.dmesgfile, testdata)
		# store all new dmesg lines since initdmesg was called
		fp = Popen('dmesg', stdout=PIPE).stdout
		for line in fp:
			line = ascii(line).replace('\r\n', '')
			idx = line.find('[')
			if idx > 1:
				line = line[idx:]
			m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
			if(not m):
				continue
			ktime = float(m.group('ktime'))
			if ktime > self.dmesgstart:
				op.write(line)
		fp.close()
		op.close()
def listFromFile(self, file):
list = []
fp = open(file)
for i in fp.read().split('\n'):
i = i.strip()
if i and i[0] != '#':
list.append(i)
fp.close()
return list
def addFtraceFilterFunctions(self, file):
for i in self.listFromFile(file):
if len(i) < 2:
continue
self.tracefuncs[i] = dict()
	def getFtraceFilterFunctions(self, current):
		# Print ftrace filter functions: with current=False dump the
		# kernel's full available list; otherwise print our tracefuncs,
		# color-highlighting entries this kernel cannot trace.
		self.rootCheck(True)
		if not current:
			call('cat '+self.tpath+'available_filter_functions', shell=True)
			return
		master = self.listFromFile(self.tpath+'available_filter_functions')
		for i in sorted(self.tracefuncs):
			if 'func' in self.tracefuncs[i]:
				i = self.tracefuncs[i]['func']
			if i in master:
				print(i)
			else:
				print(self.colorText(i))
	def setFtraceFilterFunctions(self, list):
		# Write the subset of the given functions that this kernel can
		# trace into set_graph_function (module suffixes stripped).
		master = self.listFromFile(self.tpath+'available_filter_functions')
		flist = ''
		for i in list:
			if i not in master:
				continue
			if ' [' in i:
				# drop the trailing '[module]' annotation
				flist += i.split(' ')[0]+'\n'
			else:
				flist += i+'\n'
		fp = open(self.tpath+'set_graph_function', 'w')
		fp.write(flist)
		fp.close()
def basicKprobe(self, name):
self.kprobes[name] = {'name': name,'func': name,'args': dict(),'format': name}
def defaultKprobe(self, name, kdata):
k = kdata
for field in ['name', 'format', 'func']:
if field not in k:
k[field] = name
if self.archargs in k:
k['args'] = k[self.archargs]
else:
k['args'] = dict()
k['format'] = name
self.kprobes[name] = k
def kprobeColor(self, name):
if name not in self.kprobes or 'color' not in self.kprobes[name]:
return ''
return self.kprobes[name]['color']
def kprobeDisplayName(self, name, dataraw):
if name not in self.kprobes:
self.basicKprobe(name)
data = ''
quote=0
# first remvoe any spaces inside quotes, and the quotes
for c in dataraw:
if c == '"':
quote = (quote + 1) % 2
if quote and c == ' ':
data += '_'
elif c != '"':
data += c
fmt, args = self.kprobes[name]['format'], self.kprobes[name]['args']
arglist = dict()
# now process the args
for arg in sorted(args):
arglist[arg] = ''
m = re.match('.* '+arg+'=(?P<arg>.*) ', data);
if m:
arglist[arg] = m.group('arg')
else:
m = re.match('.* '+arg+'=(?P<arg>.*)', data);
if m:
arglist[arg] = m.group('arg')
out = fmt.format(**arglist)
out = out.replace(' ', '_').replace('"', '')
return out
def kprobeText(self, kname, kprobe):
name = fmt = func = kname
args = dict()
if 'name' in kprobe:
name = kprobe['name']
if 'format' in kprobe:
fmt = kprobe['format']
if 'func' in kprobe:
func = kprobe['func']
if self.archargs in kprobe:
args = kprobe[self.archargs]
if 'args' in kprobe:
args = kprobe['args']
if re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', func):
doError('Kprobe "%s" has format info in the function name "%s"' % (name, func))
for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', fmt):
if arg not in args:
doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
val = 'p:%s_cal %s' % (name, func)
for i in sorted(args):
val += ' %s=%s' % (i, args[i])
val += '\nr:%s_ret %s $retval\n' % (name, func)
return val
	def addKprobes(self, output=False):
		# Test every registered kprobe against this kernel, drop the ones
		# the kernel rejects, then install the survivors into
		# kprobe_events in one write and enable them.
		if len(self.kprobes) < 1:
			return
		if output:
			pprint(' kprobe functions in this kernel:')
		# first test each kprobe
		rejects = []
		# sort kprobes: trace, ub-dev, custom, dev
		kpl = [[], [], [], []]
		linesout = len(self.kprobes)
		for name in sorted(self.kprobes):
			res = self.colorText('YES', 32)
			if not self.testKprobe(name, self.kprobes[name]):
				res = self.colorText('NO')
				rejects.append(name)
			else:
				if name in self.tracefuncs:
					kpl[0].append(name)
				elif name in self.dev_tracefuncs:
					if 'ub' in self.dev_tracefuncs[name]:
						kpl[1].append(name)
					else:
						kpl[3].append(name)
				else:
					kpl[2].append(name)
			if output:
				pprint(' %s: %s' % (name, res))
		kplist = kpl[0] + kpl[1] + kpl[2] + kpl[3]
		# remove all failed ones from the list
		for name in rejects:
			self.kprobes.pop(name)
		# set the kprobes all at once
		self.fsetVal('', 'kprobe_events')
		kprobeevents = ''
		for kp in kplist:
			kprobeevents += self.kprobeText(kp, self.kprobes[kp])
		self.fsetVal(kprobeevents, 'kprobe_events')
		if output:
			check = self.fgetVal('kprobe_events')
			# each accepted kprobe contributes an entry + return line
			linesack = (len(check.split('\n')) - 1) // 2
			pprint(' kprobe functions enabled: %d/%d' % (linesack, linesout))
		self.fsetVal('1', 'events/kprobes/enable')
def testKprobe(self, kname, kprobe):
self.fsetVal('0', 'events/kprobes/enable')
kprobeevents = self.kprobeText(kname, kprobe)
if not kprobeevents:
return False
try:
self.fsetVal(kprobeevents, 'kprobe_events')
check = self.fgetVal('kprobe_events')
except:
return False
linesout = len(kprobeevents.split('\n'))
linesack = len(check.split('\n'))
if linesack < linesout:
return False
return True
def setVal(self, val, file):
	"""Write val (a str) to file unbuffered; return True on success.

	Fix: the original opened the file and closed it inside the try body,
	leaking the file descriptor whenever write()/flush() raised. The
	with-statement guarantees the fd is closed on every path."""
	if not os.path.exists(file):
		return False
	try:
		# buffering=0 requires binary mode; matches the original behavior
		with open(file, 'wb', 0) as fp:
			fp.write(val.encode())
			fp.flush()
	except Exception:
		# best-effort write: callers only care about success/failure
		return False
	return True
def fsetVal(self, val, path):
	"""Write val to a file under the ftrace mount point; refuses (returns
	False) when ftrace is not in use for this run."""
	return self.setVal(val, self.tpath+path) if self.useftrace else False
def getVal(self, file):
	"""Read and return the full contents of file; '' if the file does not
	exist or cannot be read.

	Fix: the original opened the file and closed it inside the try body,
	leaking the file descriptor whenever read() raised. The with-statement
	guarantees the fd is closed on every path."""
	res = ''
	if not os.path.exists(file):
		return res
	try:
		with open(file, 'r') as fp:
			res = fp.read()
	except Exception:
		# best-effort read: missing/unreadable files yield ''
		pass
	return res
def fgetVal(self, path):
	"""Read a file under the ftrace mount point; '' when ftrace is unused."""
	return self.getVal(self.tpath+path) if self.useftrace else ''
def cleanupFtrace(self):
	"""Disable and clear all kprobes and restore the trace buffer size
	to the 1024 kB default. No-op when ftrace is not in use."""
	if self.useftrace:
		self.fsetVal('0', 'events/kprobes/enable')
		self.fsetVal('', 'kprobe_events')
		self.fsetVal('1024', 'buffer_size_kb')
def setupAllKprobes(self):
	"""Register a default kprobe for every tracing function and every
	dev-tracing function (tracefuncs first, matching the original order)."""
	for table in (self.tracefuncs, self.dev_tracefuncs):
		for name in table:
			self.defaultKprobe(name, table[name])
def isCallgraphFunc(self, name):
	"""Return True if name is a function the callgraph should include.
	In 'command' mode with no tracefuncs configured, everything matches."""
	if self.suspendmode == 'command' and len(self.tracefuncs) < 1:
		return True
	for key in self.tracefuncs:
		info = self.tracefuncs[key]
		# an entry may override its match target via a 'func' field
		target = info['func'] if 'func' in info else key
		if target == name:
			return True
	return False
def initFtrace(self, quiet=False):
	"""Configure ftrace for a test run: reset state, set the clock and
	buffer size, then set up either a function_graph callgraph trace or
	kprobes, and finally enable the configured trace events.
	The ordering of the tracefs writes below is significant."""
	if not self.useftrace:
		return
	if not quiet:
		sysvals.printSystemInfo(False)
		pprint('INITIALIZING FTRACE')
	# turn trace off
	self.fsetVal('0', 'tracing_on')
	self.cleanupFtrace()
	# set the trace clock to global
	self.fsetVal('global', 'trace_clock')
	self.fsetVal('nop', 'current_tracer')
	# set trace buffer to an appropriate value
	cpus = max(1, self.cpucount)
	if self.bufsize > 0:
		tgtsize = self.bufsize
	elif self.usecallgraph or self.usedevsrc:
		# callgraph data is large: cap at 1MB for disk/command, 3MB otherwise,
		# and never more than free memory
		bmax = (1*1024*1024) if self.suspendmode in ['disk', 'command'] \
			else (3*1024*1024)
		tgtsize = min(self.memfree, bmax)
	else:
		tgtsize = 65536
	while not self.fsetVal('%d' % (tgtsize // cpus), 'buffer_size_kb'):
		# if the size failed to set, lower it and keep trying
		tgtsize -= 65536
		if tgtsize < 65536:
			# give up and accept whatever the kernel currently has
			tgtsize = int(self.fgetVal('buffer_size_kb')) * cpus
			break
	self.vprint('Setting trace buffers to %d kB (%d kB per cpu)' % (tgtsize, tgtsize/cpus))
	# initialize the callgraph trace
	if(self.usecallgraph):
		# set trace type
		self.fsetVal('function_graph', 'current_tracer')
		self.fsetVal('', 'set_ftrace_filter')
		# temporary hack to fix https://bugzilla.kernel.org/show_bug.cgi?id=212761
		fp = open(self.tpath+'set_ftrace_notrace', 'w')
		fp.write('native_queued_spin_lock_slowpath\ndev_driver_string')
		fp.close()
		# set trace format options
		self.fsetVal('print-parent', 'trace_options')
		self.fsetVal('funcgraph-abstime', 'trace_options')
		self.fsetVal('funcgraph-cpu', 'trace_options')
		self.fsetVal('funcgraph-duration', 'trace_options')
		self.fsetVal('funcgraph-proc', 'trace_options')
		self.fsetVal('funcgraph-tail', 'trace_options')
		self.fsetVal('nofuncgraph-overhead', 'trace_options')
		self.fsetVal('context-info', 'trace_options')
		self.fsetVal('graph-time', 'trace_options')
		self.fsetVal('%d' % self.max_graph_depth, 'max_graph_depth')
		# build the function filter list for the graph tracer
		cf = ['dpm_run_callback']
		if(self.usetraceevents):
			cf += ['dpm_prepare', 'dpm_complete']
		for fn in self.tracefuncs:
			if 'func' in self.tracefuncs[fn]:
				cf.append(self.tracefuncs[fn]['func'])
			else:
				cf.append(fn)
		if self.ftop:
			self.setFtraceFilterFunctions([self.ftopfunc])
		else:
			self.setFtraceFilterFunctions(cf)
	# initialize the kprobe trace
	elif self.usekprobes:
		for name in self.tracefuncs:
			self.defaultKprobe(name, self.tracefuncs[name])
		if self.usedevsrc:
			for name in self.dev_tracefuncs:
				self.defaultKprobe(name, self.dev_tracefuncs[name])
		if not quiet:
			pprint('INITIALIZING KPROBES')
		self.addKprobes(self.verbose)
	if(self.usetraceevents):
		# turn trace events on
		events = iter(self.traceevents)
		for e in events:
			self.fsetVal('1', 'events/power/'+e+'/enable')
	# clear the trace buffer
	self.fsetVal('', 'trace')
def verifyFtrace(self):
	"""Return True when every tracefs file needed for trace collection
	exists (plus the callgraph-specific files when callgraph is enabled)."""
	needed = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock',
		'trace_marker', 'trace_options', 'tracing_on']
	if(self.usecallgraph):
		needed += [
			'available_filter_functions',
			'set_ftrace_filter',
			'set_graph_function'
		]
	return all(os.path.exists(self.tpath+f) for f in needed)
def verifyKprobes(self):
	"""Return True when the tracefs files needed for kprobes exist."""
	return all(os.path.exists(self.tpath+f) for f in ('kprobe_events', 'events'))
def colorText(self, str, color=31):
	"""Wrap str in an ANSI color escape (default red, 31) when color
	output is enabled; otherwise return it unchanged.
	NOTE: parameter name `str` shadows the builtin but is kept for
	interface compatibility."""
	if self.ansi:
		return '\x1B[%d;40m%s\x1B[m' % (color, str)
	return str
def writeDatafileHeader(self, filename, testdata):
	"""Open filename for writing (possibly gzipped via openlog) and emit
	the standard data file header: stamps, command line, and per-test
	firmware/turbostat/wifi/netfix/error lines. Returns the open file
	object so the caller can continue writing."""
	fp = self.openlog(filename, 'w')
	fp.write('%s\n%s\n# command | %s\n' % (self.teststamp, self.sysstamp, self.cmdline))
	for test in testdata:
		if 'fw' in test:
			fw = test['fw']
			if(fw):
				fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1]))
		if 'turbo' in test:
			fp.write('# turbostat %s\n' % test['turbo'])
		if 'wifi' in test:
			fp.write('# wifi %s\n' % test['wifi'])
		if 'netfix' in test:
			fp.write('# netfix %s\n' % test['netfix'])
		# always record the error line in multi-test output
		if test['error'] or len(testdata) > 1:
			fp.write('# enter_sleep_error %s\n' % test['error'])
	return fp
def sudoUserchown(self, dir):
	"""Recursively chown dir to the sudo user so output files created as
	root remain accessible to the invoking user. Best-effort: output and
	errors are discarded. NOTE(review): dir is interpolated into a shell
	command — assumed to come from trusted local configuration."""
	if os.path.exists(dir) and self.sudouser:
		cmd = 'chown -R {0}:{0} {1} > /dev/null 2>&1'
		call(cmd.format(self.sudouser, dir), shell=True)
def outputResult(self, testdata, num=0):
	"""Append a pass/fail summary for one test to the results file
	(self.result). num > 0 suffixes each key with the test number so
	multiple tests can share one file. No-op when no results file is set."""
	if not self.result:
		return
	n = ''
	if num > 0:
		n = '%d' % num
	fp = open(self.result, 'a')
	if 'error' in testdata:
		fp.write('result%s: fail\n' % n)
		fp.write('error%s: %s\n' % (n, testdata['error']))
	else:
		fp.write('result%s: pass\n' % n)
	if 'mode' in testdata:
		fp.write('mode%s: %s\n' % (n, testdata['mode']))
	for v in ['suspend', 'resume', 'boot', 'lastinit']:
		if v in testdata:
			fp.write('%s%s: %.3f\n' % (v, n, testdata[v]))
	# firmware times are stored in ns; convert to ms for the report
	for v in ['fwsuspend', 'fwresume']:
		if v in testdata:
			fp.write('%s%s: %.3f\n' % (v, n, testdata[v] / 1000000.0))
	if 'bugurl' in testdata:
		fp.write('url%s: %s\n' % (n, testdata['bugurl']))
	fp.close()
	# make the results file readable by the invoking (sudo) user
	self.sudoUserchown(self.result)
def configFile(self, file):
	"""Resolve a config file name. Checks, in order: the path as given,
	next to this script, and in the script's config/ subdirectory.
	Returns the first match or '' when none exists."""
	dir = os.path.dirname(os.path.realpath(__file__))
	for candidate in (file, dir+'/'+file, dir+'/config/'+file):
		if os.path.exists(candidate):
			return candidate
	return ''
def openlog(self, filename, mode):
	"""Open a log file, transparently handling gzip. For writes, gzip is
	used when self.gzip is set; for reads, the file is sniffed by trying
	a gzip read first so either plain or compressed logs can be opened.
	Returns a text-mode file object."""
	isgz = self.gzip
	if mode == 'r':
		try:
			# probe: succeeds only if the file really is gzip
			with gzip.open(filename, mode+'t') as fp:
				test = fp.read(64)
			isgz = True
		except:
			isgz = False
	if isgz:
		return gzip.open(filename, mode+'t')
	return open(filename, mode)
def putlog(self, filename, text):
	"""Append text to a (possibly gzipped) log file."""
	with self.openlog(filename, 'a') as fp:
		fp.write(text)
		# explicit close inside the with block is redundant but harmless
		fp.close()
def dlog(self, text):
	"""Append a '# '-prefixed debug line to the dmesg log, if one is set."""
	if not self.dmesgfile:
		return
	self.putlog(self.dmesgfile, '# %s\n' % text)
def flog(self, text):
	"""Append raw text to the ftrace log file."""
	self.putlog(self.ftracefile, text)
def b64unzip(self, data):
	"""Reverse of b64zip: base64-decode then zlib-decompress to a str.
	Returns the input unchanged when it is not valid base64/zlib data
	(bare except kept to preserve the original best-effort behavior)."""
	try:
		return codecs.decode(base64.b64decode(data), 'zlib').decode()
	except:
		return data
def b64zip(self, data):
	"""zlib-compress a str and return it base64-encoded as an ascii str."""
	compressed = codecs.encode(data.encode(), 'zlib')
	return base64.b64encode(compressed).decode()
def platforminfo(self, cmdafter):
	"""Append platform info to a completed ftrace file: the test command
	(in command mode), a base64/zlib-packed device-properties line built
	from the devices seen in the trace, and one packed line per entry in
	cmdafter ((name, cmdline, info) tuples). Returns False if the ftrace
	file does not exist, True otherwise."""
	# add platform info on to a completed ftrace file
	if not os.path.exists(self.ftracefile):
		return False
	footer = '#\n'
	# add test command string line if need be
	if self.suspendmode == 'command' and self.testcommand:
		footer += '# platform-testcmd: %s\n' % (self.testcommand)
	# get a list of target devices from the ftrace file
	props = dict()
	tp = TestProps()
	tf = self.openlog(self.ftracefile, 'r')
	for line in tf:
		if tp.stampInfo(line, self):
			continue
		# parse only valid lines, if this is not one move on
		m = re.match(tp.ftrace_line_fmt, line)
		if(not m or 'device_pm_callback_start' not in line):
			continue
		m = re.match('.*: (?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*', m.group('msg'));
		if(not m):
			continue
		dev = m.group('d')
		if dev not in props:
			props[dev] = DevProps()
	tf.close()
	# now get the syspath for each target device
	for dirname, dirnames, filenames in os.walk('/sys/devices'):
		if(re.match('.*/power', dirname) and 'async' in filenames):
			dev = dirname.split('/')[-2]
			# prefer the shortest (most canonical) syspath for a device
			if dev in props and (not props[dev].syspath or len(dirname) < len(props[dev].syspath)):
				props[dev].syspath = dirname[:-6]
	# now fill in the properties for our target devices
	for dev in sorted(props):
		dirname = props[dev].syspath
		if not dirname or not os.path.exists(dirname):
			continue
		props[dev].isasync = False
		if os.path.exists(dirname+'/power/async'):
			fp = open(dirname+'/power/async')
			if 'enabled' in fp.read():
				props[dev].isasync = True
			fp.close()
		fields = os.listdir(dirname)
		# first matching descriptive attribute wins as the alt name
		for file in ['product', 'name', 'model', 'description', 'id', 'idVendor']:
			if file not in fields:
				continue
			try:
				with open(os.path.join(dirname, file), 'rb') as fp:
					props[dev].altname = ascii(fp.read())
			except:
				continue
			if file == 'idVendor':
				# USB device: combine vendor and product ids
				idv, idp = props[dev].altname.strip(), ''
				try:
					with open(os.path.join(dirname, 'idProduct'), 'rb') as fp:
						idp = ascii(fp.read()).strip()
				except:
					props[dev].altname = ''
					break
				props[dev].altname = '%s:%s' % (idv, idp)
			break
		if props[dev].altname:
			out = props[dev].altname.strip().replace('\n', ' ')\
				.replace(',', ' ').replace(';', ' ')
			props[dev].altname = out
	# add a devinfo line to the bottom of ftrace
	out = ''
	for dev in sorted(props):
		out += props[dev].out(dev)
	footer += '# platform-devinfo: %s\n' % self.b64zip(out)
	# add a line for each of these commands with their outputs
	for name, cmdline, info in cmdafter:
		footer += '# platform-%s: %s | %s\n' % (name, cmdline, self.b64zip(info))
	self.flog(footer)
	return True
def commonPrefix(self, list):
	"""Longest common prefix of a list of strings; when the prefix looks
	like a path, it is trimmed back to the last '/'. Fewer than two
	strings yields ''. NOTE: parameter name `list` shadows the builtin
	but is kept for interface compatibility."""
	if len(list) < 2:
		return ''
	prefix = list[0]
	for candidate in list[1:]:
		# shrink the prefix until this string starts with it
		while prefix and not candidate.startswith(prefix):
			prefix = prefix[:-1]
		if not prefix:
			break
	if '/' in prefix and not prefix.endswith('/'):
		prefix = prefix[0:prefix.rfind('/')+1]
	return prefix
def dictify(self, text, format):
	"""Parse command output into a dict.
	format 1: first line is a header stored under '@', remaining lines
	are space-delimited key/value pairs (value kept verbatim).
	format 2: colon-delimited pairs; when the value contains a number,
	only the first number is stored."""
	out = dict()
	pending_header = (format == 1)
	delim = ' ' if format == 1 else ':'
	for raw in text.split('\n'):
		if pending_header:
			pending_header = False
			out['@'] = raw
			continue
		raw = raw.strip()
		if delim not in raw:
			continue
		key, val = raw.split(delim, 1)
		num = re.search(r'[\d]+', val)
		if format == 2 and num:
			out[key.strip()] = num.group()
		else:
			out[key.strip()] = val
	return out
def cmdinfo(self, begin, debug=False):
	"""Run the configured info commands (self.infocmds) and collect their
	output. With begin=True, snapshot the output of delta-capable commands
	into self.cmd1; with begin=False, diff those snapshots against a fresh
	run and report only the changed keys. Returns a list of
	(name, cmdline, info) tuples."""
	out = []
	if begin:
		self.cmd1 = dict()
	for cargs in self.infocmds:
		# cargs layout: [delta, name, cmd, arg, arg, ...]
		delta, name = cargs[0], cargs[1]
		cmdline, cmdpath = ' '.join(cargs[2:]), self.getExec(cargs[2])
		if not cmdpath or (begin and not delta):
			continue
		self.dlog('[%s]' % cmdline)
		try:
			fp = Popen([cmdpath]+cargs[3:], stdout=PIPE, stderr=PIPE).stdout
			info = ascii(fp.read()).strip()
			fp.close()
		except:
			continue
		if not debug and begin:
			self.cmd1[name] = self.dictify(info, delta)
		elif not debug and delta and name in self.cmd1:
			# diff the before/after snapshots of this command's output
			before, after = self.cmd1[name], self.dictify(info, delta)
			dinfo = ('\t%s\n' % before['@']) if '@' in before and len(before) > 1 else ''
			prefix = self.commonPrefix(list(before.keys()))
			for key in sorted(before):
				if key in after and before[key] != after[key]:
					title = key.replace(prefix, '')
					if delta == 2:
						dinfo += '\t%s : %s -> %s\n' % \
							(title, before[key].strip(), after[key].strip())
					else:
						dinfo += '%10s (start) : %s\n%10s (after) : %s\n' % \
							(title, before[key], title, after[key])
			dinfo = '\tnothing changed' if not dinfo else dinfo.rstrip()
			out.append((name, cmdline, dinfo))
		else:
			out.append((name, cmdline, '\tnothing' if not info else info))
	return out
def testVal(self, file, fmt='basic', value=''):
	"""Set a sysfs test value, remembering the original in self.cfgdef so
	it can be restored later. file='restoreall' writes every remembered
	default back and clears the dict. fmt selects how the current value
	is extracted: 'radio' takes the [bracketed] choice, 'acpi' takes the
	hex token from the last line, 'basic' takes the whole content."""
	if file == 'restoreall':
		for f in self.cfgdef:
			if os.path.exists(f):
				fp = open(f, 'w')
				fp.write(self.cfgdef[f])
				fp.close()
		self.cfgdef = dict()
	elif value and os.path.exists(file):
		fp = open(file, 'r+')
		if fmt == 'radio':
			m = re.match('.*\[(?P<v>.*)\].*', fp.read())
			if m:
				self.cfgdef[file] = m.group('v')
		elif fmt == 'acpi':
			line = fp.read().strip().split('\n')[-1]
			m = re.match('.* (?P<v>[0-9A-Fx]*) .*', line)
			if m:
				self.cfgdef[file] = m.group('v')
		else:
			self.cfgdef[file] = fp.read().strip()
		fp.write(value)
		fp.close()
def s0ixSupport(self):
	"""Return True when the platform supports s2idle (S0ix): both the
	s0ix residency file and the mem_sleep file must exist, and s2idle
	must be a selectable mem_sleep state.

	Fixes: read self.mempowerfile instead of the global
	sysvals.mempowerfile (same file on the global instance, but the
	method silently ignored per-instance settings), and use a
	with-statement so the fd is closed even if read() raises."""
	if not os.path.exists(self.s0ixres) or not os.path.exists(self.mempowerfile):
		return False
	with open(self.mempowerfile, 'r') as fp:
		data = fp.read().strip()
	if '[s2idle]' in data:
		return True
	return False
def haveTurbostat(self):
	"""Return True if turbostat is enabled (self.tstat), present in PATH,
	and responds to -v with a recognizable version string (turbostat
	prints its version on stderr)."""
	if not self.tstat:
		return False
	cmd = self.getExec('turbostat')
	if not cmd:
		return False
	fp = Popen([cmd, '-v'], stdout=PIPE, stderr=PIPE).stderr
	out = ascii(fp.read()).strip()
	fp.close()
	if re.match('turbostat version .*', out):
		self.vprint(out)
		return True
	return False
def turbostat(self, s0ixready):
	"""Run the suspend via turbostat so it can sample power counters
	across the cycle ('echo freeze > powerfile' executed under turbostat).
	Parses turbostat's stderr into key=value pairs joined with '|';
	returns '' when the output cannot be parsed.
	s0ixready: when False, an all-zero SYS%LPI column is dropped."""
	cmd = self.getExec('turbostat')
	# keyline/valline start as '' and become token lists once matched
	rawout = keyline = valline = ''
	fullcmd = '%s -q -S echo freeze > %s' % (cmd, self.powerfile)
	fp = Popen(['sh', '-c', fullcmd], stdout=PIPE, stderr=PIPE).stderr
	for line in fp:
		line = ascii(line)
		rawout += line
		if keyline and valline:
			continue
		# first the header row, then the matching value row
		if re.match('(?i)Avg_MHz.*', line):
			keyline = line.strip().split()
		elif keyline:
			valline = line.strip().split()
	fp.close()
	if not keyline or not valline or len(keyline) != len(valline):
		errmsg = 'unrecognized turbostat output:\n'+rawout.strip()
		self.vprint(errmsg)
		if not self.verbose:
			pprint(errmsg)
		return ''
	if self.verbose:
		pprint(rawout.strip())
	out = []
	for key in keyline:
		idx = keyline.index(key)
		val = valline[idx]
		if key == 'SYS%LPI' and not s0ixready and re.match('^[0\.]*$', val):
			continue
		out.append('%s=%s' % (key, val))
	return '|'.join(out)
def netfixon(self, net='both'):
	"""Run the external 'netfix' tool to re-enable networking after
	resume; returns its stdout, or '' when the tool is not installed."""
	cmd = self.getExec('netfix')
	if not cmd:
		return ''
	fp = Popen([cmd, '-s', net, 'on'], stdout=PIPE, stderr=PIPE).stdout
	out = ascii(fp.read()).strip()
	fp.close()
	return out
def wifiDetails(self, dev):
	"""Return 'dev:driver:pciid' for a wifi interface by reading its
	sysfs uevent file; falls back to just the device name on any error."""
	try:
		info = open('/sys/class/net/%s/device/uevent' % dev, 'r').read().strip()
	except:
		return dev
	vals = [dev]
	for prop in info.split('\n'):
		if prop.startswith('DRIVER=') or prop.startswith('PCI_ID='):
			vals.append(prop.split('=')[-1])
	return ':'.join(vals)
def checkWifi(self, dev=''):
	"""Return the name of a connected wifi device per /proc/net/wireless
	('' when none). If dev is given, only that device matches."""
	try:
		w = open('/proc/net/wireless', 'r').read().strip()
	except:
		return ''
	# device lines come after the two header lines, hence reversed()
	for line in reversed(w.split('\n')):
		m = re.match(' *(?P<dev>.*): (?P<stat>[0-9a-f]*) .*', line)
		if not m or (dev and dev != m.group('dev')):
			continue
		return m.group('dev')
	return ''
def pollWifi(self, dev, timeout=10):
	"""Poll (10ms interval) for wifi device dev to reconnect, for up to
	timeout seconds. Returns '<details> reconnected <secs>' on success or
	'<details> timeout <timeout>' on failure."""
	start = time.time()
	while (time.time() - start) < timeout:
		w = self.checkWifi(dev)
		if w:
			return '%s reconnected %.2f' % \
				(self.wifiDetails(dev), max(0, time.time() - start))
		time.sleep(0.01)
	return '%s timeout %d' % (self.wifiDetails(dev), timeout)
def errorSummary(self, errinfo, msg):
	"""Fold msg into the cumulative error summary list errinfo (mutated
	in place). If msg matches an existing entry's fuzzy regex, bump its
	count and record this host/html file; otherwise build a new regex
	from msg — numeric tokens become wildcards, regex metacharacters are
	escaped — and append a fresh entry.

	Fix: the regex literals below were plain strings containing invalid
	escape sequences ('\\-', '\\.', '\\]', ...), which raise
	SyntaxWarning/DeprecationWarning on modern Python; they are now raw
	strings with byte-identical values."""
	found = False
	for entry in errinfo:
		if re.match(entry['match'], msg):
			entry['count'] += 1
			if self.hostname not in entry['urls']:
				entry['urls'][self.hostname] = [self.htmlfile]
			elif self.htmlfile not in entry['urls'][self.hostname]:
				entry['urls'][self.hostname].append(self.htmlfile)
			found = True
			break
	if found:
		return
	arr = msg.split()
	for j in range(len(arr)):
		# purely numeric tokens (timestamps, error codes) match any number
		if re.match(r'^[0-9,\-\.]*$', arr[j]):
			arr[j] = r'[0-9,\-\.]*'
		else:
			arr[j] = arr[j]\
				.replace('\\', '\\\\').replace(']', r'\]').replace('[', r'\[')\
				.replace('.', r'\.').replace('+', r'\+').replace('*', r'\*')\
				.replace('(', r'\(').replace(')', r'\)').replace('}', r'\}')\
				.replace('{', r'\{')
	mstr = ' *'.join(arr)
	entry = {
		'line': msg,
		'match': mstr,
		'count': 1,
		'urls': {self.hostname: [self.htmlfile]}
	}
	errinfo.append(entry)
def multistat(self, start, idx, finish):
	"""Print a status line for one iteration of a multi-test run and
	maintain the running totals in self.multitest.
	start: True at test start, False at completion.
	idx: zero-based test index. finish: projected end time (datetime),
	used only in timed ('time') mode to compute the time remaining."""
	if 'time' in self.multitest:
		id = '%d Duration=%dmin' % (idx+1, self.multitest['time'])
	else:
		id = '%d/%d' % (idx+1, self.multitest['count'])
	t = time.time()
	if 'start' not in self.multitest:
		# very first call: initialize the timing state
		self.multitest['start'] = self.multitest['last'] = t
		self.multitest['total'] = 0.0
		pprint('TEST (%s) START' % id)
		return
	dt = t - self.multitest['last']
	if not start:
		if idx == 0 and self.multitest['delay'] > 0:
			self.multitest['total'] += self.multitest['delay']
		pprint('TEST (%s) COMPLETE -- Duration %.1fs' % (id, dt))
		return
	self.multitest['total'] += dt
	self.multitest['last'] = t
	avg = self.multitest['total'] / idx
	if 'time' in self.multitest:
		left = finish - datetime.now()
		# drop sub-second noise from the remaining-time display
		left -= timedelta(microseconds=left.microseconds)
	else:
		left = timedelta(seconds=((self.multitest['count'] - idx) * int(avg)))
	pprint('TEST (%s) START - Avg Duration %.1fs, Time left %s' % \
		(id, avg, str(left)))
def multiinit(self, c, d):
	"""Parse the 'multi' command-line arguments: c is either an execution
	count or a duration with a d/h/m suffix (days/hours/minutes, stored
	in minutes), d is the delay between tests in seconds."""
	sz, unit = 'count', 'm'
	if c.endswith('d') or c.endswith('h') or c.endswith('m'):
		# duration form: strip the unit suffix and switch to 'time' mode
		sz, unit, c = 'time', c[-1], c[:-1]
	self.multitest['run'] = True
	self.multitest[sz] = getArgInt('multi: n d (exec count)', c, 1, 1000000, False)
	self.multitest['delay'] = getArgInt('multi: n d (delay between tests)', d, 0, 3600, False)
	# normalize days/hours to minutes
	if unit == 'd':
		self.multitest[sz] *= 1440
	elif unit == 'h':
		self.multitest[sz] *= 60
def displayControl(self, cmd):
	"""Control the X display via xset (run as the sudo user when set).
	cmd: 'init' (disable dpms/screensaver), 'reset' (reset screensaver),
	'on'/'off'/'standby'/'suspend' (force a dpms state), or 'stat'
	(return the current monitor state string). Returns the xset exit
	status, except 'stat' which returns the state or 'unknown'."""
	xset, ret = 'timeout 10 xset -d :0.0 {0}', 0
	if self.sudouser:
		xset = 'sudo -u %s %s' % (self.sudouser, xset)
	if cmd == 'init':
		ret = call(xset.format('dpms 0 0 0'), shell=True)
		if not ret:
			ret = call(xset.format('s off'), shell=True)
	elif cmd == 'reset':
		ret = call(xset.format('s reset'), shell=True)
	elif cmd in ['on', 'off', 'standby', 'suspend']:
		b4 = self.displayControl('stat')
		ret = call(xset.format('dpms force %s' % cmd), shell=True)
		if not ret:
			# verify the state actually changed
			curr = self.displayControl('stat')
			self.vprint('Display Switched: %s -> %s' % (b4, curr))
			if curr != cmd:
				self.vprint('WARNING: Display failed to change to %s' % cmd)
		if ret:
			self.vprint('WARNING: Display failed to change to %s with xset' % cmd)
		return ret
	elif cmd == 'stat':
		fp = Popen(xset.format('q').split(' '), stdout=PIPE).stdout
		ret = 'unknown'
		for line in fp:
			m = re.match('[\s]*Monitor is (?P<m>.*)', ascii(line))
			if(m and len(m.group('m')) >= 2):
				out = m.group('m').lower()
				# xset reports 'in suspend/standby...'; strip the 'in '
				ret = out[3:] if out[0:2] == 'in' else out
				break
		fp.close()
	return ret
def setRuntimeSuspend(self, before=True):
	"""Toggle runtime suspend on all devices around a test.
	before=True: set every device's power/control to 'on' or 'auto'
	(direction chosen by self.rs) and remember the list so the values
	can be restored; before=False: restore the previous values."""
	if before:
		# runtime suspend disable or enable
		if self.rs > 0:
			self.rstgt, self.rsval, self.rsdir = 'on', 'auto', 'enabled'
		else:
			self.rstgt, self.rsval, self.rsdir = 'auto', 'on', 'disabled'
		pprint('CONFIGURING RUNTIME SUSPEND...')
		self.rslist = deviceInfo(self.rstgt)
		for i in self.rslist:
			self.setVal(self.rsval, i)
		pprint('runtime suspend %s on all devices (%d changed)' % (self.rsdir, len(self.rslist)))
		pprint('waiting 5 seconds...')
		# give devices time to settle in the new power state
		time.sleep(5)
	else:
		# runtime suspend re-enable or re-disable
		for i in self.rslist:
			self.setVal(self.rstgt, i)
		pprint('runtime suspend settings restored on %d devices' % len(self.rslist))
def start(self, pm):
	"""Begin data collection: turn ftrace tracing on and, if enabled,
	start the process monitor pm."""
	if self.useftrace:
		self.dlog('start ftrace tracing')
		self.fsetVal('1', 'tracing_on')
		if self.useprocmon:
			self.dlog('start the process monitor')
			pm.start()
def stop(self, pm):
	"""End data collection: stop the process monitor (if it was started)
	and turn ftrace tracing off."""
	if self.useftrace:
		if self.useprocmon:
			self.dlog('stop the process monitor')
			pm.stop()
		self.dlog('stop ftrace tracing')
		self.fsetVal('0', 'tracing_on')
# the single global SystemValues instance used throughout the script
sysvals = SystemValues()
# recognized boolean option spellings, and the subset meaning "off"
switchvalues = ['enable', 'disable', 'on', 'off', 'true', 'false', '1', '0']
switchoff = ['disable', 'off', 'false', '0']
# human-readable label for each suspend mode
suspendmodename = {
	'standby': 'standby (S1)',
	'freeze': 'freeze (S2idle)',
	'mem': 'suspend (S3)',
	'disk': 'hibernate (S4)'
}
# Class: DevProps
# Description:
# Simple class which holds property values collected
# for all the devices used in the timeline.
class DevProps:
	"""Property values collected for one device used in the timeline."""
	def __init__(self):
		self.syspath = ''
		self.altname = ''
		self.isasync = True
		self.xtraclass = ''
		self.xtrainfo = ''
	def out(self, dev):
		# serialized form consumed by the devinfo footer line
		return '%s,%s,%d;' % (dev, self.altname, self.isasync)
	def debug(self, dev):
		pprint('%s:\n\taltname = %s\n\t async = %s' % (dev, self.altname, self.isasync))
	def altName(self, dev):
		# show '<altname> [<dev>]' only when a distinct alt name exists
		if self.altname and self.altname != dev:
			return '%s [%s]' % (self.altname, dev)
		return dev
	def xtraClass(self):
		if self.xtraclass:
			return ' '+self.xtraclass
		return '' if self.isasync else ' sync'
	def xtraInfo(self):
		if self.xtraclass:
			return ' '+self.xtraclass
		return ' (async)' if self.isasync else ' (sync)'
# Class: DeviceNode
# Description:
# A container used to create a device hierarchy, with a single root node
# and a tree of child nodes. Used by Data.deviceTopology()
class DeviceNode:
	"""One node of the device hierarchy tree (a name, its depth, and its
	child nodes); used by Data.deviceTopology()."""
	def __init__(self, nodename, nodedepth):
		self.name = nodename
		self.depth = nodedepth
		self.children = []
# Class: Data
# Description:
# The primary container for suspend/resume test data. There is one for
# each test run. The data is organized into a chronological hierarchy:
# Data.dmesg {
# phases {
# 10 sequential, non-overlapping phases of S/R
# contents: times for phase start/end, order/color data for html
# devlist {
# device callback or action list for this phase
# device {
# a single device callback or generic action
# contents: start/stop times, pid/cpu/driver info
# parents/children, html id for timeline/callgraph
# optionally includes an ftrace callgraph
# optionally includes dev/ps data
# }
# }
# }
# }
#
class Data:
	"""Primary container for suspend/resume test data; one instance per
	test run. See the comment block above this class for the layout of
	self.dmesg (phases -> device lists -> per-device data)."""
	# display order and html color for each of the 10 S/R phases
	phasedef = {
		'suspend_prepare': {'order': 0, 'color': '#CCFFCC'},
		'suspend': {'order': 1, 'color': '#88FF88'},
		'suspend_late': {'order': 2, 'color': '#00AA00'},
		'suspend_noirq': {'order': 3, 'color': '#008888'},
		'suspend_machine': {'order': 4, 'color': '#0000FF'},
		'resume_machine': {'order': 5, 'color': '#FF0000'},
		'resume_noirq': {'order': 6, 'color': '#FF9900'},
		'resume_early': {'order': 7, 'color': '#FFCC00'},
		'resume': {'order': 8, 'color': '#FFFF88'},
		'resume_complete': {'order': 9, 'color': '#FFFFCC'},
	}
	# regexes used to classify dmesg error/warning lines by type
	errlist = {
		'HWERROR' : r'.*\[ *Hardware Error *\].*',
		'FWBUG' : r'.*\[ *Firmware Bug *\].*',
		'BUG' : r'(?i).*\bBUG\b.*',
		'ERROR' : r'(?i).*\bERROR\b.*',
		'WARNING' : r'(?i).*\bWARNING\b.*',
		'FAULT' : r'(?i).*\bFAULT\b.*',
		'FAIL' : r'(?i).*\bFAILED\b.*',
		'INVALID' : r'(?i).*\bINVALID\b.*',
		'CRASH' : r'(?i).*\bCRASHED\b.*',
		'TIMEOUT' : r'(?i).*\bTIMEOUT\b.*',
		'ABORT' : r'(?i).*\bABORT\b.*',
		'IRQ' : r'.*\bgenirq: .*',
		'TASKFAIL': r'.*Freezing of tasks *.*',
		'ACPI' : r'.*\bACPI *(?P<b>[A-Za-z]*) *Error[: ].*',
		'DISKFULL': r'.*\bNo space left on device.*',
		'USBERR' : r'.*usb .*device .*, error [0-9-]*',
		'ATAERR' : r' *ata[0-9\.]*: .*failed.*',
		'MEIERR' : r' *mei.*: .*failed.*',
		'TPMERR' : r'(?i) *tpm *tpm[0-9]*: .*error.*',
	}
def __init__(self, num):
	"""Initialize an empty test-data container.
	num: zero-based test number; also selects the single-letter id
	('a'..'j') used to namespace html element ids per test."""
	idchar = 'abcdefghij'
	self.start = 0.0 # test start
	self.end = 0.0 # test end
	self.hwstart = 0 # rtc test start
	self.hwend = 0 # rtc test end
	self.tSuspended = 0.0 # low-level suspend start
	self.tResumed = 0.0 # low-level resume start
	self.tKernSus = 0.0 # kernel level suspend start
	self.tKernRes = 0.0 # kernel level resume end
	self.fwValid = False # is firmware data available
	self.fwSuspend = 0 # time spent in firmware suspend
	self.fwResume = 0 # time spent in firmware resume
	self.html_device_id = 0
	self.stamp = 0
	self.outfile = ''
	self.kerror = False # True once any dmesg error line is found
	self.wifi = dict()
	self.turbostat = 0
	self.enterfail = ''
	self.currphase = '' # phase currently open (see setPhase)
	self.pstl = dict() # process timeline
	self.testnumber = num
	self.idstr = idchar[num]
	self.dmesgtext = [] # dmesg text file in memory
	self.dmesg = dict() # root data structure
	self.errorinfo = {'suspend':[],'resume':[]}
	self.tLow = [] # time spent in low-level suspends (standby/freeze)
	self.devpids = []
	self.devicegroups = 0
def sortedPhases(self):
	"""Phase names sorted by their recorded display order."""
	order_of = lambda name: self.dmesg[name]['order']
	return sorted(self.dmesg, key=order_of)
def initDevicegroups(self):
	"""Finalize phase bookkeeping once all phases have been added:
	rename duplicate phases ('p*', 'p**'...) to 'p2', 'p3'..., then build
	self.devicegroups as one group per phase."""
	# called when phases are all finished being added
	for phase in sorted(self.dmesg.keys()):
		if '*' in phase:
			# 'phase**' (two stars) becomes 'phase3', etc.
			p = phase.split('*')
			pnew = '%s%d' % (p[0], len(p))
			self.dmesg[pnew] = self.dmesg.pop(phase)
	self.devicegroups = []
	for phase in self.sortedPhases():
		self.devicegroups.append([phase])
def nextPhase(self, phase, offset):
	"""Name of the phase whose order is `offset` away from `phase`;
	'' when no phase has that order."""
	want = self.dmesg[phase]['order'] + offset
	matches = [p for p in self.dmesg if self.dmesg[p]['order'] == want]
	return matches[0] if matches else ''
def lastPhase(self, depth=1):
	"""The depth-th phase counting from the end (depth=1 is the last);
	'' when there are fewer than depth phases."""
	phases = self.sortedPhases()
	return phases[-depth] if len(phases) >= depth else ''
def turbostatInfo(self):
	"""Extract the SYS%LPI and pc10 percentages from the turbostat line
	stored in the in-memory dmesg text. Returns a dict with 'syslpi' and
	'pkgpc10' keys, 'N/A' when not found."""
	tp = TestProps()
	out = {'syslpi':'N/A','pkgpc10':'N/A'}
	for line in self.dmesgtext:
		m = re.match(tp.tstatfmt, line)
		if not m:
			continue
		# turbostat output is a '|'-joined list of key=value pairs
		for i in m.group('t').split('|'):
			if 'SYS%LPI' in i:
				out['syslpi'] = i.split('=')[-1]+'%'
			elif 'pc10' in i:
				out['pkgpc10'] = i.split('=')[-1]+'%'
		break
	return out
def extractErrorInfo(self):
	"""Scan the dmesg log (in-memory text, or the dmesg file when the
	text is empty) for lines matching self.errlist within this test's
	time window, record them in self.errorinfo keyed by suspend/resume
	direction, and set self.kerror when any were found. Returns the
	TestProps parser whose msglist holds the matched messages."""
	lf = self.dmesgtext
	if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
		lf = sysvals.openlog(sysvals.dmesgfile, 'r')
	i = 0
	tp = TestProps()
	list = []
	for line in lf:
		i += 1
		if tp.stampInfo(line, sysvals):
			continue
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if not m:
			continue
		t = float(m.group('ktime'))
		# only consider messages inside this test's time window
		if t < self.start or t > self.end:
			continue
		dir = 'suspend' if t < self.tSuspended else 'resume'
		msg = m.group('msg')
		# known-benign warning, always skipped
		if re.match('capability: warning: .*', msg):
			continue
		for err in self.errlist:
			if re.match(self.errlist[err], msg):
				list.append((msg, err, dir, t, i, i))
				self.kerror = True
				break
	tp.msglist = []
	for msg, type, dir, t, idx1, idx2 in list:
		tp.msglist.append(msg)
		self.errorinfo[dir].append((type, t, idx1, idx2))
	if self.kerror:
		sysvals.dmesglog = True
	if len(self.dmesgtext) < 1 and sysvals.dmesgfile:
		lf.close()
	return tp
def setStart(self, time, msg=''):
	"""Record the test start time; msg, when given, is parsed with
	sysvals.tmstart into an rtc (wall clock) start datetime, with 0 as
	the fallback on parse failure."""
	self.start = time
	if msg:
		try:
			self.hwstart = datetime.strptime(msg, sysvals.tmstart)
		except:
			self.hwstart = 0
def setEnd(self, time, msg=''):
	"""Record the test end time; msg, when given, is parsed with
	sysvals.tmend into an rtc (wall clock) end datetime, with 0 as the
	fallback on parse failure."""
	self.end = time
	if msg:
		try:
			self.hwend = datetime.strptime(msg, sysvals.tmend)
		except:
			self.hwend = 0
def isTraceEventOutsideDeviceCalls(self, pid, time):
	"""Return True when no device callback owned by pid spans `time`
	(i.e. the trace event belongs to no known device call window)."""
	for phase in self.sortedPhases():
		list = self.dmesg[phase]['list']
		for dev in list:
			d = list[dev]
			# interval check: start inclusive, end exclusive
			if(d['pid'] == pid and time >= d['start'] and
				time < d['end']):
				return False
	return True
def sourcePhase(self, start):
	"""First non-machine phase whose end time is at or after `start`;
	defaults to 'resume_complete' when none qualifies."""
	for phase in self.sortedPhases():
		if 'machine' in phase:
			continue
		if start <= self.dmesg[phase]['end']:
			return phase
	return 'resume_complete'
def sourceDevice(self, phaselist, start, end, pid, type):
	"""Find the device entry in phaselist that should own the event
	[start, end] for process pid. type='device' requires the event to be
	entirely inside the device's window; type='thread' expands the
	device's window to cover the event. Returns the device dict or ''."""
	tgtdev = ''
	for phase in phaselist:
		list = self.dmesg[phase]['list']
		for devname in list:
			dev = list[devname]
			# pid must match
			if dev['pid'] != pid:
				continue
			devS = dev['start']
			devE = dev['end']
			if type == 'device':
				# device target event is entirely inside the source boundary
				if(start < devS or start >= devE or end <= devS or end > devE):
					continue
			elif type == 'thread':
				# thread target event will expand the source boundary
				if start < devS:
					dev['start'] = start
				if end > devE:
					dev['end'] = end
			tgtdev = dev
			break
	return tgtdev
def addDeviceFunctionCall(self, displayname, kprobename, proc, pid, start, end, cdata, rdata):
	"""Attach a kprobe function call to a device (or thread block) on the
	timeline. cdata/rdata are the kprobe entry/return data strings.
	Creates a new thread block and recurses once when no existing device
	or thread owns the call. Returns True when the call was recorded."""
	# try to place the call in a device
	phases = self.sortedPhases()
	tgtdev = self.sourceDevice(phases, start, end, pid, 'device')
	# calls with device pids that occur outside device bounds are dropped
	# TODO: include these somehow
	if not tgtdev and pid in self.devpids:
		return False
	# try to place the call in a thread
	if not tgtdev:
		tgtdev = self.sourceDevice(phases, start, end, pid, 'thread')
	# create new thread blocks, expand as new calls are found
	if not tgtdev:
		if proc == '<...>':
			threadname = 'kthread-%d' % (pid)
		else:
			threadname = '%s-%d' % (proc, pid)
		tgtphase = self.sourcePhase(start)
		self.newAction(tgtphase, threadname, pid, '', start, end, '', ' kth', '')
		# the new thread block will now be found by the recursive call
		return self.addDeviceFunctionCall(displayname, kprobename, proc, pid, start, end, cdata, rdata)
	# this should not happen
	if not tgtdev:
		sysvals.vprint('[%f - %f] %s-%d %s %s %s' % \
			(start, end, proc, pid, kprobename, cdata, rdata))
		return False
	# place the call data inside the src element of the tgtdev
	if('src' not in tgtdev):
		tgtdev['src'] = []
	dtf = sysvals.dev_tracefuncs
	ubiquitous = False
	if kprobename in dtf and 'ub' in dtf[kprobename]:
		ubiquitous = True
	# parse the caller, args, and return value from the kprobe data
	mc = re.match('\(.*\) *(?P<args>.*)', cdata)
	mr = re.match('\((?P<caller>\S*).* arg1=(?P<ret>.*)', rdata)
	if mc and mr:
		c = mr.group('caller').split('+')[0]
		a = mc.group('args').strip()
		r = mr.group('ret')
		# long return values are noise; drop them
		if len(r) > 6:
			r = ''
		else:
			r = 'ret=%s ' % r
		# skip ubiquitous calls made from other ubiquitous functions
		if ubiquitous and c in dtf and 'ub' in dtf[c]:
			return False
	else:
		return False
	color = sysvals.kprobeColor(kprobename)
	e = DevFunction(displayname, a, c, r, start, end, ubiquitous, proc, pid, color)
	tgtdev['src'].append(e)
	return True
def overflowDevices(self):
	"""Devices whose callbacks extend beyond the end of this test run."""
	late = []
	for phase in self.sortedPhases():
		for dev in self.dmesg[phase]['list'].values():
			if dev['end'] > self.end:
				late.append(dev)
	return late
def mergeOverlapDevices(self, devlist):
	"""For each overflow device in devlist, find a same-named device in
	this test's phases that overlaps it in time, absorb its end time and
	src calls, and remove the absorbed entry."""
	# merge any devices that overlap devlist
	for dev in devlist:
		devname = dev['name']
		for phase in self.sortedPhases():
			list = self.dmesg[phase]['list']
			if devname not in list:
				continue
			tdev = list[devname]
			# o > 0 means the two time windows overlap
			o = min(dev['end'], tdev['end']) - max(dev['start'], tdev['start'])
			if o <= 0:
				continue
			dev['end'] = tdev['end']
			if 'src' not in dev or 'src' not in tdev:
				continue
			dev['src'] += tdev['src']
			del list[devname]
def usurpTouchingThread(self, name, dev):
	"""If this test has a thread block `name` starting within 0.1s of
	dev's end, merge it into dev (absorb end time and src calls) and
	delete it here. dev belongs to the caller's test, which has priority."""
	# the caller test has priority of this thread, give it to him
	for phase in self.sortedPhases():
		list = self.dmesg[phase]['list']
		if name in list:
			tdev = list[name]
			if tdev['start'] - dev['end'] < 0.1:
				dev['end'] = tdev['end']
				if 'src' not in dev:
					dev['src'] = []
				if 'src' in tdev:
					dev['src'] += tdev['src']
				del list[name]
			break
def stitchTouchingThreads(self, testlist):
	"""For every kthread block in this test, let each test in testlist
	hand over its touching continuation of the same thread (see
	usurpTouchingThread)."""
	# merge any threads between tests that touch
	for phase in self.sortedPhases():
		list = self.dmesg[phase]['list']
		for devname in list:
			dev = list[devname]
			# only thread blocks (html class ' kth') are stitched
			if 'htmlclass' not in dev or 'kth' not in dev['htmlclass']:
				continue
			for data in testlist:
				data.usurpTouchingThread(devname, dev)
def optimizeDevSrc(self):
	"""Collapse consecutive repeats of the same src call into a single
	entry with a count, to reduce timeline size."""
	# merge any src call loops to reduce timeline size
	for phase in self.sortedPhases():
		list = self.dmesg[phase]['list']
		for dev in list:
			if 'src' not in list[dev]:
				continue
			src = list[dev]['src']
			p = 0
			for e in sorted(src, key=lambda event: event.time):
				# e.repeat(p) decides whether e is a repeat of p
				if not p or not e.repeat(p):
					p = e
					continue
				# e is another iteration of p, move it into p
				p.end = e.end
				p.length = p.end - p.time
				p.count += 1
				src.remove(e)
def trimTimeVal(self, t, t0, dT, left):
	"""Shift timestamp t to account for removing a gap of width dT that
	begins at t0. With left=True, times after the gap move back by dT
	(clamped to t0); with left=False, times before the end of the gap
	move forward by dT (times inside the gap clamp to t0+dT)."""
	if left:
		if t <= t0:
			return t
		# clamp values that fall inside the removed gap to its start
		return max(t0, t - dT)
	if t >= t0 + dT:
		return t
	# clamp values that fall inside the gap to its end
	return (t0 + dT) if t > t0 else (t + dT)
def trimTime(self, t0, dT, left):
	"""Apply trimTimeVal(t0, dT, left) to every timestamp held by this
	test: the global markers, every phase boundary, every device (and its
	callgraph lines, src calls, and cpuexec windows), and the errorinfo
	entries."""
	self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left)
	self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left)
	self.start = self.trimTimeVal(self.start, t0, dT, left)
	self.tKernSus = self.trimTimeVal(self.tKernSus, t0, dT, left)
	self.tKernRes = self.trimTimeVal(self.tKernRes, t0, dT, left)
	self.end = self.trimTimeVal(self.end, t0, dT, left)
	for phase in self.sortedPhases():
		p = self.dmesg[phase]
		p['start'] = self.trimTimeVal(p['start'], t0, dT, left)
		p['end'] = self.trimTimeVal(p['end'], t0, dT, left)
		list = p['list']
		for name in list:
			d = list[name]
			d['start'] = self.trimTimeVal(d['start'], t0, dT, left)
			d['end'] = self.trimTimeVal(d['end'], t0, dT, left)
			d['length'] = d['end'] - d['start']
			if('ftrace' in d):
				cg = d['ftrace']
				cg.start = self.trimTimeVal(cg.start, t0, dT, left)
				cg.end = self.trimTimeVal(cg.end, t0, dT, left)
				for line in cg.list:
					line.time = self.trimTimeVal(line.time, t0, dT, left)
			if('src' in d):
				for e in d['src']:
					e.time = self.trimTimeVal(e.time, t0, dT, left)
					e.end = self.trimTimeVal(e.end, t0, dT, left)
					e.length = e.end - e.time
			if('cpuexec' in d):
				# cpuexec is keyed by (start, end) tuples, so rebuild it
				cpuexec = dict()
				for e in d['cpuexec']:
					c0, cN = e
					c0 = self.trimTimeVal(c0, t0, dT, left)
					cN = self.trimTimeVal(cN, t0, dT, left)
					cpuexec[(c0, cN)] = d['cpuexec'][e]
				d['cpuexec'] = cpuexec
	for dir in ['suspend', 'resume']:
		list = []
		for e in self.errorinfo[dir]:
			type, tm, idx1, idx2 = e
			tm = self.trimTimeVal(tm, t0, dT, left)
			list.append((type, tm, idx1, idx2))
		self.errorinfo[dir] = list
def trimFreezeTime(self, tZero):
	"""Remove the clock time spent inside standby/freeze (the gap between
	each suspend_machine end and resume_machine start) from the timeline,
	and record a summary string per gap in self.tLow."""
	# trim out any standby or freeze clock time
	lp = ''
	for phase in self.sortedPhases():
		if 'resume_machine' in phase and 'suspend_machine' in lp:
			tS, tR = self.dmesg[lp]['end'], self.dmesg[phase]['start']
			tL = tR - tS
			if tL <= 0:
				continue
			# shift left or right depending on which side of tZero the gap is
			left = True if tR > tZero else False
			self.trimTime(tS, tL, left)
			if 'waking' in self.dmesg[lp]:
				# waking = (count, total time spent waking)
				tCnt = self.dmesg[lp]['waking'][0]
				if self.dmesg[lp]['waking'][1] >= 0.001:
					tTry = '%.0f' % (round(self.dmesg[lp]['waking'][1] * 1000))
				else:
					tTry = '%.3f' % (self.dmesg[lp]['waking'][1] * 1000)
				text = '%.0f (%s ms waking %d times)' % (tL * 1000, tTry, tCnt)
			else:
				text = '%.0f' % (tL * 1000)
			self.tLow.append(text)
		lp = phase
def getMemTime(self):
	"""Compute the wall-clock time spent suspended from the rtc start/end
	stamps (adjusted by the kernel-side suspend/resume durations) and
	append it, in ms, to self.tLow. No-op without rtc stamps."""
	if not self.hwstart or not self.hwend:
		return
	# kernel-side time before/after the low-level suspend, in microseconds
	stime = (self.tSuspended - self.start) * 1000000
	rtime = (self.end - self.tResumed) * 1000000
	hws = self.hwstart + timedelta(microseconds=stime)
	hwr = self.hwend - timedelta(microseconds=rtime)
	self.tLow.append('%.0f'%((hwr - hws).total_seconds() * 1000))
def getTimeValues(self):
sktime = (self.tSuspended - self.tKernSus) * 1000
rktime = (self.tKernRes - self.tResumed) * 1000
return (sktime, rktime)
	def setPhase(self, phase, ktime, isbegin, order=-1):
		# Open (isbegin) or close a phase at kernel time ktime.
		# Returns the actual phase name used (may be uniquified with '*'
		# suffixes, or the current phase when an end arrives late).
		if(isbegin):
			# phase start over current phase
			if self.currphase:
				if 'resume_machine' not in self.currphase:
					sysvals.vprint('WARNING: phase %s failed to end' % self.currphase)
				self.dmesg[self.currphase]['end'] = ktime
			phases = self.dmesg.keys()
			color = self.phasedef[phase]['color']
			count = len(phases) if order < 0 else order
			# create unique name for every new phase
			while phase in phases:
				phase += '*'
			self.dmesg[phase] = {'list': dict(), 'start': -1.0, 'end': -1.0,
				'row': 0, 'color': color, 'order': count}
			self.dmesg[phase]['start'] = ktime
			self.currphase = phase
		else:
			# phase end without a start
			if phase not in self.currphase:
				if self.currphase:
					sysvals.vprint('WARNING: %s ended instead of %s, ftrace corruption?' % (phase, self.currphase))
				else:
					sysvals.vprint('WARNING: %s ended without a start, ftrace corruption?' % phase)
				return phase
			phase = self.currphase
			self.dmesg[phase]['end'] = ktime
			self.currphase = ''
		return phase
def sortedDevices(self, phase):
list = self.dmesg[phase]['list']
return sorted(list, key=lambda k:list[k]['start'])
def fixupInitcalls(self, phase):
# if any calls never returned, clip them at system resume end
phaselist = self.dmesg[phase]['list']
for devname in phaselist:
dev = phaselist[devname]
if(dev['end'] < 0):
for p in self.sortedPhases():
if self.dmesg[p]['end'] > dev['start']:
dev['end'] = self.dmesg[p]['end']
break
sysvals.vprint('%s (%s): callback didnt return' % (devname, phase))
def deviceFilter(self, devicefilter):
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
rmlist = []
for name in list:
keep = False
for filter in devicefilter:
if filter in name or \
('drv' in list[name] and filter in list[name]['drv']):
keep = True
if not keep:
rmlist.append(name)
for name in rmlist:
del list[name]
def fixupInitcallsThatDidntReturn(self):
# if any calls never returned, clip them at system resume end
for phase in self.sortedPhases():
self.fixupInitcalls(phase)
def phaseOverlap(self, phases):
rmgroups = []
newgroup = []
for group in self.devicegroups:
for phase in phases:
if phase not in group:
continue
for p in group:
if p not in newgroup:
newgroup.append(p)
if group not in rmgroups:
rmgroups.append(group)
for group in rmgroups:
self.devicegroups.remove(group)
self.devicegroups.append(newgroup)
	def newActionGlobal(self, name, start, end, pid=-1, color=''):
		# which phase is this device callback or action in
		# Picks the phase with the most time overlap, creates the action
		# there via newAction, and returns (phase, name) or False.
		phases = self.sortedPhases()
		targetphase = 'none'
		htmlclass = ''
		overlap = 0.0
		myphases = []
		for phase in phases:
			pstart = self.dmesg[phase]['start']
			pend = self.dmesg[phase]['end']
			# see if the action overlaps this phase
			o = max(0, min(end, pend) - max(start, pstart))
			if o > 0:
				myphases.append(phase)
			# set the target phase to the one that overlaps most
			if o > overlap:
				# prefer any earlier-overlapping phase over post_resume
				if overlap > 0 and phase == 'post_resume':
					continue
				targetphase = phase
				overlap = o
		# if no target phase was found, pin it to the edge
		if targetphase == 'none':
			p0start = self.dmesg[phases[0]]['start']
			if start <= p0start:
				targetphase = phases[0]
			else:
				targetphase = phases[-1]
		if pid == -2:
			htmlclass = ' bg'
		elif pid == -3:
			htmlclass = ' ps'
		if len(myphases) > 1:
			# spans multiple phases: render as background, merge groups
			htmlclass = ' bg'
			self.phaseOverlap(myphases)
		if targetphase in phases:
			newname = self.newAction(targetphase, name, pid, '', start, end, '', htmlclass, color)
			return (targetphase, newname)
		return False
def newAction(self, phase, name, pid, parent, start, end, drv, htmlclass='', color=''):
# new device callback for a specific phase
self.html_device_id += 1
devid = '%s%d' % (self.idstr, self.html_device_id)
list = self.dmesg[phase]['list']
length = -1.0
if(start >= 0 and end >= 0):
length = end - start
if pid == -2 or name not in sysvals.tracefuncs.keys():
i = 2
origname = name
while(name in list):
name = '%s[%d]' % (origname, i)
i += 1
list[name] = {'name': name, 'start': start, 'end': end, 'pid': pid,
'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv }
if htmlclass:
list[name]['htmlclass'] = htmlclass
if color:
list[name]['color'] = color
return name
def findDevice(self, phase, name):
list = self.dmesg[phase]['list']
mydev = ''
for devname in sorted(list):
if name == devname or re.match('^%s\[(?P<num>[0-9]*)\]$' % name, devname):
mydev = devname
if mydev:
return list[mydev]
return False
def deviceChildren(self, devname, phase):
devlist = []
list = self.dmesg[phase]['list']
for child in list:
if(list[child]['par'] == devname):
devlist.append(child)
return devlist
def maxDeviceNameSize(self, phase):
size = 0
for name in self.dmesg[phase]['list']:
if len(name) > size:
size = len(name)
return size
	def printDetails(self):
		# Verbose dump of the phase timeline (and each device with -devdump),
		# interleaving the machine suspend/resume boundary times.
		sysvals.vprint('Timeline Details:')
		sysvals.vprint('          test start: %f' % self.start)
		sysvals.vprint('kernel suspend start: %f' % self.tKernSus)
		tS = tR = False
		for phase in self.sortedPhases():
			devlist = self.dmesg[phase]['list']
			dc, ps, pe = len(devlist), self.dmesg[phase]['start'], self.dmesg[phase]['end']
			# print the suspend/resume markers once, before the first
			# phase that starts at or after them
			if not tS and ps >= self.tSuspended:
				sysvals.vprint('   machine suspended: %f' % self.tSuspended)
				tS = True
			if not tR and ps >= self.tResumed:
				sysvals.vprint('     machine resumed: %f' % self.tResumed)
				tR = True
			sysvals.vprint('%20s: %f - %f (%d devices)' % (phase, ps, pe, dc))
			if sysvals.devdump:
				sysvals.vprint(''.join('-' for i in range(80)))
				maxname = '%d' % self.maxDeviceNameSize(phase)
				fmt = '%3d) %'+maxname+'s - %f - %f'
				c = 1
				for name in sorted(devlist):
					s = devlist[name]['start']
					e = devlist[name]['end']
					sysvals.vprint(fmt % (c, name, s, e))
					c += 1
				sysvals.vprint(''.join('-' for i in range(80)))
		sysvals.vprint('   kernel resume end: %f' % self.tKernRes)
		sysvals.vprint('            test end: %f' % self.end)
def deviceChildrenAllPhases(self, devname):
devlist = []
for phase in self.sortedPhases():
list = self.deviceChildren(devname, phase)
for dev in sorted(list):
if dev not in devlist:
devlist.append(dev)
return devlist
def masterTopology(self, name, list, depth):
node = DeviceNode(name, depth)
for cname in list:
# avoid recursions
if name == cname:
continue
clist = self.deviceChildrenAllPhases(cname)
cnode = self.masterTopology(cname, clist, depth+1)
node.children.append(cnode)
return node
def printTopology(self, node):
html = ''
if node.name:
info = ''
drv = ''
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
if node.name in list:
s = list[node.name]['start']
e = list[node.name]['end']
if list[node.name]['drv']:
drv = ' {'+list[node.name]['drv']+'}'
info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000))
html += '<li><b>'+node.name+drv+'</b>'
if info:
html += '<ul>'+info+'</ul>'
html += '</li>'
if len(node.children) > 0:
html += '<ul>'
for cnode in node.children:
html += self.printTopology(cnode)
html += '</ul>'
return html
def rootDeviceList(self):
# list of devices graphed
real = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in sorted(list):
if list[dev]['pid'] >= 0 and dev not in real:
real.append(dev)
# list of top-most root devices
rootlist = []
for phase in self.sortedPhases():
list = self.dmesg[phase]['list']
for dev in sorted(list):
pdev = list[dev]['par']
pid = list[dev]['pid']
if(pid < 0 or re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)):
continue
if pdev and pdev not in real and pdev not in rootlist:
rootlist.append(pdev)
return rootlist
def deviceTopology(self):
rootlist = self.rootDeviceList()
master = self.masterTopology('', rootlist, 0)
return self.printTopology(master)
def selectTimelineDevices(self, widfmt, tTotal, mindevlen):
# only select devices that will actually show up in html
self.tdevlist = dict()
for phase in self.dmesg:
devlist = []
list = self.dmesg[phase]['list']
for dev in list:
length = (list[dev]['end'] - list[dev]['start']) * 1000
width = widfmt % (((list[dev]['end']-list[dev]['start'])*100)/tTotal)
if length >= mindevlen:
devlist.append(dev)
self.tdevlist[phase] = devlist
def addHorizontalDivider(self, devname, devend):
phase = 'suspend_prepare'
self.newAction(phase, devname, -2, '', \
self.start, devend, '', ' sec', '')
if phase not in self.tdevlist:
self.tdevlist[phase] = []
self.tdevlist[phase].append(devname)
d = DevItem(0, phase, self.dmesg[phase]['list'][devname])
return d
	def addProcessUsageEvent(self, name, times):
		# get the start and end times for this process
		# Walk the sorted sample times, find the first/last samples where
		# this process was active, and build a per-interval cpu usage map.
		cpuexec = dict()
		tlast = start = end = -1
		for t in sorted(times):
			if tlast < 0:
				tlast = t
				continue
			if name in self.pstl[t] and self.pstl[t][name] > 0:
				if start < 0:
					start = tlast
				end, key = t, (tlast, t)
				# maxj is the max possible jiffies in this interval;
				# the stored value is the fraction of cpu time used
				maxj = (t - tlast) * 1024.0
				cpuexec[key] = min(1.0, float(self.pstl[t][name]) / maxj)
			tlast = t
		if start < 0 or end < 0:
			return
		# add a new action for this process and get the object
		out = self.newActionGlobal(name, start, end, -3)
		if out:
			phase, devname = out
			dev = self.dmesg[phase]['list'][devname]
			dev['cpuexec'] = cpuexec
def createProcessUsageEvents(self):
# get an array of process names and times
proclist = {'sus': dict(), 'res': dict()}
tdata = {'sus': [], 'res': []}
for t in sorted(self.pstl):
dir = 'sus' if t < self.tSuspended else 'res'
for ps in sorted(self.pstl[t]):
if ps not in proclist[dir]:
proclist[dir][ps] = 0
tdata[dir].append(t)
# process the events for suspend and resume
if len(proclist['sus']) > 0 or len(proclist['res']) > 0:
sysvals.vprint('Process Execution:')
for dir in ['sus', 'res']:
for ps in sorted(proclist[dir]):
self.addProcessUsageEvent(ps, tdata[dir])
def handleEndMarker(self, time, msg=''):
dm = self.dmesg
self.setEnd(time, msg)
self.initDevicegroups()
# give suspend_prepare an end if needed
if 'suspend_prepare' in dm and dm['suspend_prepare']['end'] < 0:
dm['suspend_prepare']['end'] = time
# assume resume machine ends at next phase start
if 'resume_machine' in dm and dm['resume_machine']['end'] < 0:
np = self.nextPhase('resume_machine', 1)
if np:
dm['resume_machine']['end'] = dm[np]['start']
# if kernel resume end not found, assume its the end marker
if self.tKernRes == 0.0:
self.tKernRes = time
# if kernel suspend start not found, assume its the end marker
if self.tKernSus == 0.0:
self.tKernSus = time
# set resume complete to end at end marker
if 'resume_complete' in dm:
dm['resume_complete']['end'] = time
def initcall_debug_call(self, line, quick=False):
m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
'PM: *calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
if not m:
m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
'calling .* @ (?P<n>.*), parent: (?P<p>.*)', line)
if not m:
m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\
'(?P<f>.*)\+ @ (?P<n>.*), parent: (?P<p>.*)', line)
if m:
return True if quick else m.group('t', 'f', 'n', 'p')
return False if quick else ('', '', '', '')
def initcall_debug_return(self, line, quick=False):
m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: PM: '+\
'.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
if not m:
m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) .* (?P<f>.*)\: '+\
'.* returned (?P<r>[0-9]*) after (?P<dt>[0-9]*) usecs', line)
if not m:
m = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', line)
if m:
return True if quick else m.group('t', 'f', 'dt')
return False if quick else ('', '', '')
def debugPrint(self):
for p in self.sortedPhases():
list = self.dmesg[p]['list']
for devname in sorted(list):
dev = list[devname]
if 'ftrace' in dev:
dev['ftrace'].debugPrint(' [%s]' % devname)
# Class: DevFunction
# Description:
# A container for kprobe function data we want in the dev timeline
class DevFunction:
	"""A container for kprobe function data shown in the dev timeline."""
	def __init__(self, name, args, caller, ret, start, end, u, proc, pid, color):
		self.row = 0
		self.count = 1	# number of identical back-to-back calls merged in
		self.name = name
		self.args = args
		self.caller = caller
		self.ret = ret
		self.time = start
		self.length = end - start
		self.end = end
		self.ubiquitous = u
		self.proc = proc
		self.pid = pid
		self.color = color
	def title(self):
		# hover text: signature, repeat count, caller, return and timing
		rep = '(x%d)' % self.count if self.count > 1 else ''
		dur = '%0.3fms' % (self.length * 1000)
		if self.ubiquitous:
			out = '%s(%s)%s <- %s, %s(%s)' % \
				(self.name, self.args, rep, self.caller, self.ret, dur)
		else:
			out = '%s(%s) %s%s(%s)' % (self.name, self.args, self.ret, rep, dur)
		return out.replace('"', '')
	def text(self):
		# short label shown inside the timeline box
		if self.count > 1:
			return '%s(x%d)' % (self.name, self.count)
		return self.name
	def repeat(self, tgt):
		# is the tgt call just a repeat of this call (e.g. are we in a loop)
		gap = self.time - tgt.end
		# only combine calls when every attribute is identical and the
		# gap/length stay within the configured loop thresholds
		return (tgt.caller == self.caller and
			tgt.name == self.name and tgt.args == self.args and
			tgt.proc == self.proc and tgt.pid == self.pid and
			tgt.ret == self.ret and gap >= 0 and
			gap <= sysvals.callloopmaxgap and
			self.length < sysvals.callloopmaxlen)
# Class: FTraceLine
# Description:
# A container for a single line of ftrace data. There are six basic types:
# callgraph line:
# call: " dpm_run_callback() {"
# return: " }"
# leaf: " dpm_run_callback();"
# trace event:
# tracing_mark_write: SUSPEND START or RESUME COMPLETE
# suspend_resume: phase or custom exec block data
# device_pm_callback: device callback info
class FTraceLine:
	def __init__(self, t, m='', d=''):
		# t: timestamp string; m: message/body text;
		# d: duration string, 'traceevent' for nop-format events, or ''
		self.length = 0.0
		self.fcall = False
		self.freturn = False
		self.fevent = False
		self.fkprobe = False
		self.depth = 0
		self.name = ''
		self.type = ''
		self.time = float(t)
		if not m and not d:
			return
		# is this a trace event
		if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)):
			if(d == 'traceevent'):
				# nop format trace event
				msg = m
			else:
				# function_graph format trace event
				em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)
				msg = em.group('msg')
			# split "type: message" when present
			emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg)
			if(emm):
				self.name = emm.group('msg')
				self.type = emm.group('call')
			else:
				self.name = msg
			# kprobe events are typed "<probe>_cal" / "<probe>_ret"
			km = re.match('^(?P<n>.*)_cal$', self.type)
			if km:
				self.fcall = True
				self.fkprobe = True
				self.type = km.group('n')
				return
			km = re.match('^(?P<n>.*)_ret$', self.type)
			if km:
				self.freturn = True
				self.fkprobe = True
				self.type = km.group('n')
				return
			self.fevent = True
			return
		# convert the duration to seconds
		if(d):
			self.length = float(d)/1000000
		# the indentation determines the depth
		match = re.match('^(?P<d> *)(?P<o>.*)$', m)
		if(not match):
			return
		self.depth = self.getDepth(match.group('d'))
		m = match.group('o')
		# function return
		if(m[0] == '}'):
			self.freturn = True
			if(len(m) > 1):
				# includes comment with function name
				match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m)
				if(match):
					self.name = match.group('n').strip()
		# function call
		else:
			self.fcall = True
			# function call with children
			if(m[-1] == '{'):
				match = re.match('^(?P<n>.*) *\(.*', m)
				if(match):
					self.name = match.group('n').strip()
			# function call with no children (leaf)
			elif(m[-1] == ';'):
				self.freturn = True
				match = re.match('^(?P<n>.*) *\(.*', m)
				if(match):
					self.name = match.group('n').strip()
			# something else (possibly a trace marker)
			else:
				self.name = m
	def isCall(self):
		# a call that opens a scope (not a leaf)
		return self.fcall and not self.freturn
	def isReturn(self):
		# a return that closes a scope (not a leaf)
		return self.freturn and not self.fcall
	def isLeaf(self):
		# a self-contained call with no children
		return self.fcall and self.freturn
	def getDepth(self, str):
		# two spaces of indentation per callgraph depth level
		return len(str)/2
	def debugPrint(self, info=''):
		# print this line in callgraph form with an optional annotation
		if self.isLeaf():
			pprint(' -- %12.6f (depth=%02d): %s(); (%.3f us) %s' % (self.time, \
				self.depth, self.name, self.length*1000000, info))
		elif self.freturn:
			pprint(' -- %12.6f (depth=%02d): %s} (%.3f us) %s' % (self.time, \
				self.depth, self.name, self.length*1000000, info))
		else:
			pprint(' -- %12.6f (depth=%02d): %s() { (%.3f us) %s' % (self.time, \
				self.depth, self.name, self.length*1000000, info))
	def startMarker(self):
		# Is this the starting line of a suspend?
		if not self.fevent:
			return False
		if sysvals.usetracemarkers:
			if(self.name.startswith('SUSPEND START')):
				return True
			return False
		else:
			if(self.type == 'suspend_resume' and
				re.match('suspend_enter\[.*\] begin', self.name)):
				return True
			return False
	def endMarker(self):
		# Is this the ending line of a resume?
		if not self.fevent:
			return False
		if sysvals.usetracemarkers:
			if(self.name.startswith('RESUME COMPLETE')):
				return True
			return False
		else:
			if(self.type == 'suspend_resume' and
				re.match('thaw_processes\[.*\] end', self.name)):
				return True
			return False
# Class: FTraceCallGraph
# Description:
# A container for the ftrace callgraph of a single recursive function.
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph
# Each instance is tied to a single device in a single phase, and is
# comprised of an ordered list of FTraceLine objects
class FTraceCallGraph:
	# name given to virtual call/return lines inserted to repair depth
	vfname = 'missing_function_name'
	def __init__(self, pid, sv):
		self.id = ''
		self.invalid = False
		self.name = ''
		self.partial = False
		self.ignore = False
		self.start = -1.0
		self.end = -1.0
		self.list = []
		self.depth = 0
		self.pid = pid
		self.sv = sv
	def addLine(self, line):
		# Add an FTraceLine to this graph, repairing depth misalignments
		# by inserting virtual call/return lines where data is missing.
		# Returns 1 when the callgraph completed normally, -1 when it was
		# completed via a mismatch repair, 0 when more lines are expected.
		# if this is already invalid, just leave
		if(self.invalid):
			if(line.depth == 0 and line.freturn):
				return 1
			return 0
		# invalidate on bad depth
		if(self.depth < 0):
			self.invalidate(line)
			return 0
		# ignore data til we return to the current depth
		if self.ignore:
			if line.depth > self.depth:
				return 0
			else:
				# close out the ignored subtree as a leaf
				self.list[-1].freturn = True
				self.list[-1].length = line.time - self.list[-1].time
				self.ignore = False
				# if this is a return at self.depth, no more work is needed
				if line.depth == self.depth and line.isReturn():
					if line.depth == 0:
						self.end = line.time
						return 1
					return 0
		# compare current depth with this lines pre-call depth
		prelinedep = line.depth
		if line.isReturn():
			prelinedep += 1
		last = 0
		lasttime = line.time
		if len(self.list) > 0:
			last = self.list[-1]
			lasttime = last.time
			if last.isLeaf():
				lasttime += last.length
		# handle low misalignments by inserting returns
		mismatch = prelinedep - self.depth
		warning = self.sv.verbose and abs(mismatch) > 1
		info = []
		if mismatch < 0:
			idx = 0
			# add return calls to get the depth down
			while prelinedep < self.depth:
				self.depth -= 1
				if idx == 0 and last and last.isCall():
					# special case, turn last call into a leaf
					last.depth = self.depth
					last.freturn = True
					last.length = line.time - last.time
					if warning:
						info.append(('[make leaf]', last))
				else:
					# insert a virtual return line
					vline = FTraceLine(lasttime)
					vline.depth = self.depth
					vline.name = self.vfname
					vline.freturn = True
					self.list.append(vline)
					if warning:
						if idx == 0:
							info.append(('', last))
						info.append(('[add return]', vline))
				idx += 1
			if warning:
				info.append(('', line))
		# handle high misalignments by inserting calls
		elif mismatch > 0:
			idx = 0
			if warning:
				info.append(('', last))
			# add calls to get the depth up
			while prelinedep > self.depth:
				if idx == 0 and line.isReturn():
					# special case, turn this return into a leaf
					line.fcall = True
					prelinedep -= 1
					if warning:
						info.append(('[make leaf]', line))
				else:
					# insert a virtual call line
					vline = FTraceLine(lasttime)
					vline.depth = self.depth
					vline.name = self.vfname
					vline.fcall = True
					self.list.append(vline)
					self.depth += 1
					if not last:
						self.start = vline.time
					if warning:
						info.append(('[add call]', vline))
				idx += 1
			if warning and ('[make leaf]', line) not in info:
				info.append(('', line))
		if warning:
			pprint('WARNING: ftrace data missing, corrections made:')
			for i in info:
				t, obj = i
				if obj:
					obj.debugPrint(t)
		# process the call and set the new depth
		skipadd = False
		md = self.sv.max_graph_depth
		if line.isCall():
			# ignore blacklisted/overdepth funcs
			if (md and self.depth >= md - 1) or (line.name in self.sv.cgblacklist):
				self.ignore = True
			else:
				self.depth += 1
		elif line.isReturn():
			self.depth -= 1
			# remove blacklisted/overdepth/empty funcs that slipped through
			if (last and last.isCall() and last.depth == line.depth) or \
				(md and last and last.depth >= md) or \
				(line.name in self.sv.cgblacklist):
				while len(self.list) > 0 and self.list[-1].depth > line.depth:
					self.list.pop(-1)
				if len(self.list) == 0:
					self.invalid = True
					return 1
				# collapse the removed subtree into a leaf
				self.list[-1].freturn = True
				self.list[-1].length = line.time - self.list[-1].time
				self.list[-1].name = line.name
				skipadd = True
		if len(self.list) < 1:
			self.start = line.time
		# check for a mismatch that returned all the way to callgraph end
		res = 1
		if mismatch < 0 and self.list[-1].depth == 0 and self.list[-1].freturn:
			line = self.list[-1]
			skipadd = True
			res = -1
		if not skipadd:
			self.list.append(line)
		if(line.depth == 0 and line.freturn):
			if(self.start < 0):
				self.start = line.time
			self.end = line.time
			if line.fcall:
				self.end += line.length
			if self.list[0].name == self.vfname:
				self.invalid = True
			if res == -1:
				self.partial = True
			return res
		return 0
	def invalidate(self, line):
		# Mark this callgraph unusable and report why (depth underflow
		# implies buffer overflow; otherwise too much data).
		if(len(self.list) > 0):
			first = self.list[0]
			self.list = []
			self.list.append(first)
		self.invalid = True
		id = 'task %s' % (self.pid)
		window = '(%f - %f)' % (self.start, line.time)
		if(self.depth < 0):
			pprint('Data misalignment for '+id+\
				' (buffer overflow), ignoring this callback')
		else:
			pprint('Too much data for '+id+\
				' '+window+', ignoring this callback')
	def slice(self, dev):
		# Extract the sub-callgraph that lies within this device's
		# mutex_lock/mutex_unlock window; returns a new graph or 0.
		minicg = FTraceCallGraph(dev['pid'], self.sv)
		minicg.name = self.name
		mydepth = -1
		good = False
		for l in self.list:
			if(l.time < dev['start'] or l.time > dev['end']):
				continue
			if mydepth < 0:
				# start slicing at the mutex_lock return
				if l.name == 'mutex_lock' and l.freturn:
					mydepth = l.depth
				continue
			elif l.depth == mydepth and l.name == 'mutex_unlock' and l.fcall:
				# stop at the matching mutex_unlock call
				good = True
				break
			l.depth -= mydepth
			minicg.addLine(l)
		if not good or len(minicg.list) < 1:
			return 0
		return minicg
	def repair(self, enddepth):
		# bring the depth back to 0 with additional returns
		fixed = False
		last = self.list[-1]
		for i in reversed(range(enddepth)):
			t = FTraceLine(last.time)
			t.depth = i
			t.freturn = True
			fixed = self.addLine(t)
			if fixed != 0:
				self.end = last.time
				return True
		return False
	def postProcess(self):
		# Validate the call/return pairing of the whole graph, filling
		# in call lengths; returns True when the tree is consistent.
		if len(self.list) > 0:
			self.name = self.list[0].name
		stack = dict()
		cnt = 0
		last = 0
		for l in self.list:
			# ftrace bug: reported duration is not reliable
			# check each leaf and clip it at max possible length
			if last and last.isLeaf():
				if last.length > l.time - last.time:
					last.length = l.time - last.time
			if l.isCall():
				stack[l.depth] = l
				cnt += 1
			elif l.isReturn():
				if(l.depth not in stack):
					if self.sv.verbose:
						pprint('Post Process Error: Depth missing')
						l.debugPrint()
					return False
				# calculate call length from call/return lines
				cl = stack[l.depth]
				cl.length = l.time - cl.time
				if cl.name == self.vfname:
					cl.name = l.name
				stack.pop(l.depth)
				l.length = 0
				cnt -= 1
			last = l
		if(cnt == 0):
			# trace caught the whole call tree
			return True
		elif(cnt < 0):
			if self.sv.verbose:
				pprint('Post Process Error: Depth is less than 0')
			return False
		# trace ended before call tree finished
		return self.repair(cnt)
	def deviceMatch(self, pid, data):
		# Attach this callgraph to the device callback it covers;
		# returns the device name on success, '' otherwise.
		found = ''
		# add the callgraph data to the device hierarchy
		borderphase = {
			'dpm_prepare': 'suspend_prepare',
			'dpm_complete': 'resume_complete'
		}
		if(self.name in borderphase):
			# dpm_prepare/complete span many devices: slice per device
			p = borderphase[self.name]
			list = data.dmesg[p]['list']
			for devname in list:
				dev = list[devname]
				if(pid == dev['pid'] and
					self.start <= dev['start'] and
					self.end >= dev['end']):
					cg = self.slice(dev)
					if cg:
						dev['ftrace'] = cg
					found = devname
			return found
		for p in data.sortedPhases():
			if(data.dmesg[p]['start'] <= self.start and
				self.start <= data.dmesg[p]['end']):
				list = data.dmesg[p]['list']
				for devname in sorted(list, key=lambda k:list[k]['start']):
					dev = list[devname]
					if(pid == dev['pid'] and
						self.start <= dev['start'] and
						self.end >= dev['end']):
						dev['ftrace'] = self
						found = devname
						break
				break
		return found
	def newActionFromFunction(self, data):
		# Create a standalone timeline action for this callgraph when it
		# is not one of the standard dpm callbacks.
		name = self.name
		if name in ['dpm_run_callback', 'dpm_prepare', 'dpm_complete']:
			return
		fs = self.start
		fe = self.end
		if fs < data.start or fe > data.end:
			return
		phase = ''
		for p in data.sortedPhases():
			if(data.dmesg[p]['start'] <= self.start and
				self.start < data.dmesg[p]['end']):
				phase = p
				break
		if not phase:
			return
		out = data.newActionGlobal(name, fs, fe, -2)
		if out:
			phase, myname = out
			data.dmesg[phase]['list'][myname]['ftrace'] = self
	def debugPrint(self, info=''):
		# dump the whole callgraph in human-readable form
		pprint('%s pid=%d [%f - %f] %.3f us' % \
			(self.name, self.pid, self.start, self.end,
			(self.end - self.start)*1000000))
		for l in self.list:
			if l.isLeaf():
				pprint('%f (%02d): %s(); (%.3f us)%s' % (l.time, \
					l.depth, l.name, l.length*1000000, info))
			elif l.freturn:
				pprint('%f (%02d): %s} (%.3f us)%s' % (l.time, \
					l.depth, l.name, l.length*1000000, info))
			else:
				pprint('%f (%02d): %s() { (%.3f us)%s' % (l.time, \
					l.depth, l.name, l.length*1000000, info))
		pprint(' ')
class DevItem:
	"""Wrapper tying a device entry to its test number and phase."""
	def __init__(self, test, phase, dev):
		self.test = test
		self.phase = phase
		self.dev = dev
	def isa(self, cls):
		# does this device's html class string include cls?
		return 'htmlclass' in self.dev and cls in self.dev['htmlclass']
# Class: Timeline
# Description:
# A container for a device timeline which calculates
# all the html properties to display it correctly
class Timeline:
	html_tblock = '<div id="block{0}" class="tblock" style="left:{1}%;width:{2}%;"><div class="tback" style="height:{3}px"></div>\n'
	html_device = '<div id="{0}" title="{1}" class="thread{7}" style="left:{2}%;top:{3}px;height:{4}px;width:{5}%;{8}">{6}</div>\n'
	html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}px;height:{3}px;background:{4}">{5}</div>\n'
	html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background:{3}"></div>\n'
	html_legend = '<div id="p{3}" class="square" style="left:{0}%;background:{1}">&nbsp;{2}</div>\n'
	def __init__(self, rowheight, scaleheight):
		self.html = ''
		self.height = 0	# total timeline height
		self.scaleH = scaleheight # timescale (top) row height
		self.rowH = rowheight	# device row height
		self.bodyH = 0	# body height
		self.rows = 0	# total timeline rows
		self.rowlines = dict()
		self.rowheight = dict()
	def createHeader(self, sv, stamp):
		# Emit the title bar, log buttons and stamp/sysinfo lines.
		if(not stamp['time']):
			return
		self.html += '<div class="version"><a href="https://01.org/pm-graph">%s v%s</a></div>' \
			% (sv.title, sv.version)
		if sv.logmsg and sv.testlog:
			self.html += '<button id="showtest" class="logbtn btnfmt">log</button>'
		if sv.dmesglog:
			self.html += '<button id="showdmesg" class="logbtn btnfmt">dmesg</button>'
		if sv.ftracelog:
			self.html += '<button id="showftrace" class="logbtn btnfmt">ftrace</button>'
		headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n'
		self.html += headline_stamp.format(stamp['host'], stamp['kernel'],
			stamp['mode'], stamp['time'])
		if 'man' in stamp and 'plat' in stamp and 'cpu' in stamp and \
			stamp['man'] and stamp['plat'] and stamp['cpu']:
			headline_sysinfo = '<div class="stamp sysinfo">{0} {1} <i>with</i> {2}</div>\n'
			self.html += headline_sysinfo.format(stamp['man'], stamp['plat'], stamp['cpu'])
	# Function: getDeviceRows
	# Description:
	#    determine how may rows the device funcs will take
	# Arguments:
	#	 rawlist: the list of devices/actions for a single phase
	# Output:
	#	 The total number of rows needed to display this phase of the timeline
	def getDeviceRows(self, rawlist):
		# clear all rows and set them to undefined
		sortdict = dict()
		for item in rawlist:
			item.row = -1
			sortdict[item] = item.length
		# longest items first so they pack into the top rows
		sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
		remaining = len(sortlist)
		rowdata = dict()
		row = 1
		# try to pack each row with as many ranges as possible
		while(remaining > 0):
			if(row not in rowdata):
				rowdata[row] = []
			for i in sortlist:
				if(i.row >= 0):
					continue
				s = i.time
				e = i.time + i.length
				valid = True
				for ritem in rowdata[row]:
					rs = ritem.time
					# NOTE(review): 're' here shadows the regex module
					# within this method's scope
					re = ritem.time + ritem.length
					if(not (((s <= rs) and (e <= rs)) or
						((s >= re) and (e >= re)))):
						valid = False
						break
				if(valid):
					rowdata[row].append(i)
					i.row = row
					remaining -= 1
			row += 1
		return row
	# Function: getPhaseRows
	# Description:
	#	 Organize the timeline entries into the smallest
	#	 number of rows possible, with no entry overlapping
	# Arguments:
	#	 devlist: the list of devices/actions in a group of contiguous phases
	# Output:
	#	 The total number of rows needed to display this phase of the timeline
	def getPhaseRows(self, devlist, row=0, sortby='length'):
		# clear all rows and set them to undefined
		remaining = len(devlist)
		rowdata = dict()
		sortdict = dict()
		myphases = []
		# initialize all device rows to -1 and calculate devrows
		for item in devlist:
			dev = item.dev
			tp = (item.test, item.phase)
			if tp not in myphases:
				myphases.append(tp)
			dev['row'] = -1
			if sortby == 'start':
				# sort by start 1st, then length 2nd
				sortdict[item] = (-1*float(dev['start']), float(dev['end']) - float(dev['start']))
			else:
				# sort by length 1st, then name 2nd
				sortdict[item] = (float(dev['end']) - float(dev['start']), item.dev['name'])
			if 'src' in dev:
				dev['devrows'] = self.getDeviceRows(dev['src'])
		# sort the devlist by length so that large items graph on top
		sortlist = sorted(sortdict, key=sortdict.get, reverse=True)
		orderedlist = []
		# section dividers (pid -2) are placed first
		for item in sortlist:
			if item.dev['pid'] == -2:
				orderedlist.append(item)
		for item in sortlist:
			if item not in orderedlist:
				orderedlist.append(item)
		# try to pack each row with as many devices as possible
		while(remaining > 0):
			rowheight = 1
			if(row not in rowdata):
				rowdata[row] = []
			for item in orderedlist:
				dev = item.dev
				if(dev['row'] < 0):
					s = dev['start']
					e = dev['end']
					valid = True
					for ritem in rowdata[row]:
						rs = ritem.dev['start']
						# NOTE(review): 're' here shadows the regex
						# module within this method's scope
						re = ritem.dev['end']
						if(not (((s <= rs) and (e <= rs)) or
							((s >= re) and (e >= re)))):
							valid = False
							break
					if(valid):
						rowdata[row].append(item)
						dev['row'] = row
						remaining -= 1
						if 'devrows' in dev and dev['devrows'] > rowheight:
							rowheight = dev['devrows']
			# record this row's line count and pixel height per phase
			for t, p in myphases:
				if t not in self.rowlines or t not in self.rowheight:
					self.rowlines[t] = dict()
					self.rowheight[t] = dict()
				if p not in self.rowlines[t] or p not in self.rowheight[t]:
					self.rowlines[t][p] = dict()
					self.rowheight[t][p] = dict()
				rh = self.rowH
				# section headers should use a different row height
				if len(rowdata[row]) == 1 and \
					'htmlclass' in rowdata[row][0].dev and \
					'sec' in rowdata[row][0].dev['htmlclass']:
					rh = 15
				self.rowlines[t][p][row] = rowheight
				self.rowheight[t][p][row] = rowheight * rh
			row += 1
		if(row > self.rows):
			self.rows = int(row)
		return row
	def phaseRowHeight(self, test, phase, row):
		# pixel height of one row within a phase
		return self.rowheight[test][phase][row]
	def phaseRowTop(self, test, phase, row):
		# pixel offset of a row from the top of its phase
		top = 0
		for i in sorted(self.rowheight[test][phase]):
			if i >= row:
				break
			top += self.rowheight[test][phase][i]
		return top
	def calcTotalRows(self):
		# Calculate the heights and offsets for the header and rows
		maxrows = 0
		standardphases = []
		for t in self.rowlines:
			for p in self.rowlines[t]:
				total = 0
				for i in sorted(self.rowlines[t][p]):
					total += self.rowlines[t][p][i]
				if total > maxrows:
					maxrows = total
				if total == len(self.rowlines[t][p]):
					standardphases.append((t, p))
		self.height = self.scaleH + (maxrows*self.rowH)
		self.bodyH = self.height - self.scaleH
		# if there is 1 line per row, draw them the standard way
		for t, p in standardphases:
			for i in sorted(self.rowheight[t][p]):
				self.rowheight[t][p][i] = float(self.bodyH)/len(self.rowlines[t][p])
	def createZoomBox(self, mode='command', testcount=1):
		# Create bounding box, add buttons
		html_zoombox = '<center><button id="zoomin">ZOOM IN +</button><button id="zoomout">ZOOM OUT -</button><button id="zoomdef">ZOOM 1:1</button></center>\n'
		html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n'
		html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail{0}</button>'
		html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n'
		if mode != 'command':
			if testcount > 1:
				self.html += html_devlist2
				self.html += html_devlist1.format('1')
			else:
				self.html += html_devlist1.format('')
		self.html += html_zoombox
		self.html += html_timeline.format('dmesg', self.height)
	# Function: createTimeScale
	# Description:
	#	 Create the timescale for a timeline block
	# Arguments:
	#	 m0: start time (mode begin)
	#	 mMax: end time (mode end)
	#	 tTotal: total timeline time
	#	 mode: suspend or resume
	# Output:
	#	 The html code needed to display the time scale
	def createTimeScale(self, m0, mMax, tTotal, mode):
		timescale = '<div class="t" style="right:{0}%">{1}</div>\n'
		rline = '<div class="t" style="left:0;border-left:1px solid black;border-right:0;">{0}</div>\n'
		output = '<div class="timescale">\n'
		# set scale for timeline
		mTotal = mMax - m0
		# tick spacing: 0.1s normally, 1s for long (>4s) timelines
		tS = 0.1
		if(tTotal <= 0):
			return output+'</div>\n'
		if(tTotal > 4):
			tS = 1
		divTotal = int(mTotal/tS) + 1
		divEdge = (mTotal - tS*(divTotal-1))*100/mTotal
		for i in range(divTotal):
			htmlline = ''
			if(mode == 'suspend'):
				# suspend counts down to 0 at the right edge
				pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal) - divEdge)
				val = '%0.fms' % (float(i-divTotal+1)*tS*1000)
				if(i == divTotal - 1):
					val = mode
				htmlline = timescale.format(pos, val)
			else:
				pos = '%0.3f' % (100 - ((float(i)*tS*100)/mTotal))
				val = '%0.fms' % (float(i)*tS*1000)
				htmlline = timescale.format(pos, val)
				if(i == 0):
					htmlline = rline.format(mode)
			output += htmlline
		self.html += output+'</div>\n'
# Class: TestProps
# Description:
# A list of values describing the properties of these test runs
class TestProps:
	# regex patterns for the commented metadata lines that sleepgraph
	# embeds in its dmesg/ftrace output files
	stampfmt = '# [a-z]*-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
		'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
		' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
	wififmt = '^# wifi *(?P<d>\S*) *(?P<s>\S*) *(?P<t>[0-9\.]+).*'
	tstatfmt = '^# turbostat (?P<t>\S*)'
	testerrfmt = '^# enter_sleep_error (?P<e>.*)'
	sysinfofmt = '^# sysinfo .*'
	cmdlinefmt = '^# command \| (?P<cmd>.*)'
	kparamsfmt = '^# kparams \| (?P<kp>.*)'
	devpropfmt = '# Device Properties: .*'
	pinfofmt = '# platform-(?P<val>[a-z,A-Z,0-9,_]*): (?P<info>.*)'
	tracertypefmt = '# tracer: (?P<t>.*)'
	firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
	procexecfmt = 'ps - (?P<ps>.*)$'
	procmultifmt = '@(?P<n>[0-9]*)\|(?P<ps>.*)$'
	# ftrace line formats: function_graph tracer vs nop tracer
	ftrace_line_fmt_fg = \
		'^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\
		' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\
		'[ +!#\*@$]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)'
	ftrace_line_fmt_nop = \
		' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\
		'(?P<flags>\S*) *(?P<time>[0-9\.]*): *'+\
		'(?P<msg>.*)'
	# pattern for the machine-suspend trace event (swapped in parseStamp
	# for freeze mode)
	machinesuspend = 'machine_suspend\[.*'
	# scratch state used to stitch multi-part process-monitor (ps) lines
	multiproclist = dict()
	multiproctime = 0.0
	multiproccnt = 0
	def __init__(self):
		self.stamp = ''
		self.sysinfo = ''
		self.cmdline = ''
		self.testerror = []
		self.turbostat = []
		self.wifi = []
		self.fwdata = []
		# default to the nop tracer format until setTracerType overrides
		self.ftrace_line_fmt = self.ftrace_line_fmt_nop
		self.cgformat = False
		self.data = 0
		self.ktemp = dict()
	def setTracerType(self, tracer):
		"""Select the ftrace line regex matching the given tracer type."""
		if(tracer == 'function_graph'):
			self.cgformat = True
			self.ftrace_line_fmt = self.ftrace_line_fmt_fg
		elif(tracer == 'nop'):
			self.ftrace_line_fmt = self.ftrace_line_fmt_nop
		else:
			doError('Invalid tracer format: [%s]' % tracer)
	def stampInfo(self, line, sv):
		"""Test a log line against every metadata pattern and store any
		match; return True if the line was consumed as metadata."""
		if re.match(self.stampfmt, line):
			self.stamp = line
			return True
		elif re.match(self.sysinfofmt, line):
			self.sysinfo = line
			return True
		elif re.match(self.tstatfmt, line):
			self.turbostat.append(line)
			return True
		elif re.match(self.wififmt, line):
			self.wifi.append(line)
			return True
		elif re.match(self.testerrfmt, line):
			self.testerror.append(line)
			return True
		elif re.match(self.firmwarefmt, line):
			self.fwdata.append(line)
			return True
		elif(re.match(self.devpropfmt, line)):
			self.parseDevprops(line, sv)
			return True
		elif(re.match(self.pinfofmt, line)):
			self.parsePlatformInfo(line, sv)
			return True
		m = re.match(self.cmdlinefmt, line)
		if m:
			self.cmdline = m.group('cmd')
			return True
		m = re.match(self.tracertypefmt, line)
		if(m):
			self.setTracerType(m.group('t'))
			return True
		return False
	def parseStamp(self, data, sv):
		"""Populate a Data object (and the sysvals-like sv) from the
		stored metadata lines: timestamp, host, suspend mode, kernel,
		firmware times, turbostat, wifi status and enter-sleep errors."""
		# global test data
		m = re.match(self.stampfmt, self.stamp)
		if not self.stamp or not m:
			doError('data does not include the expected stamp')
		data.stamp = {'time': '', 'host': '', 'mode': ''}
		# stamp year is two digits, offset from 2000
		dt = datetime(int(m.group('y'))+2000, int(m.group('m')),
			int(m.group('d')), int(m.group('H')), int(m.group('M')),
			int(m.group('S')))
		data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p')
		data.stamp['host'] = m.group('host')
		data.stamp['mode'] = m.group('mode')
		data.stamp['kernel'] = m.group('kernel')
		# sysinfo line is a '|'-separated list of key:value pairs
		if re.match(self.sysinfofmt, self.sysinfo):
			for f in self.sysinfo.split('|'):
				if '#' in f:
					continue
				tmp = f.strip().split(':', 1)
				key = tmp[0]
				val = tmp[1]
				data.stamp[key] = val
		sv.hostname = data.stamp['host']
		sv.suspendmode = data.stamp['mode']
		# freeze mode uses a different machine-suspend trace event
		if sv.suspendmode == 'freeze':
			self.machinesuspend = 'timekeeping_freeze\[.*'
		else:
			self.machinesuspend = 'machine_suspend\[.*'
		if sv.suspendmode == 'command' and sv.ftracefile != '':
			# command mode: infer the real mode from the trace itself
			modes = ['on', 'freeze', 'standby', 'mem', 'disk']
			fp = sv.openlog(sv.ftracefile, 'r')
			for line in fp:
				m = re.match('.* machine_suspend\[(?P<mode>.*)\]', line)
				if m and m.group('mode') in ['1', '2', '3', '4']:
					sv.suspendmode = modes[int(m.group('mode'))]
					data.stamp['mode'] = sv.suspendmode
					break
			fp.close()
		sv.cmdline = self.cmdline
		if not sv.stamp:
			sv.stamp = data.stamp
		# firmware data
		if sv.suspendmode == 'mem' and len(self.fwdata) > data.testnumber:
			m = re.match(self.firmwarefmt, self.fwdata[data.testnumber])
			if m:
				data.fwSuspend, data.fwResume = int(m.group('s')), int(m.group('r'))
				if(data.fwSuspend > 0 or data.fwResume > 0):
					data.fwValid = True
		# turbostat data
		if len(self.turbostat) > data.testnumber:
			m = re.match(self.tstatfmt, self.turbostat[data.testnumber])
			if m:
				data.turbostat = m.group('t')
		# wifi data
		if len(self.wifi) > data.testnumber:
			m = re.match(self.wififmt, self.wifi[data.testnumber])
			if m:
				data.wifi = {'dev': m.group('d'), 'stat': m.group('s'),
					'time': float(m.group('t'))}
				data.stamp['wifi'] = m.group('d')
		# sleep mode enter errors
		if len(self.testerror) > data.testnumber:
			m = re.match(self.testerrfmt, self.testerror[data.testnumber])
			if m:
				data.enterfail = m.group('e')
	def devprops(self, data):
		"""Parse a ';'-separated device-properties string into a dict of
		DevProps objects keyed by device name."""
		props = dict()
		devlist = data.split(';')
		for dev in devlist:
			# each entry is name,altname,async-flag
			f = dev.split(',')
			if len(f) < 3:
				continue
			dev = f[0]
			props[dev] = DevProps()
			props[dev].altname = f[1]
			if int(f[2]):
				props[dev].isasync = True
			else:
				props[dev].isasync = False
		return props
	def parseDevprops(self, line, sv):
		"""Extract device properties from a '# Device Properties:' line."""
		idx = line.index(': ') + 2
		if idx >= len(line):
			return
		props = self.devprops(line[idx:])
		if sv.suspendmode == 'command' and 'testcommandstring' in props:
			sv.testcommand = props['testcommandstring'].altname
		sv.devprops = props
	def parsePlatformInfo(self, line, sv):
		"""Extract platform info from a '# platform-*:' line; devinfo and
		testcmd are handled specially, everything else is appended to
		sv.platinfo as [name, cmdline, output]."""
		m = re.match(self.pinfofmt, line)
		if not m:
			return
		name, info = m.group('val'), m.group('info')
		if name == 'devinfo':
			# device info payload is base64+zlib compressed
			sv.devprops = self.devprops(sv.b64unzip(info))
			return
		elif name == 'testcmd':
			sv.testcommand = info
			return
		field = info.split('|')
		if len(field) < 2:
			return
		cmdline = field[0].strip()
		output = sv.b64unzip(field[1].strip())
		sv.platinfo.append([name, cmdline, output])
# Class: TestRun
# Description:
# A container for a suspend/resume test run. This is necessary as
# there could be more than one, and they need to be separate.
class TestRun:
	"""Container pairing one Data object with per-run ftrace scratch data."""
	def __init__(self, dataobj):
		self.data = dataobj  # the Data object this run populates
		self.ftemp = {}      # callgraph scratch, keyed by pid/proc
		self.ttemp = {}      # trace event scratch, keyed by event name
class ProcessMonitor:
	"""Sample per-process CPU jiffies from /proc and log the active
	processes into the ftrace buffer via the trace_marker file."""
	# max characters per trace_marker write; longer output is split
	maxchars = 512
	def __init__(self):
		# pid -> {'name', 'user', 'kern'} from the previous snapshot
		self.proclist = dict()
		self.running = False
	def procstat(self):
		"""Take one snapshot of /proc/[1-9]*/stat, diff user/kernel
		jiffies against the previous snapshot, and write the processes
		that consumed CPU to the trace_marker as 'ps - name-pid jiffies'
		entries."""
		c = ['cat /proc/[1-9]*/stat 2>/dev/null']
		process = Popen(c, shell=True, stdout=PIPE)
		running = dict()
		for line in process.stdout:
			data = ascii(line).split()
			pid = data[0]
			# stat field 2 is "(comm)"; strip the parentheses
			name = re.sub('[()]', '', data[1])
			user = int(data[13])
			kern = int(data[14])
			kjiff = ujiff = 0
			if pid not in self.proclist:
				# first sighting: record a baseline, report no usage
				self.proclist[pid] = {'name' : name, 'user' : user, 'kern' : kern}
			else:
				val = self.proclist[pid]
				ujiff = user - val['user']
				kjiff = kern - val['kern']
				val['user'] = user
				val['kern'] = kern
			if ujiff > 0 or kjiff > 0:
				running[pid] = ujiff + kjiff
		process.wait()
		out = ['']
		for pid in running:
			jiffies = running[pid]
			val = self.proclist[pid]
			# split the output into chunks that fit one marker write
			if len(out[-1]) > self.maxchars:
				out.append('')
			elif len(out[-1]) > 0:
				out[-1] += ','
			out[-1] += '%s-%s %d' % (val['name'], pid, jiffies)
		if len(out) > 1:
			# multi-part output: tag each write with the part count
			for line in out:
				sysvals.fsetVal('ps - @%d|%s' % (len(out), line), 'trace_marker')
		else:
			sysvals.fsetVal('ps - %s' % out[0], 'trace_marker')
	def processMonitor(self, tid):
		# sampling loop, run in a background thread until stop() clears
		# the running flag
		while self.running:
			self.procstat()
	def start(self):
		"""Launch the background sampling thread."""
		self.thread = Thread(target=self.processMonitor, args=(0,))
		self.running = True
		self.thread.start()
	def stop(self):
		"""Signal the sampling thread to exit after its current pass."""
		self.running = False
# ----------------- FUNCTIONS --------------------
# Function: doesTraceLogHaveTraceEvents
# Description:
# Quickly determine if the ftrace log has all of the trace events,
# markers, and/or kprobes required for primary parsing.
def doesTraceLogHaveTraceEvents():
	# Scan the ftrace log once and record in sysvals which data sources
	# are available: kprobes, the required trace events, and the
	# SUSPEND/RESUME trace markers.
	kpcheck = ['_cal: (', '_ret: (']
	techeck = ['suspend_resume', 'device_pm_callback', 'tracing_mark_write']
	tmcheck = ['SUSPEND START', 'RESUME COMPLETE']
	sysvals.usekprobes = False
	fp = sysvals.openlog(sysvals.ftracefile, 'r')
	for line in fp:
		# check for kprobes
		if not sysvals.usekprobes and any(i in line for i in kpcheck):
			sysvals.usekprobes = True
		# drop each required trace event/marker as soon as it is seen
		techeck = [i for i in techeck if i not in line]
		tmcheck = [i for i in tmcheck if i not in line]
	fp.close()
	# any one of the three trace events is enough to use trace events
	sysvals.usetraceevents = len(techeck) < 3
	# trace markers require both the start and end marker
	sysvals.usetracemarkers = len(tmcheck) == 0
# Function: appendIncompleteTraceLog
# Description:
# Adds callgraph data which lacks trace event data. This is only
# for timelines generated from 3.15 or older
# Arguments:
# testruns: the array of Data objects obtained from parseKernelLog
def appendIncompleteTraceLog(testruns):
	"""Add callgraph data from an ftrace log that lacks trace events.

	Only used for timelines generated from 3.15 or older kernels; the
	Data objects from parseKernelLog supply the phases, and this fills
	in matching callgraphs from the ftrace file.
	Arguments:
		testruns: the array of Data objects obtained from parseKernelLog
	"""
	# create TestRun vessels for ftrace parsing
	testcnt = len(testruns)
	testidx = 0
	testrun = []
	for data in testruns:
		testrun.append(TestRun(data))
	# extract the callgraph and traceevent data
	sysvals.vprint('Analyzing the ftrace data (%s)...' % \
		os.path.basename(sysvals.ftracefile))
	tp = TestProps()
	tf = sysvals.openlog(sysvals.ftracefile, 'r')
	data = 0
	for line in tf:
		# remove any latent carriage returns
		line = line.replace('\r\n', '')
		# consume metadata comment lines
		if tp.stampInfo(line, sysvals):
			continue
		# parse only valid lines, if this is not one move on
		m = re.match(tp.ftrace_line_fmt, line)
		if(not m):
			continue
		# gather the basic message data from the line
		m_time = m.group('time')
		m_pid = m.group('pid')
		m_msg = m.group('msg')
		if(tp.cgformat):
			m_param3 = m.group('dur')
		else:
			m_param3 = 'traceevent'
		if(m_time and m_pid and m_msg):
			t = FTraceLine(m_time, m_msg, m_param3)
			pid = int(m_pid)
		else:
			continue
		# the line should be a call, return, or event
		if(not t.fcall and not t.freturn and not t.fevent):
			continue
		# look for the suspend start marker
		if(t.startMarker()):
			data = testrun[testidx].data
			tp.parseStamp(data, sysvals)
			data.setStart(t.time, t.name)
			continue
		if(not data):
			continue
		# find the end of resume
		if(t.endMarker()):
			data.setEnd(t.time, t.name)
			testidx += 1
			if(testidx >= testcnt):
				break
			continue
		# trace event processing
		if(t.fevent):
			continue
		# call/return processing
		elif sysvals.usecallgraph:
			# create a callgraph object for the data
			if(pid not in testrun[testidx].ftemp):
				testrun[testidx].ftemp[pid] = []
				testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid, sysvals))
			# when the call is finished, see which device matches it
			cg = testrun[testidx].ftemp[pid][-1]
			res = cg.addLine(t)
			if(res != 0):
				testrun[testidx].ftemp[pid].append(FTraceCallGraph(pid, sysvals))
			if(res == -1):
				testrun[testidx].ftemp[pid][-1].addLine(t)
	tf.close()
	for test in testrun:
		# add the callgraph data to the device hierarchy
		for pid in test.ftemp:
			for cg in test.ftemp[pid]:
				# skip empty, invalid, or zero-length callgraphs
				if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
					continue
				if(not cg.postProcess()):
					# NOTE(review): 'm' here is the last regex match left
					# over from the parse loop above, so the cpu shown may
					# not correspond to this callgraph — confirm intent
					id = 'task %s cpu %s' % (pid, m.group('cpu'))
					sysvals.vprint('Sanity check failed for '+\
						id+', ignoring this callback')
					continue
				callstart = cg.start
				callend = cg.end
				# attach the callgraph to the device whose pid and time
				# window contain it
				for p in test.data.sortedPhases():
					if(test.data.dmesg[p]['start'] <= callstart and
						callstart <= test.data.dmesg[p]['end']):
						list = test.data.dmesg[p]['list']
						for devname in list:
							dev = list[devname]
							if(pid == dev['pid'] and
								callstart <= dev['start'] and
								callend >= dev['end']):
								dev['ftrace'] = cg
						break
# Function: loadTraceLog
# Description:
# load the ftrace file into memory and fix up any ordering issues
# Output:
# TestProps instance and an array of lines in proper order
def loadTraceLog():
	"""Load the ftrace file into memory and fix up any ordering issues.

	Groups lines by timestamp and, for timestamps that carry suspend or
	resume markers, reorders the lines within a timestamp block so the
	begin/end markers land at the logical start/end of the block.
	Output:
		TestProps instance and an array of line-info tuples in proper order
	"""
	tp, data, lines, trace = TestProps(), dict(), [], []
	tf = sysvals.openlog(sysvals.ftracefile, 'r')
	for line in tf:
		# remove any latent carriage returns
		line = line.replace('\r\n', '')
		if tp.stampInfo(line, sysvals):
			continue
		# ignore all other commented lines; the emptiness guard prevents
		# an IndexError on a blank CRLF line (reduced to '' above)
		if not line or line[0] == '#':
			continue
		# ftrace line: parse only valid lines
		m = re.match(tp.ftrace_line_fmt, line)
		if(not m):
			continue
		dur = m.group('dur') if tp.cgformat else 'traceevent'
		info = (m.group('time'), m.group('proc'), m.group('pid'),
			m.group('msg'), dur)
		# group the data by timestamp
		t = float(info[0])
		if t in data:
			data[t].append(info)
		else:
			data[t] = [info]
		# we only care about trace event ordering
		if (info[3].startswith('suspend_resume:') or \
			info[3].startswith('tracing_mark_write:')) and t not in trace:
			trace.append(t)
	tf.close()
	for t in sorted(data):
		first, last, blk = [], [], data[t]
		if len(blk) > 1 and t in trace:
			# move certain lines to the start or end of a timestamp block
			for i in range(len(blk)):
				if 'SUSPEND START' in blk[i][3]:
					first.append(i)
				elif re.match('.* timekeeping_freeze.*begin', blk[i][3]):
					last.append(i)
				elif re.match('.* timekeeping_freeze.*end', blk[i][3]):
					first.append(i)
				elif 'RESUME COMPLETE' in blk[i][3]:
					last.append(i)
			# only reorder when the move target is unambiguous
			if len(first) == 1 and len(last) == 0:
				blk.insert(0, blk.pop(first[0]))
			elif len(last) == 1 and len(first) == 0:
				blk.append(blk.pop(last[0]))
		for info in blk:
			lines.append(info)
	return (tp, lines)
# Function: parseTraceLog
# Description:
# Analyze an ftrace log output file generated from this app during
# the execution phase. Used when the ftrace log is the primary data source
# and includes the suspend_resume and device_pm_callback trace events
# The ftrace filename is taken from sysvals
# Output:
# An array of Data objects
def parseTraceLog(live=False):
	"""Parse the ftrace log into Data objects, one per suspend/resume test.

	Primary parser when the ftrace log carries the suspend_resume and
	device_pm_callback trace events; also handles kprobe, process monitor
	and callgraph data. The ftrace filename is taken from sysvals.
	Arguments:
		live: True when parsing during an active run (skip kprobe setup)
	Output:
		A tuple of (array of Data objects, error string)
	"""
	sysvals.vprint('Analyzing the ftrace data (%s)...' % \
		os.path.basename(sysvals.ftracefile))
	if(os.path.exists(sysvals.ftracefile) == False):
		doError('%s does not exist' % sysvals.ftracefile)
	if not live:
		sysvals.setupAllKprobes()
	# kprobe names that bound kernel suspend start / resume end
	ksuscalls = ['ksys_sync', 'pm_prepare_console']
	krescalls = ['pm_restore_console']
	tracewatch = ['irq_wakeup']
	if sysvals.usekprobes:
		tracewatch += ['sync_filesystems', 'freeze_processes', 'syscore_suspend',
			'syscore_resume', 'resume_console', 'thaw_processes', 'CPU_ON',
			'CPU_OFF', 'acpi_suspend']
	# extract the callgraph and traceevent data
	s2idle_enter = hwsus = False
	testruns, testdata = [], []
	# limbo is True outside of a start/end marker pair
	testrun, data, limbo = 0, 0, True
	phase = 'suspend_prepare'
	tp, tf = loadTraceLog()
	for m_time, m_proc, m_pid, m_msg, m_param3 in tf:
		# gather the basic message data from the line
		if(m_time and m_pid and m_msg):
			t = FTraceLine(m_time, m_msg, m_param3)
			pid = int(m_pid)
		else:
			continue
		# the line should be a call, return, or event
		if(not t.fcall and not t.freturn and not t.fevent):
			continue
		# find the start of suspend
		if(t.startMarker()):
			data, limbo = Data(len(testdata)), False
			testdata.append(data)
			testrun = TestRun(data)
			testruns.append(testrun)
			tp.parseStamp(data, sysvals)
			data.setStart(t.time, t.name)
			data.first_suspend_prepare = True
			phase = data.setPhase('suspend_prepare', t.time, True)
			continue
		if(not data or limbo):
			continue
		# process cpu exec line
		if t.type == 'tracing_mark_write':
			if t.name == 'CMD COMPLETE' and data.tKernRes == 0:
				data.tKernRes = t.time
			m = re.match(tp.procexecfmt, t.name)
			if(m):
				# ps output can span multiple marker lines (@n|... parts)
				parts, msg = 1, m.group('ps')
				m = re.match(tp.procmultifmt, msg)
				if(m):
					parts, msg = int(m.group('n')), m.group('ps')
					if tp.multiproccnt == 0:
						tp.multiproctime = t.time
						tp.multiproclist = dict()
					proclist = tp.multiproclist
					tp.multiproccnt += 1
				else:
					proclist = dict()
					tp.multiproccnt = 0
				for ps in msg.split(','):
					val = ps.split()
					if not val or len(val) != 2:
						continue
					name = val[0].replace('--', '-')
					proclist[name] = int(val[1])
				if parts == 1:
					data.pstl[t.time] = proclist
				elif parts == tp.multiproccnt:
					# final part of a multi-part entry; stamp with the
					# time of the first part
					data.pstl[tp.multiproctime] = proclist
					tp.multiproccnt = 0
				continue
		# find the end of resume
		if(t.endMarker()):
			if data.tKernRes == 0:
				data.tKernRes = t.time
			data.handleEndMarker(t.time, t.name)
			if(not sysvals.usetracemarkers):
				# no trace markers? then quit and be sure to finish recording
				# the event we used to trigger resume end
				if('thaw_processes' in testrun.ttemp and len(testrun.ttemp['thaw_processes']) > 0):
					# if an entry exists, assume this is its end
					testrun.ttemp['thaw_processes'][-1]['end'] = t.time
			limbo = True
			continue
		# trace event processing
		if(t.fevent):
			if(t.type == 'suspend_resume'):
				# suspend_resume trace events have two types, begin and end
				if(re.match('(?P<name>.*) begin$', t.name)):
					isbegin = True
				elif(re.match('(?P<name>.*) end$', t.name)):
					isbegin = False
				else:
					continue
				if '[' in t.name:
					m = re.match('(?P<name>.*)\[.*', t.name)
				else:
					m = re.match('(?P<name>.*) .*', t.name)
				name = m.group('name')
				# ignore these events
				if(name.split('[')[0] in tracewatch):
					continue
				# -- phase changes --
				# start of kernel suspend
				if(re.match('suspend_enter\[.*', t.name)):
					if(isbegin and data.tKernSus == 0):
						data.tKernSus = t.time
					continue
				# suspend_prepare start
				elif(re.match('dpm_prepare\[.*', t.name)):
					if isbegin and data.first_suspend_prepare:
						data.first_suspend_prepare = False
						if data.tKernSus == 0:
							data.tKernSus = t.time
						continue
					phase = data.setPhase('suspend_prepare', t.time, isbegin)
					continue
				# suspend start
				elif(re.match('dpm_suspend\[.*', t.name)):
					phase = data.setPhase('suspend', t.time, isbegin)
					continue
				# suspend_late start
				elif(re.match('dpm_suspend_late\[.*', t.name)):
					phase = data.setPhase('suspend_late', t.time, isbegin)
					continue
				# suspend_noirq start
				elif(re.match('dpm_suspend_noirq\[.*', t.name)):
					phase = data.setPhase('suspend_noirq', t.time, isbegin)
					continue
				# suspend_machine/resume_machine
				elif(re.match(tp.machinesuspend, t.name)):
					lp = data.lastPhase()
					if(isbegin):
						hwsus = True
						if lp.startswith('resume_machine'):
							# trim out s2idle loops, track time trying to freeze
							llp = data.lastPhase(2)
							if llp.startswith('suspend_machine'):
								if 'waking' not in data.dmesg[llp]:
									data.dmesg[llp]['waking'] = [0, 0.0]
								data.dmesg[llp]['waking'][0] += 1
								data.dmesg[llp]['waking'][1] += \
									t.time - data.dmesg[lp]['start']
							data.currphase = ''
							del data.dmesg[lp]
						phase = data.setPhase('suspend_machine', data.dmesg[lp]['end'], True)
						data.setPhase(phase, t.time, False)
						if data.tSuspended == 0:
							data.tSuspended = t.time
					else:
						if lp.startswith('resume_machine'):
							data.dmesg[lp]['end'] = t.time
							continue
						phase = data.setPhase('resume_machine', t.time, True)
						if(sysvals.suspendmode in ['mem', 'disk']):
							# close out the matching suspend_machine phase
							susp = phase.replace('resume', 'suspend')
							if susp in data.dmesg:
								data.dmesg[susp]['end'] = t.time
							data.tSuspended = t.time
						data.tResumed = t.time
					continue
				# resume_noirq start
				elif(re.match('dpm_resume_noirq\[.*', t.name)):
					phase = data.setPhase('resume_noirq', t.time, isbegin)
					continue
				# resume_early start
				elif(re.match('dpm_resume_early\[.*', t.name)):
					phase = data.setPhase('resume_early', t.time, isbegin)
					continue
				# resume start
				elif(re.match('dpm_resume\[.*', t.name)):
					phase = data.setPhase('resume', t.time, isbegin)
					continue
				# resume complete start
				elif(re.match('dpm_complete\[.*', t.name)):
					phase = data.setPhase('resume_complete', t.time, isbegin)
					continue
				# skip trace events inside devices calls
				if(not data.isTraceEventOutsideDeviceCalls(pid, t.time)):
					continue
				# global events (outside device calls) are graphed
				if(name not in testrun.ttemp):
					testrun.ttemp[name] = []
				# special handling for s2idle_enter
				if name == 'machine_suspend':
					if hwsus:
						s2idle_enter = hwsus = False
					elif s2idle_enter and not isbegin:
						if(len(testrun.ttemp[name]) > 0):
							# extend the open s2idle entry and count the loop
							testrun.ttemp[name][-1]['end'] = t.time
							testrun.ttemp[name][-1]['loop'] += 1
					elif not s2idle_enter and isbegin:
						s2idle_enter = True
						testrun.ttemp[name].append({'begin': t.time,
							'end': t.time, 'pid': pid, 'loop': 0})
					continue
				if(isbegin):
					# create a new list entry
					testrun.ttemp[name].append(\
						{'begin': t.time, 'end': t.time, 'pid': pid})
				else:
					if(len(testrun.ttemp[name]) > 0):
						# if an entry exists, assume this is its end
						testrun.ttemp[name][-1]['end'] = t.time
			# device callback start
			elif(t.type == 'device_pm_callback_start'):
				if phase not in data.dmesg:
					continue
				m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\
					t.name);
				if(not m):
					continue
				drv = m.group('drv')
				n = m.group('d')
				p = m.group('p')
				if(n and p):
					data.newAction(phase, n, pid, p, t.time, -1, drv)
					if pid not in data.devpids:
						data.devpids.append(pid)
			# device callback finish
			elif(t.type == 'device_pm_callback_end'):
				if phase not in data.dmesg:
					continue
				m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name);
				if(not m):
					continue
				n = m.group('d')
				dev = data.findDevice(phase, n)
				if dev:
					dev['length'] = t.time - dev['start']
					dev['end'] = t.time
		# kprobe event processing
		elif(t.fkprobe):
			kprobename = t.type
			kprobedata = t.name
			key = (kprobename, pid)
			# displayname is generated from kprobe data
			displayname = ''
			if(t.fcall):
				displayname = sysvals.kprobeDisplayName(kprobename, kprobedata)
				if not displayname:
					continue
				if(key not in tp.ktemp):
					tp.ktemp[key] = []
				tp.ktemp[key].append({
					'pid': pid,
					'begin': t.time,
					'end': -1,
					'name': displayname,
					'cdata': kprobedata,
					'proc': m_proc,
				})
				# start of kernel resume
				if(data.tKernSus == 0 and phase == 'suspend_prepare' \
					and kprobename in ksuscalls):
					data.tKernSus = t.time
			elif(t.freturn):
				if(key not in tp.ktemp) or len(tp.ktemp[key]) < 1:
					continue
				# find the most recent unclosed entry for this key
				e = next((x for x in reversed(tp.ktemp[key]) if x['end'] < 0), 0)
				if not e:
					continue
				# drop calls shorter than the minimum device length
				if (t.time - e['begin']) * 1000 < sysvals.mindevlen:
					tp.ktemp[key].pop()
					continue
				e['end'] = t.time
				e['rdata'] = kprobedata
				# end of kernel resume
				if(phase != 'suspend_prepare' and kprobename in krescalls):
					if phase in data.dmesg:
						data.dmesg[phase]['end'] = t.time
					data.tKernRes = t.time
		# callgraph processing
		elif sysvals.usecallgraph:
			# create a callgraph object for the data
			key = (m_proc, pid)
			if(key not in testrun.ftemp):
				testrun.ftemp[key] = []
				testrun.ftemp[key].append(FTraceCallGraph(pid, sysvals))
			# when the call is finished, see which device matches it
			cg = testrun.ftemp[key][-1]
			res = cg.addLine(t)
			if(res != 0):
				testrun.ftemp[key].append(FTraceCallGraph(pid, sysvals))
			if(res == -1):
				testrun.ftemp[key][-1].addLine(t)
	if len(testdata) < 1:
		sysvals.vprint('WARNING: ftrace start marker is missing')
	if data and not data.devicegroups:
		sysvals.vprint('WARNING: ftrace end marker is missing')
		data.handleEndMarker(t.time, t.name)
	if sysvals.suspendmode == 'command':
		# command mode has no real phases; collapse them to the test edges
		for test in testruns:
			for p in test.data.sortedPhases():
				if p == 'suspend_prepare':
					test.data.dmesg[p]['start'] = test.data.start
					test.data.dmesg[p]['end'] = test.data.end
				else:
					test.data.dmesg[p]['start'] = test.data.end
					test.data.dmesg[p]['end'] = test.data.end
			test.data.tSuspended = test.data.end
			test.data.tResumed = test.data.end
			test.data.fwValid = False
	# dev source and procmon events can be unreadable with mixed phase height
	if sysvals.usedevsrc or sysvals.useprocmon:
		sysvals.mixedphaseheight = False
	# expand phase boundaries so there are no gaps
	for data in testdata:
		lp = data.sortedPhases()[0]
		for p in data.sortedPhases():
			if(p != lp and not ('machine' in p and 'machine' in lp)):
				data.dmesg[lp]['end'] = data.dmesg[p]['start']
			lp = p
	for i in range(len(testruns)):
		test = testruns[i]
		data = test.data
		# find the total time range for this test (begin, end)
		tlb, tle = data.start, data.end
		if i < len(testruns) - 1:
			tle = testruns[i+1].data.start
		# add the process usage data to the timeline
		if sysvals.useprocmon:
			data.createProcessUsageEvents()
		# add the traceevent data to the device hierarchy
		if(sysvals.usetraceevents):
			# add actual trace funcs
			for name in sorted(test.ttemp):
				for event in test.ttemp[name]:
					if event['end'] - event['begin'] <= 0:
						continue
					title = name
					if name == 'machine_suspend' and 'loop' in event:
						title = 's2idle_enter_%dx' % event['loop']
					data.newActionGlobal(title, event['begin'], event['end'], event['pid'])
			# add the kprobe based virtual tracefuncs as actual devices
			for key in sorted(tp.ktemp):
				name, pid = key
				if name not in sysvals.tracefuncs:
					continue
				if pid not in data.devpids:
					data.devpids.append(pid)
				for e in tp.ktemp[key]:
					kb, ke = e['begin'], e['end']
					# skip zero-length calls and calls outside this test
					if ke - kb < 0.000001 or tlb > kb or tle <= kb:
						continue
					color = sysvals.kprobeColor(name)
					data.newActionGlobal(e['name'], kb, ke, pid, color)
			# add config base kprobes and dev kprobes
			if sysvals.usedevsrc:
				for key in sorted(tp.ktemp):
					name, pid = key
					if name in sysvals.tracefuncs or name not in sysvals.dev_tracefuncs:
						continue
					for e in tp.ktemp[key]:
						kb, ke = e['begin'], e['end']
						if ke - kb < 0.000001 or tlb > kb or tle <= kb:
							continue
						data.addDeviceFunctionCall(e['name'], name, e['proc'], pid, kb,
							ke, e['cdata'], e['rdata'])
		if sysvals.usecallgraph:
			# add the callgraph data to the device hierarchy
			sortlist = dict()
			for key in sorted(test.ftemp):
				proc, pid = key
				for cg in test.ftemp[key]:
					if len(cg.list) < 1 or cg.invalid or (cg.end - cg.start == 0):
						continue
					if(not cg.postProcess()):
						id = 'task %s' % (pid)
						sysvals.vprint('Sanity check failed for '+\
							id+', ignoring this callback')
						continue
					# match cg data to devices
					devname = ''
					if sysvals.suspendmode != 'command':
						devname = cg.deviceMatch(pid, data)
					if not devname:
						sortkey = '%f%f%d' % (cg.start, cg.end, pid)
						sortlist[sortkey] = cg
					elif len(cg.list) > 1000000 and cg.name != sysvals.ftopfunc:
						sysvals.vprint('WARNING: the callgraph for %s is massive (%d lines)' %\
							(devname, len(cg.list)))
			# create blocks for orphan cg data
			for sortkey in sorted(sortlist):
				cg = sortlist[sortkey]
				name = cg.name
				if sysvals.isCallgraphFunc(name):
					sysvals.vprint('Callgraph found for task %d: %.3fms, %s' % (cg.pid, (cg.end - cg.start)*1000, name))
					cg.newActionFromFunction(data)
	if sysvals.suspendmode == 'command':
		return (testdata, '')
	# fill in any missing phases
	error = []
	for data in testdata:
		tn = '' if len(testdata) == 1 else ('%d' % (data.testnumber + 1))
		terr = ''
		phasedef = data.phasedef
		lp = 'suspend_prepare'
		for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
			if p not in data.dmesg:
				# the first missing phase determines the failure message
				if not terr:
					ph = p if 'machine' in p else lp
					if p == 'suspend_machine':
						sm = sysvals.suspendmode
						if sm in suspendmodename:
							sm = suspendmodename[sm]
						terr = 'test%s did not enter %s power mode' % (tn, sm)
					else:
						terr = '%s%s failed in %s phase' % (sysvals.suspendmode, tn, ph)
					pprint('TEST%s FAILED: %s' % (tn, terr))
					error.append(terr)
					if data.tSuspended == 0:
						data.tSuspended = data.dmesg[lp]['end']
					if data.tResumed == 0:
						data.tResumed = data.dmesg[lp]['end']
					data.fwValid = False
				sysvals.vprint('WARNING: phase "%s" is missing!' % p)
			lp = p
		if not terr and 'dev' in data.wifi and data.wifi['stat'] == 'timeout':
			terr = '%s%s failed in wifi_resume <i>(%s %.0fs timeout)</i>' % \
				(sysvals.suspendmode, tn, data.wifi['dev'], data.wifi['time'])
			error.append(terr)
		if not terr and data.enterfail:
			pprint('test%s FAILED: enter %s failed with %s' % (tn, sysvals.suspendmode, data.enterfail))
			terr = 'test%s failed to enter %s mode' % (tn, sysvals.suspendmode)
			error.append(terr)
		if data.tSuspended == 0:
			data.tSuspended = data.tKernRes
		if data.tResumed == 0:
			data.tResumed = data.tSuspended
		if(len(sysvals.devicefilter) > 0):
			data.deviceFilter(sysvals.devicefilter)
		data.fixupInitcallsThatDidntReturn()
		if sysvals.usedevsrc:
			data.optimizeDevSrc()
	# x2: merge any overlapping devices between test runs
	if sysvals.usedevsrc and len(testdata) > 1:
		tc = len(testdata)
		for i in range(tc - 1):
			devlist = testdata[i].overflowDevices()
			for j in range(i + 1, tc):
				testdata[j].mergeOverlapDevices(devlist)
		testdata[0].stitchTouchingThreads(testdata[1:])
	return (testdata, ', '.join(error))
# Function: loadKernelLog
# Description:
# load the dmesg file into memory and fix up any ordering issues
# Output:
# An array of empty Data objects with only their dmesgtext attributes set
def loadKernelLog():
	"""Load the dmesg file into memory and fix up any ordering issues.

	Output:
		An array of empty Data objects with only their dmesgtext
		attributes set
	"""
	sysvals.vprint('Analyzing the dmesg data (%s)...' % \
		os.path.basename(sysvals.dmesgfile))
	if(os.path.exists(sysvals.dmesgfile) == False):
		doError('%s does not exist' % sysvals.dmesgfile)
	# there can be multiple test runs in a single file
	tp = TestProps()
	# synthesize a stamp in case the log does not carry one
	tp.stamp = datetime.now().strftime('# suspend-%m%d%y-%H%M%S localhost mem unknown')
	testruns = []
	data = 0
	lf = sysvals.openlog(sysvals.dmesgfile, 'r')
	for line in lf:
		line = line.replace('\r\n', '')
		# strip any prefix before the '[timestamp]' bracket
		idx = line.find('[')
		if idx > 1:
			line = line[idx:]
		if tp.stampInfo(line, sysvals):
			continue
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if(not m):
			continue
		msg = m.group("msg")
		# a new suspend entry starts a new Data object
		if re.match('PM: Syncing filesystems.*', msg) or \
			re.match('PM: suspend entry.*', msg):
			if(data):
				testruns.append(data)
			data = Data(len(testruns))
			tp.parseStamp(data, sysvals)
		if(not data):
			continue
		# pick up the kernel version from any line that carries it
		m = re.match('.* *(?P<k>[0-9]\.[0-9]{2}\.[0-9]-.*) .*', msg)
		if(m):
			sysvals.stamp['kernel'] = m.group('k')
		# pick up the suspend mode (two known message formats)
		m = re.match('PM: Preparing system for (?P<m>.*) sleep', msg)
		if not m:
			m = re.match('PM: Preparing system for sleep \((?P<m>.*)\)', msg)
		if m:
			sysvals.stamp['mode'] = sysvals.suspendmode = m.group('m')
		data.dmesgtext.append(line)
	lf.close()
	# normalize mode aliases to the canonical names
	if sysvals.suspendmode == 's2idle':
		sysvals.suspendmode = 'freeze'
	elif sysvals.suspendmode == 'deep':
		sysvals.suspendmode = 'mem'
	if data:
		testruns.append(data)
	if len(testruns) < 1:
		doError('dmesg log has no suspend/resume data: %s' \
			% sysvals.dmesgfile)
	# fix lines with same timestamp/function with the call and return swapped
	for data in testruns:
		last = ''
		for line in data.dmesgtext:
			ct, cf, n, p = data.initcall_debug_call(line)
			rt, rf, l = data.initcall_debug_return(last)
			# swap adjacent lines when a return precedes its own call
			if ct and rt and ct == rt and cf == rf:
				i = data.dmesgtext.index(last)
				j = data.dmesgtext.index(line)
				data.dmesgtext[i] = line
				data.dmesgtext[j] = last
			last = line
	return testruns
# Function: parseKernelLog
# Description:
# Analyse a dmesg log output file generated from this app during
# the execution phase. Create a set of device structures in memory
# for subsequent formatting in the html output file
# This call is only for legacy support on kernels where the ftrace
# data lacks the suspend_resume or device_pm_callbacks trace events.
# Arguments:
# data: an empty Data object (with dmesgtext) obtained from loadKernelLog
# Output:
# The filled Data object
def parseKernelLog(data):
	"""Analyze a raw dmesg log and build the timeline phases/devices from it.

	Legacy fallback for kernels whose ftrace data lacks the suspend_resume
	or device_pm_callbacks trace events: suspend/resume phases are inferred
	from well-known printk messages and device activity is taken from
	initcall_debug call/return lines.

	Arguments:
		data: a Data object pre-filled with dmesgtext by loadKernelLog
	Output:
		True (the Data object is filled in in place)
	"""
	phase = 'suspend_runtime'
	if(data.fwValid):
		sysvals.vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \
			(data.fwSuspend, data.fwResume))
	# dmesg phase match table: phase name -> list of printk regexes that
	# mark the start of that phase
	dm = {
		'suspend_prepare': ['PM: Syncing filesystems.*', 'PM: suspend entry.*'],
		'suspend': ['PM: Entering [a-z]* sleep.*', 'Suspending console.*',
			'PM: Suspending system .*'],
		'suspend_late': ['PM: suspend of devices complete after.*',
			'PM: freeze of devices complete after.*'],
		'suspend_noirq': ['PM: late suspend of devices complete after.*',
			'PM: late freeze of devices complete after.*'],
		'suspend_machine': ['PM: suspend-to-idle',
			'PM: noirq suspend of devices complete after.*',
			'PM: noirq freeze of devices complete after.*'],
		'resume_machine': ['PM: Timekeeping suspended for.*',
			'ACPI: Low-level resume complete.*',
			'ACPI: resume from mwait',
			'Suspended for [0-9\.]* seconds'],
		'resume_noirq': ['PM: resume from suspend-to-idle',
			'ACPI: Waking up from system sleep state.*'],
		'resume_early': ['PM: noirq resume of devices complete after.*',
			'PM: noirq restore of devices complete after.*'],
		'resume': ['PM: early resume of devices complete after.*',
			'PM: early restore of devices complete after.*'],
		'resume_complete': ['PM: resume of devices complete after.*',
			'PM: restore of devices complete after.*'],
		'post_resume': ['.*Restarting tasks \.\.\..*'],
	}
	# action table (expected events that occur and show up in dmesg)
	at = {
		'sync_filesystems': {
			'smsg': 'PM: Syncing filesystems.*',
			'emsg': 'PM: Preparing system for mem sleep.*' },
		'freeze_user_processes': {
			'smsg': 'Freezing user space processes .*',
			'emsg': 'Freezing remaining freezable tasks.*' },
		'freeze_tasks': {
			'smsg': 'Freezing remaining freezable tasks.*',
			'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' },
		'ACPI prepare': {
			'smsg': 'ACPI: Preparing to enter system sleep state.*',
			'emsg': 'PM: Saving platform NVS memory.*' },
		'PM vns': {
			'smsg': 'PM: Saving platform NVS memory.*',
			'emsg': 'Disabling non-boot CPUs .*' },
	}
	t0 = -1.0
	cpu_start = -1.0
	prevktime = -1.0
	actions = dict()
	for line in data.dmesgtext:
		# parse each dmesg line into the time and message
		m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line)
		if(m):
			val = m.group('ktime')
			try:
				ktime = float(val)
			except:
				continue
			msg = m.group('msg')
			# initialize data start to first line time
			if t0 < 0:
				data.setStart(ktime)
				t0 = ktime
		else:
			continue
		# check for a phase change line
		phasechange = False
		for p in dm:
			for s in dm[p]:
				if(re.match(s, msg)):
					phasechange, phase = True, p
					# narrow this phase to the pattern that matched so
					# later lines can only restart it the same way
					dm[p] = [s]
					break
		# hack for determining resume_machine end for freeze
		if(not sysvals.usetraceevents and sysvals.suspendmode == 'freeze' \
			and phase == 'resume_machine' and \
			data.initcall_debug_call(line, True)):
			data.setPhase(phase, ktime, False)
			phase = 'resume_noirq'
			data.setPhase(phase, ktime, True)
		if phasechange:
			if phase == 'suspend_prepare':
				data.setPhase(phase, ktime, True)
				data.setStart(ktime)
				data.tKernSus = ktime
			elif phase == 'suspend':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'suspend_late':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'suspend_noirq':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'suspend_machine':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'resume_machine':
				lp = data.lastPhase()
				if(sysvals.suspendmode in ['freeze', 'standby']):
					# freeze/standby: the last message time before this
					# one marks the actual suspend point
					data.tSuspended = prevktime
					if lp:
						data.setPhase(lp, prevktime, False)
				else:
					data.tSuspended = ktime
					if lp:
						# end the last suspend phase at ktime to match
						# tSuspended (was prevktime, which left a gap
						# before resume_machine starts at ktime)
						data.setPhase(lp, ktime, False)
				data.tResumed = ktime
				data.setPhase(phase, ktime, True)
			elif phase == 'resume_noirq':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'resume_early':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'resume':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'resume_complete':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setPhase(phase, ktime, True)
			elif phase == 'post_resume':
				lp = data.lastPhase()
				if lp:
					data.setPhase(lp, ktime, False)
				data.setEnd(ktime)
				data.tKernRes = ktime
				break
		# -- device callbacks --
		if(phase in data.sortedPhases()):
			# device init call
			t, f, n, p = data.initcall_debug_call(line)
			if t and f and n and p:
				data.newAction(phase, f, int(n), p, ktime, -1, '')
			else:
				# device init return
				t, f, l = data.initcall_debug_return(line)
				if t and f and l:
					list = data.dmesg[phase]['list']
					if(f in list):
						dev = list[f]
						dev['length'] = int(l)
						dev['end'] = ktime
		# if trace events are not available, these are better than nothing
		if(not sysvals.usetraceevents):
			# look for known actions
			for a in sorted(at):
				if(re.match(at[a]['smsg'], msg)):
					if(a not in actions):
						actions[a] = []
					actions[a].append({'begin': ktime, 'end': ktime})
				if(re.match(at[a]['emsg'], msg)):
					if(a in actions):
						actions[a][-1]['end'] = ktime
			# now look for CPU on/off events
			if(re.match('Disabling non-boot CPUs .*', msg)):
				# start of first cpu suspend
				cpu_start = ktime
			elif(re.match('Enabling non-boot CPUs .*', msg)):
				# start of first cpu resume
				cpu_start = ktime
			elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)):
				# end of a cpu suspend, start of the next
				m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)
				cpu = 'CPU'+m.group('cpu')
				if(cpu not in actions):
					actions[cpu] = []
				actions[cpu].append({'begin': cpu_start, 'end': ktime})
				cpu_start = ktime
			elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)):
				# end of a cpu resume, start of the next
				m = re.match('CPU(?P<cpu>[0-9]*) is up', msg)
				cpu = 'CPU'+m.group('cpu')
				if(cpu not in actions):
					actions[cpu] = []
				actions[cpu].append({'begin': cpu_start, 'end': ktime})
				cpu_start = ktime
		prevktime = ktime
	data.initDevicegroups()
	# fill in any missing phases
	phasedef = data.phasedef
	terr, lp = '', 'suspend_prepare'
	if lp not in data.dmesg:
		doError('dmesg log format has changed, could not find start of suspend')
	for p in sorted(phasedef, key=lambda k:phasedef[k]['order']):
		if p not in data.dmesg:
			if not terr:
				pprint('TEST FAILED: %s failed in %s phase' % (sysvals.suspendmode, lp))
				terr = '%s failed in %s phase' % (sysvals.suspendmode, lp)
				if data.tSuspended == 0:
					data.tSuspended = data.dmesg[lp]['end']
				if data.tResumed == 0:
					data.tResumed = data.dmesg[lp]['end']
			sysvals.vprint('WARNING: phase "%s" is missing!' % p)
		lp = p
	lp = data.sortedPhases()[0]
	for p in data.sortedPhases():
		# stitch adjacent phases together, except across the machine gap
		if(p != lp and not ('machine' in p and 'machine' in lp)):
			data.dmesg[lp]['end'] = data.dmesg[p]['start']
		lp = p
	if data.tSuspended == 0:
		data.tSuspended = data.tKernRes
	if data.tResumed == 0:
		data.tResumed = data.tSuspended
	# fill in any actions we've found
	for name in sorted(actions):
		for event in actions[name]:
			data.newActionGlobal(name, event['begin'], event['end'])
	if(len(sysvals.devicefilter) > 0):
		data.deviceFilter(sysvals.devicefilter)
	data.fixupInitcallsThatDidntReturn()
	return True
def callgraphHTML(sv, hf, num, cg, title, color, devid):
	"""Write one callgraph to hf as nested html <article> elements.

	Returns the updated checkbox id counter so callers can chain calls.
	Graphs and leaves shorter than sv.mincglen milliseconds are skipped.
	"""
	tmpl_top = '<article id="{0}" class="atop" style="background:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n'
	tmpl_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n'
	tmpl_end = '</article>\n'
	tmpl_leaf = '<article>{0} {1}</article>\n'
	# the article id is the device id plus the callgraph's own id (if any)
	cgid = devid + cg.id if cg.id else devid
	total_ms = (cg.end - cg.start) * 1000
	if total_ms < sv.mincglen:
		return num
	hdrfmt = '<r>(%.3f ms @ '+sv.timeformat+' to '+sv.timeformat+')</r>'
	hf.write(tmpl_top.format(cgid, color, num, title,
		hdrfmt % (total_ms, cg.start, cg.end)))
	num += 1
	for entry in cg.list:
		if entry.length < 0.000000001:
			flen = ''
		else:
			fmt = '<n>(%.3f ms @ '+sv.timeformat+')</n>'
			flen = fmt % (entry.length*1000, entry.time)
		if entry.isLeaf():
			# drop leaves below the minimum length threshold
			if entry.length * 1000 >= sv.mincglen:
				hf.write(tmpl_leaf.format(entry.name, flen))
		elif entry.freturn:
			hf.write(tmpl_end)
		else:
			hf.write(tmpl_start.format(num, entry.name, flen))
			num += 1
	hf.write(tmpl_end)
	return num
def addCallgraphs(sv, hf, data):
	"""Write the ftrace callgraph section of the timeline html.

	Walks every phase and device in the test run and emits the callgraph
	of each device via callgraphHTML, honoring the user's phase (cgphase)
	and device (cgfilter) filters.
	"""
	hf.write('<section id="callgraphs" class="callgraph">\n')
	# write out the ftrace data converted to html
	count = 0
	for phase in data.sortedPhases():
		if sv.cgphase and phase != sv.cgphase:
			continue
		devs = data.dmesg[phase]['list']
		for devname in data.sortedDevices(phase):
			if len(sv.cgfilter) > 0 and devname not in sv.cgfilter:
				continue
			dev = devs[devname]
			# device color falls back to the phase color, then white
			color = dev.get('color', data.dmesg[phase].get('color', 'white'))
			# displayed name: strip any "[...]" suffix, prefer the alt name
			name = devname if '[' not in devname else devname.split('[')[0]
			if devname in sv.devprops:
				name = sv.devprops[devname].altName(devname)
			if 'drv' in dev and dev['drv']:
				name += ' {%s}' % dev['drv']
			if sv.suspendmode in suspendmodename:
				name += ' '+phase
			if 'ftrace' in dev:
				cg = dev['ftrace']
				if cg.name == sv.ftopfunc:
					name = 'top level suspend/resume call'
				count = callgraphHTML(sv, hf, count, cg,
					name, color, dev['id'])
			if 'ftraces' in dev:
				for cg in dev['ftraces']:
					count = callgraphHTML(sv, hf, count, cg,
						name+' → '+cg.name, color, dev['id'])
	hf.write('\n\n </section>\n')
def summaryCSS(title, center=True):
	"""Return the shared html header and css used by the summary pages.

	Arguments:
		title: page title placed in the <title> tag
		center: when True, table cells are center-aligned
	"""
	# the td alignment rule is optional so device tables can left-align
	if center:
		td_align = 'text-align:center;'
	else:
		td_align = ''
	return '<!DOCTYPE html>\n<html>\n<head>\n\
 <meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
 <title>'+title+'</title>\n\
 <style type=\'text/css\'>\n\
 .stamp {width: 100%;text-align:center;background:#888;line-height:30px;color:white;font: 25px Arial;}\n\
 table {width:100%;border-collapse: collapse;border:1px solid;}\n\
 th {border: 1px solid black;background:#222;color:white;}\n\
 td {font: 14px "Times New Roman";'+td_align+'}\n\
 tr.head td {border: 1px solid black;background:#aaa;}\n\
 tr.alt {background-color:#ddd;}\n\
 tr.notice {color:red;}\n\
 .minval {background-color:#BBFFBB;}\n\
 .medval {background-color:#BBBBFF;}\n\
 .maxval {background-color:#FFBBBB;}\n\
 .head a {color:#000;text-decoration: none;}\n\
 </style>\n</head>\n<body>\n'
# Function: createHTMLSummarySimple
# Description:
# Create summary html file for a series of tests
# Arguments:
# testruns: array of Data objects from parseTraceLog
def createHTMLSummarySimple(testruns, htmlfile, title):
	"""Create the summary html file for a series of tests.

	Groups the tests by suspend mode, computes per-mode avg/min/med/max
	suspend and resume times over passing runs, and writes one html table
	with a highlighted min/med/max row per mode.

	Arguments:
		testruns: list of test dicts (mode, host, kernel, time, suspend,
			resume, result, issues, worst device info, url, ...)
		htmlfile: output html file path
		title: title string shown in the page banner
	"""
	# write the html header first (html head, css code, up to body start)
	html = summaryCSS('Summary - SleepGraph')
	# extract the test data into list
	list = dict()
	tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [dict(), dict()]
	iMin, iMed, iMax = [0, 0], [0, 0], [0, 0]
	num = 0
	useturbo = usewifi = False
	lastmode = ''
	cnt = dict()
	# index 0 of each stat pair is suspend, index 1 is resume
	for data in sorted(testruns, key=lambda v:(v['mode'], v['host'], v['kernel'], v['time'])):
		mode = data['mode']
		if mode not in list:
			list[mode] = {'data': [], 'avg': [0,0], 'min': [0,0], 'max': [0,0], 'med': [0,0]}
		# on a mode change, flush the accumulated stats of the previous mode
		if lastmode and lastmode != mode and num > 0:
			for i in range(2):
				s = sorted(tMed[i])
				list[lastmode]['med'][i] = s[int(len(s)//2)]
				iMed[i] = tMed[i][list[lastmode]['med'][i]]
			list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num]
			list[lastmode]['min'] = tMin
			list[lastmode]['max'] = tMax
			list[lastmode]['idx'] = (iMin, iMed, iMax)
			tAvg, tMin, tMax, tMed = [0.0, 0.0], [0.0, 0.0], [0.0, 0.0], [dict(), dict()]
			iMin, iMed, iMax = [0, 0], [0, 0], [0, 0]
			num = 0
		pkgpc10 = syslpi = wifi = ''
		if 'pkgpc10' in data and 'syslpi' in data:
			pkgpc10, syslpi, useturbo = data['pkgpc10'], data['syslpi'], True
		if 'wifi' in data:
			wifi, usewifi = data['wifi'], True
		res = data['result']
		tVal = [float(data['suspend']), float(data['resume'])]
		list[mode]['data'].append([data['host'], data['kernel'],
			data['time'], tVal[0], tVal[1], data['url'], res,
			data['issues'], data['sus_worst'], data['sus_worsttime'],
			data['res_worst'], data['res_worsttime'], pkgpc10, syslpi, wifi])
		idx = len(list[mode]['data']) - 1
		if res.startswith('fail in'):
			res = 'fail'
		if res not in cnt:
			cnt[res] = 1
		else:
			cnt[res] += 1
		# only passing runs contribute to the timing statistics
		if res == 'pass':
			for i in range(2):
				tMed[i][tVal[i]] = idx
				tAvg[i] += tVal[i]
				if tMin[i] == 0 or tVal[i] < tMin[i]:
					iMin[i] = idx
					tMin[i] = tVal[i]
				if tMax[i] == 0 or tVal[i] > tMax[i]:
					iMax[i] = idx
					tMax[i] = tVal[i]
			num += 1
		lastmode = mode
	# flush the stats of the final mode
	if lastmode and num > 0:
		for i in range(2):
			s = sorted(tMed[i])
			list[lastmode]['med'][i] = s[int(len(s)//2)]
			iMed[i] = tMed[i][list[lastmode]['med'][i]]
		list[lastmode]['avg'] = [tAvg[0] / num, tAvg[1] / num]
		list[lastmode]['min'] = tMin
		list[lastmode]['max'] = tMax
		list[lastmode]['idx'] = (iMin, iMed, iMax)
	# group test header
	desc = []
	for ilk in sorted(cnt, reverse=True):
		if cnt[ilk] > 0:
			desc.append('%d %s' % (cnt[ilk], ilk))
	html += '<div class="stamp">%s (%d tests: %s)</div>\n' % (title, len(testruns), ', '.join(desc))
	th = '\t<th>{0}</th>\n'
	td = '\t<td>{0}</td>\n'
	tdh = '\t<td{1}>{0}</td>\n'
	tdlink = '\t<td><a href="{0}">html</a></td>\n'
	cols = 12
	if useturbo:
		cols += 2
	if usewifi:
		cols += 1
	colspan = '%d' % cols
	# table header
	html += '<table>\n<tr>\n' + th.format('#') +\
		th.format('Mode') + th.format('Host') + th.format('Kernel') +\
		th.format('Test Time') + th.format('Result') + th.format('Issues') +\
		th.format('Suspend') + th.format('Resume') +\
		th.format('Worst Suspend Device') + th.format('SD Time') +\
		th.format('Worst Resume Device') + th.format('RD Time')
	if useturbo:
		html += th.format('PkgPC10') + th.format('SysLPI')
	if usewifi:
		html += th.format('Wifi')
	html += th.format('Detail')+'</tr>\n'
	# export list into html
	head = '<tr class="head"><td>{0}</td><td>{1}</td>'+\
		'<td colspan='+colspan+' class="sus">Suspend Avg={2} '+\
		'<span class=minval><a href="#s{10}min">Min={3}</a></span> '+\
		'<span class=medval><a href="#s{10}med">Med={4}</a></span> '+\
		'<span class=maxval><a href="#s{10}max">Max={5}</a></span> '+\
		'Resume Avg={6} '+\
		'<span class=minval><a href="#r{10}min">Min={7}</a></span> '+\
		'<span class=medval><a href="#r{10}med">Med={8}</a></span> '+\
		'<span class=maxval><a href="#r{10}max">Max={9}</a></span></td>'+\
		'</tr>\n'
	headnone = '<tr class="head"><td>{0}</td><td>{1}</td><td colspan='+\
		colspan+'></td></tr>\n'
	for mode in sorted(list):
		# header line for each suspend mode
		num = 0
		tAvg, tMin, tMax, tMed = list[mode]['avg'], list[mode]['min'],\
			list[mode]['max'], list[mode]['med']
		count = len(list[mode]['data'])
		if 'idx' in list[mode]:
			iMin, iMed, iMax = list[mode]['idx']
			html += head.format('%d' % count, mode.upper(),
				'%.3f' % tAvg[0], '%.3f' % tMin[0], '%.3f' % tMed[0], '%.3f' % tMax[0],
				'%.3f' % tAvg[1], '%.3f' % tMin[1], '%.3f' % tMed[1], '%.3f' % tMax[1],
				mode.lower()
			)
		else:
			# no passing runs in this mode, so no stats to show
			iMin = iMed = iMax = [-1, -1, -1]
			html += headnone.format('%d' % count, mode.upper())
		for d in list[mode]['data']:
			# row classes - alternate row color
			rcls = ['alt'] if num % 2 == 1 else []
			if d[6] != 'pass':
				rcls.append('notice')
			html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
			# figure out if the line has sus or res highlighted
			idx = list[mode]['data'].index(d)
			tHigh = ['', '']
			for i in range(2):
				tag = 's%s' % mode if i == 0 else 'r%s' % mode
				if idx == iMin[i]:
					tHigh[i] = ' id="%smin" class=minval title="Minimum"' % tag
				elif idx == iMax[i]:
					tHigh[i] = ' id="%smax" class=maxval title="Maximum"' % tag
				elif idx == iMed[i]:
					tHigh[i] = ' id="%smed" class=medval title="Median"' % tag
			html += td.format("%d" % (list[mode]['data'].index(d) + 1)) # row
			html += td.format(mode)										# mode
			html += td.format(d[0])										# host
			html += td.format(d[1])										# kernel
			html += td.format(d[2])										# time
			html += td.format(d[6])										# result
			html += td.format(d[7])										# issues
			html += tdh.format('%.3f ms' % d[3], tHigh[0]) if d[3] else td.format('')	# suspend
			html += tdh.format('%.3f ms' % d[4], tHigh[1]) if d[4] else td.format('')	# resume
			html += td.format(d[8])										# sus_worst
			html += td.format('%.3f ms' % d[9]) if d[9] else td.format('')		# sus_worst time
			html += td.format(d[10])									# res_worst
			html += td.format('%.3f ms' % d[11]) if d[11] else td.format('')	# res_worst time
			if useturbo:
				html += td.format(d[12])								# pkg_pc10
				html += td.format(d[13])								# syslpi
			if usewifi:
				html += td.format(d[14])								# wifi
			html += tdlink.format(d[5]) if d[5] else td.format('')		# url
			html += '</tr>\n'
			num += 1
	# flush the data to file, closing the handle even if the write fails
	with open(htmlfile, 'w') as hf:
		hf.write(html+'</table>\n</body>\n</html>\n')
def createHTMLDeviceSummary(testruns, htmlfile, title):
	"""Create a device summary html page for a series of tests.

	Merges the per-test device lists into one global table per device
	type, tracking each device's worst time (plus the host/test it came
	from), total time and count, then writes tables sorted worst-first.

	Arguments:
		testruns: list of test dicts with 'host', 'url' and a 'devlist'
			dict of {type: {devname: worst_time_ms}} entries
		htmlfile: output html file path
		title: title string shown above each table
	Output:
		The merged global device dict, devall[type][devname]
	"""
	html = summaryCSS('Device Summary - SleepGraph', False)
	# create global device list from all tests
	devall = dict()
	for data in testruns:
		host, url, devlist = data['host'], data['url'], data['devlist']
		for dtype in devlist:
			if dtype not in devall:
				devall[dtype] = dict()
			mdevlist, devlist = devall[dtype], data['devlist'][dtype]
			for name in devlist:
				length = devlist[name]
				if name not in mdevlist:
					mdevlist[name] = {'name': name, 'host': host,
						'worst': length, 'total': length, 'count': 1,
						'url': url}
				else:
					# track the worst single time and where it occurred
					if length > mdevlist[name]['worst']:
						mdevlist[name]['worst'] = length
						mdevlist[name]['url'] = url
						mdevlist[name]['host'] = host
					mdevlist[name]['total'] += length
					mdevlist[name]['count'] += 1
	# generate the html
	th = '\t<th>{0}</th>\n'
	td = '\t<td align=center>{0}</td>\n'
	tdr = '\t<td align=right>{0}</td>\n'
	tdlink = '\t<td align=center><a href="{0}">html</a></td>\n'
	limit = 1
	for dtype in sorted(devall, reverse=True):
		num = 0
		devlist = devall[dtype]
		# table header
		html += '<div class="stamp">%s (%s devices > %d ms)</div><table>\n' % \
			(title, dtype.upper(), limit)
		html += '<tr>\n' + '<th align=right>Device Name</th>' +\
			th.format('Average Time') + th.format('Count') +\
			th.format('Worst Time') + th.format('Host (worst time)') +\
			th.format('Link (worst time)') + '</tr>\n'
		for name in sorted(devlist, key=lambda k:(devlist[k]['worst'], \
			devlist[k]['total'], devlist[k]['name']), reverse=True):
			data = devall[dtype][name]
			data['average'] = data['total'] / data['count']
			# hide devices whose average is under the threshold
			if data['average'] < limit:
				continue
			# row classes - alternate row color
			rcls = ['alt'] if num % 2 == 1 else []
			html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
			html += tdr.format(data['name'])				# name
			html += td.format('%.3f ms' % data['average'])	# average
			html += td.format(data['count'])				# count
			html += td.format('%.3f ms' % data['worst'])	# worst
			html += td.format(data['host'])					# host
			html += tdlink.format(data['url'])				# url
			html += '</tr>\n'
			num += 1
		html += '</table>\n'
	# flush the data to file, closing the handle even if the write fails
	with open(htmlfile, 'w') as hf:
		hf.write(html+'</body>\n</html>\n')
	return devall
def createHTMLIssuesSummary(testruns, issues, htmlfile, title, extra=''):
	"""Create an issues summary html page for a series of tests.

	Writes one table of dmesg issues sorted by occurrence count, with a
	per-issue fail rate and a link to the first test on each host that
	hit the issue.

	Arguments:
		testruns: list of test dicts (used only for the total test count)
		issues: list of issue dicts with 'line', 'count' and 'urls'
			({host: [url, ...]}) entries
		htmlfile: output html file path
		title: title string shown in the page banner
		extra: optional html appended before the closing body tag
	Output:
		The issues list, unchanged
	"""
	# only show a Hosts column when some issue spans multiple hosts
	multihost = len([e for e in issues if len(e['urls']) > 1]) > 0
	html = summaryCSS('Issues Summary - SleepGraph', False)
	total = len(testruns)
	# generate the html
	th = '\t<th>{0}</th>\n'
	td = '\t<td align={0}>{1}</td>\n'
	tdlink = '<a href="{1}">{0}</a>'
	subtitle = '%d issues' % len(issues) if len(issues) > 0 else 'no issues'
	html += '<div class="stamp">%s (%s)</div><table>\n' % (title, subtitle)
	html += '<tr>\n' + th.format('Issue') + th.format('Count')
	if multihost:
		html += th.format('Hosts')
	html += th.format('Tests') + th.format('Fail Rate') +\
		th.format('First Instance') + '</tr>\n'
	num = 0
	for e in sorted(issues, key=lambda v:v['count'], reverse=True):
		testtotal = 0
		links = []
		# link each host to the first test on it that hit this issue
		for host in sorted(e['urls']):
			links.append(tdlink.format(host, e['urls'][host][0]))
			testtotal += len(e['urls'][host])
		rate = '%d/%d (%.2f%%)' % (testtotal, total, 100*float(testtotal)/float(total))
		# row classes - alternate row color
		rcls = ['alt'] if num % 2 == 1 else []
		html += '<tr class="'+(' '.join(rcls))+'">\n' if len(rcls) > 0 else '<tr>\n'
		html += td.format('left', e['line'])		# issue
		html += td.format('center', e['count'])		# count
		if multihost:
			html += td.format('center', len(e['urls']))	# hosts
		html += td.format('center', testtotal)		# test count
		html += td.format('center', rate)			# test rate
		html += td.format('center nowrap', '<br>'.join(links))	# links
		html += '</tr>\n'
		num += 1
	# flush the data to file, closing the handle even if the write fails
	with open(htmlfile, 'w') as hf:
		hf.write(html+'</table>\n'+extra+'</body>\n</html>\n')
	return issues
def ordinal(value):
	"""Return value with its English ordinal suffix appended.

	e.g. 1 -> '1st', 2 -> '2nd', 11 -> '11th', 111 -> '111th', 121 -> '121st'

	Arguments:
		value: a non-negative integer
	Output:
		The number as a string with st/nd/rd/th appended
	"""
	suffix = 'th'
	# 11, 12 and 13 (and 111, 212, ...) always take 'th', so test the
	# teens modulo 100 rather than only the literal values 10..19
	if value % 100 not in (11, 12, 13):
		if value % 10 == 1:
			suffix = 'st'
		elif value % 10 == 2:
			suffix = 'nd'
		elif value % 10 == 3:
			suffix = 'rd'
	return '%d%s' % (value, suffix)
# Function: createHTML
# Description:
# Create the output html file from the resident test data
# Arguments:
# testruns: array of Data objects from parseKernelLog or parseTraceLog
# Output:
# True if the html file was created, false if it failed
def createHTML(testruns, testfail):
	"""Create the output html file from the resident test data.

	Builds the full device timeline (time header, phase backgrounds,
	device blocks, trace events, kernel errors, legend), optionally the
	callgraph section, and embeds the hidden test/dmesg/ftrace logs.

	Arguments:
		testruns: array of Data objects from parseKernelLog or parseTraceLog
		testfail: error string shown in a failure banner, or empty
	Output:
		True if the html file was created, None if there was no data
	"""
	if len(testruns) < 1:
		pprint('ERROR: Not enough test data to build a timeline')
		return
	kerror = False
	for data in testruns:
		if data.kerror:
			kerror = True
		if(sysvals.suspendmode in ['freeze', 'standby']):
			data.trimFreezeTime(testruns[-1].tSuspended)
		else:
			data.getMemTime()
	# html function templates
	html_error = '<div id="{1}" title="kernel error/warning" class="err" style="right:{0}%">{2}→</div>\n'
	html_traceevent = '<div title="{0}" class="traceevent{6}" style="left:{1}%;top:{2}px;height:{3}px;width:{4}%;line-height:{3}px;{7}">{5}</div>\n'
	html_cpuexec = '<div class="jiffie" style="left:{0}%;top:{1}px;height:{2}px;width:{3}%;background:{4};"></div>\n'
	html_timetotal = '<table class="time1">\n<tr>'\
		'<td class="green" title="{3}">{2} Suspend Time: <b>{0} ms</b></td>'\
		'<td class="yellow" title="{4}">{2} Resume Time: <b>{1} ms</b></td>'\
		'</tr>\n</table>\n'
	html_timetotal2 = '<table class="time1">\n<tr>'\
		'<td class="green" title="{4}">{3} Suspend Time: <b>{0} ms</b></td>'\
		'<td class="gray" title="time spent in low-power mode with clock running">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\
		'<td class="yellow" title="{5}">{3} Resume Time: <b>{2} ms</b></td>'\
		'</tr>\n</table>\n'
	html_timetotal3 = '<table class="time1">\n<tr>'\
		'<td class="green">Execution Time: <b>{0} ms</b></td>'\
		'<td class="yellow">Command: <b>{1}</b></td>'\
		'</tr>\n</table>\n'
	html_fail = '<table class="testfail"><tr><td>{0}</td></tr></table>\n'
	html_kdesc = '<td class="{3}" title="time spent in kernel execution">{0}Kernel {2}: {1} ms</td>'
	html_fwdesc = '<td class="{3}" title="time spent in firmware">{0}Firmware {2}: {1} ms</td>'
	html_wifdesc = '<td class="yellow" title="time for wifi to reconnect after resume complete ({2})">{0}Wifi Resume: {1}</td>'
	# html format variables
	scaleH = 20
	if kerror:
		scaleH = 40
	# device timeline
	devtl = Timeline(30, scaleH)
	# write the test title and general info header
	devtl.createHeader(sysvals, testruns[0].stamp)
	# Generate the header for this timeline
	for data in testruns:
		tTotal = data.end - data.start
		if(tTotal == 0):
			doError('No timeline data')
		if sysvals.suspendmode == 'command':
			run_time = '%.0f' % (tTotal * 1000)
			if sysvals.testcommand:
				testdesc = sysvals.testcommand
			else:
				testdesc = 'unknown'
			if(len(testruns) > 1):
				testdesc = ordinal(data.testnumber+1)+' '+testdesc
			thtml = html_timetotal3.format(run_time, testdesc)
			devtl.html += thtml
			continue
		# typical full suspend/resume header
		stot, rtot = sktime, rktime = data.getTimeValues()
		ssrc, rsrc, testdesc, testdesc2 = ['kernel'], ['kernel'], 'Kernel', ''
		if data.fwValid:
			stot += (data.fwSuspend/1000000.0)
			rtot += (data.fwResume/1000000.0)
			ssrc.append('firmware')
			rsrc.append('firmware')
			testdesc = 'Total'
		if 'time' in data.wifi and data.wifi['stat'] != 'timeout':
			rtot += data.end - data.tKernRes + (data.wifi['time'] * 1000.0)
			rsrc.append('wifi')
			testdesc = 'Total'
		suspend_time, resume_time = '%.3f' % stot, '%.3f' % rtot
		stitle = 'time from kernel suspend start to %s mode [%s time]' % \
			(sysvals.suspendmode, ' & '.join(ssrc))
		rtitle = 'time from %s mode to kernel resume complete [%s time]' % \
			(sysvals.suspendmode, ' & '.join(rsrc))
		if(len(testruns) > 1):
			testdesc = testdesc2 = ordinal(data.testnumber+1)
			testdesc2 += ' '
		if(len(data.tLow) == 0):
			thtml = html_timetotal.format(suspend_time, \
				resume_time, testdesc, stitle, rtitle)
		else:
			low_time = '+'.join(data.tLow)
			thtml = html_timetotal2.format(suspend_time, low_time, \
				resume_time, testdesc, stitle, rtitle)
		devtl.html += thtml
		if not data.fwValid and 'dev' not in data.wifi:
			continue
		# extra detail when the times come from multiple sources
		thtml = '<table class="time2">\n<tr>'
		thtml += html_kdesc.format(testdesc2, '%.3f'%sktime, 'Suspend', 'green')
		if data.fwValid:
			sftime = '%.3f'%(data.fwSuspend / 1000000.0)
			rftime = '%.3f'%(data.fwResume / 1000000.0)
			thtml += html_fwdesc.format(testdesc2, sftime, 'Suspend', 'green')
			thtml += html_fwdesc.format(testdesc2, rftime, 'Resume', 'yellow')
		thtml += html_kdesc.format(testdesc2, '%.3f'%rktime, 'Resume', 'yellow')
		if 'time' in data.wifi:
			if data.wifi['stat'] != 'timeout':
				wtime = '%.0f ms'%(data.end - data.tKernRes + (data.wifi['time'] * 1000.0))
			else:
				wtime = 'TIMEOUT'
			thtml += html_wifdesc.format(testdesc2, wtime, data.wifi['dev'])
		thtml += '</tr>\n</table>\n'
		devtl.html += thtml
	if testfail:
		devtl.html += html_fail.format(testfail)
	# time scale for potentially multiple datasets
	t0 = testruns[0].start
	tMax = testruns[-1].end
	tTotal = tMax - t0
	# determine the maximum number of rows we need to draw
	fulllist = []
	threadlist = []
	pscnt = 0
	devcnt = 0
	for data in testruns:
		data.selectTimelineDevices('%f', tTotal, sysvals.mindevlen)
		for group in data.devicegroups:
			devlist = []
			for phase in group:
				for devname in sorted(data.tdevlist[phase]):
					d = DevItem(data.testnumber, phase, data.dmesg[phase]['list'][devname])
					devlist.append(d)
					if d.isa('kth'):
						threadlist.append(d)
					else:
						if d.isa('ps'):
							pscnt += 1
						else:
							devcnt += 1
						fulllist.append(d)
			if sysvals.mixedphaseheight:
				devtl.getPhaseRows(devlist)
	if not sysvals.mixedphaseheight:
		if len(threadlist) > 0 and len(fulllist) > 0:
			if pscnt > 0 and devcnt > 0:
				msg = 'user processes & device pm callbacks'
			elif pscnt > 0:
				msg = 'user processes'
			else:
				msg = 'device pm callbacks'
			d = testruns[0].addHorizontalDivider(msg, testruns[-1].end)
			fulllist.insert(0, d)
		devtl.getPhaseRows(fulllist)
		if len(threadlist) > 0:
			d = testruns[0].addHorizontalDivider('asynchronous kernel threads', testruns[-1].end)
			threadlist.insert(0, d)
			devtl.getPhaseRows(threadlist, devtl.rows)
	devtl.calcTotalRows()
	# draw the full timeline
	devtl.createZoomBox(sysvals.suspendmode, len(testruns))
	for data in testruns:
		# draw each test run and block chronologically
		phases = {'suspend':[],'resume':[]}
		for phase in data.sortedPhases():
			if data.dmesg[phase]['start'] >= data.tSuspended:
				phases['resume'].append(phase)
			else:
				phases['suspend'].append(phase)
		# now draw the actual timeline blocks
		for dir in phases:
			# draw suspend and resume blocks separately
			bname = '%s%d' % (dir[0], data.testnumber)
			if dir == 'suspend':
				m0 = data.start
				mMax = data.tSuspended
				left = '%f' % (((m0-t0)*100.0)/tTotal)
			else:
				m0 = data.tSuspended
				mMax = data.end
				# in an x2 run, remove any gap between blocks
				if len(testruns) > 1 and data.testnumber == 0:
					mMax = testruns[1].start
				left = '%f' % ((((m0-t0)*100.0)+sysvals.srgap/2)/tTotal)
			mTotal = mMax - m0
			# if a timeline block is 0 length, skip altogether
			if mTotal == 0:
				continue
			width = '%f' % (((mTotal*100.0)-sysvals.srgap/2)/tTotal)
			devtl.html += devtl.html_tblock.format(bname, left, width, devtl.scaleH)
			for b in phases[dir]:
				# draw the phase color background
				phase = data.dmesg[b]
				length = phase['end']-phase['start']
				left = '%f' % (((phase['start']-m0)*100.0)/mTotal)
				width = '%f' % ((length*100.0)/mTotal)
				devtl.html += devtl.html_phase.format(left, width, \
					'%.3f'%devtl.scaleH, '%.3f'%devtl.bodyH, \
					data.dmesg[b]['color'], '')
			for e in data.errorinfo[dir]:
				# draw red lines for any kernel errors found
				type, t, idx1, idx2 = e
				id = '%d_%d' % (idx1, idx2)
				right = '%f' % (((mMax-t)*100.0)/mTotal)
				devtl.html += html_error.format(right, id, type)
			for b in phases[dir]:
				# draw the devices for this phase
				phaselist = data.dmesg[b]['list']
				for d in sorted(data.tdevlist[b]):
					dname = d if ('[' not in d or 'CPU' in d) else d.split('[')[0]
					name, dev = dname, phaselist[d]
					drv = xtraclass = xtrainfo = xtrastyle = ''
					if 'htmlclass' in dev:
						xtraclass = dev['htmlclass']
					if 'color' in dev:
						xtrastyle = 'background:%s;' % dev['color']
					if(d in sysvals.devprops):
						name = sysvals.devprops[d].altName(d)
						xtraclass = sysvals.devprops[d].xtraClass()
						xtrainfo = sysvals.devprops[d].xtraInfo()
					elif xtraclass == ' kth':
						xtrainfo = ' kernel_thread'
					if('drv' in dev and dev['drv']):
						drv = ' {%s}' % dev['drv']
					rowheight = devtl.phaseRowHeight(data.testnumber, b, dev['row'])
					rowtop = devtl.phaseRowTop(data.testnumber, b, dev['row'])
					top = '%.3f' % (rowtop + devtl.scaleH)
					left = '%f' % (((dev['start']-m0)*100)/mTotal)
					width = '%f' % (((dev['end']-dev['start'])*100)/mTotal)
					length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000)
					title = name+drv+xtrainfo+length
					if sysvals.suspendmode == 'command':
						title += sysvals.testcommand
					elif xtraclass == ' ps':
						if 'suspend' in b:
							title += 'pre_suspend_process'
						else:
							title += 'post_resume_process'
					else:
						title += b
					devtl.html += devtl.html_device.format(dev['id'], \
						title, left, top, '%.3f'%rowheight, width, \
						dname+drv, xtraclass, xtrastyle)
					if('cpuexec' in dev):
						# draw cpu load bars under the device block
						for t in sorted(dev['cpuexec']):
							start, end = t
							height = '%.3f' % (rowheight/3)
							top = '%.3f' % (rowtop + devtl.scaleH + 2*rowheight/3)
							left = '%f' % (((start-m0)*100)/mTotal)
							width = '%f' % ((end-start)*100/mTotal)
							color = 'rgba(255, 0, 0, %f)' % dev['cpuexec'][t]
							devtl.html += \
								html_cpuexec.format(left, top, height, width, color)
					if('src' not in dev):
						continue
					# draw any trace events for this device
					for e in dev['src']:
						if e.length == 0:
							continue
						height = '%.3f' % devtl.rowH
						top = '%.3f' % (rowtop + devtl.scaleH + (e.row*devtl.rowH))
						left = '%f' % (((e.time-m0)*100)/mTotal)
						width = '%f' % (e.length*100/mTotal)
						xtrastyle = ''
						if e.color:
							xtrastyle = 'background:%s;' % e.color
						devtl.html += \
							html_traceevent.format(e.title(), \
								left, top, height, width, e.text(), '', xtrastyle)
			# draw the time scale, try to make the number of labels readable
			devtl.createTimeScale(m0, mMax, tTotal, dir)
			devtl.html += '</div>\n'
	# timeline is finished
	devtl.html += '</div>\n</div>\n'
	# draw a legend which describes the phases by color
	if sysvals.suspendmode != 'command':
		phasedef = testruns[-1].phasedef
		devtl.html += '<div class="legend">\n'
		pdelta = 100.0/len(phasedef.keys())
		pmargin = pdelta / 4.0
		for phase in sorted(phasedef, key=lambda k:phasedef[k]['order']):
			id, p = '', phasedef[phase]
			for word in phase.split('_'):
				id += word[0]
			order = '%.2f' % ((p['order'] * pdelta) + pmargin)
			name = phase.replace('_', ' ')
			devtl.html += devtl.html_legend.format(order, p['color'], name, id)
		devtl.html += '</div>\n'
	hf = open(sysvals.htmlfile, 'w')
	addCSS(hf, sysvals, len(testruns), kerror)
	# write the device timeline
	hf.write(devtl.html)
	hf.write('<div id="devicedetailtitle"></div>\n')
	hf.write('<div id="devicedetail" style="display:none;">\n')
	# draw the colored boxes for the device detail section
	for data in testruns:
		hf.write('<div id="devicedetail%d">\n' % data.testnumber)
		pscolor = 'linear-gradient(to top left, #ccc, #eee)'
		hf.write(devtl.html_phaselet.format('pre_suspend_process', \
			'0', '0', pscolor))
		for b in data.sortedPhases():
			phase = data.dmesg[b]
			length = phase['end']-phase['start']
			left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal)
			width = '%.3f' % ((length*100.0)/tTotal)
			hf.write(devtl.html_phaselet.format(b, left, width, \
				data.dmesg[b]['color']))
		hf.write(devtl.html_phaselet.format('post_resume_process', \
			'0', '0', pscolor))
		if sysvals.suspendmode == 'command':
			hf.write(devtl.html_phaselet.format('cmdexec', '0', '0', pscolor))
		hf.write('</div>\n')
	hf.write('</div>\n')
	# write the ftrace data (callgraph)
	if sysvals.cgtest >= 0 and len(testruns) > sysvals.cgtest:
		data = testruns[sysvals.cgtest]
	else:
		data = testruns[-1]
	if sysvals.usecallgraph:
		addCallgraphs(sysvals, hf, data)
	# add the test log as a hidden div
	if sysvals.testlog and sysvals.logmsg:
		hf.write('<div id="testlog" style="display:none;">\n'+sysvals.logmsg+'</div>\n')
	# add the dmesg log as a hidden div
	if sysvals.dmesglog and sysvals.dmesgfile:
		hf.write('<div id="dmesglog" style="display:none;">\n')
		lf = sysvals.openlog(sysvals.dmesgfile, 'r')
		for line in lf:
			# escape html special chars so the log text renders literally
			# (was a no-op replace('<', '<') which left markup unescaped)
			line = line.replace('<', '&lt;').replace('>', '&gt;')
			hf.write(line)
		lf.close()
		hf.write('</div>\n')
	# add the ftrace log as a hidden div
	if sysvals.ftracelog and sysvals.ftracefile:
		hf.write('<div id="ftracelog" style="display:none;">\n')
		lf = sysvals.openlog(sysvals.ftracefile, 'r')
		for line in lf:
			hf.write(line)
		lf.close()
		hf.write('</div>\n')
	# write the footer and close
	addScriptCode(hf, testruns)
	hf.write('</body>\n</html>\n')
	hf.close()
	return True
def addCSS(hf, sv, testcount=1, kerror=False, extra=''):
	# Write the opening of the output html (doctype, head, css, body start).
	# Arguments:
	#	 hf: the open html file pointer
	#	 sv: the sysvals object (hostname/kernel/suspendmode feed the title)
	#	 testcount: number of test runs in the report (>1 positions the
	#		 device list 'relative' instead of 'absolute')
	#	 kerror: True when kernel errors are marked (taller timescale row)
	#	 extra: additional css rules appended verbatim before </style>
	kernel = sv.stamp['kernel']
	host = sv.hostname[0].upper()+sv.hostname[1:]
	mode = sv.suspendmode
	if sv.suspendmode in suspendmodename:
		mode = suspendmodename[sv.suspendmode]
	title = host+' '+mode+' '+kernel
	# various format changes by flags
	cgchk = 'checked'
	cgnchk = 'not(:checked)'
	if sv.cgexp:
		# callgraphs expanded by default: invert the checkbox css selectors
		cgchk = 'not(:checked)'
		cgnchk = 'checked'
	hoverZ = 'z-index:8;'
	if sv.usedevsrc:
		hoverZ = ''
	devlistpos = 'absolute'
	if testcount > 1:
		devlistpos = 'relative'
	scaleTH = 20
	if kerror:
		scaleTH = 60
	# write the html header first (html head, css code, up to body start)
	html_header = '<!DOCTYPE html>\n<html>\n<head>\n\
	<meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\
	<title>'+title+'</title>\n\
	<style type=\'text/css\'>\n\
	body {overflow-y:scroll;}\n\
	.stamp {width:100%;text-align:center;background:gray;line-height:30px;color:white;font:25px Arial;}\n\
	.stamp.sysinfo {font:10px Arial;}\n\
	.callgraph {margin-top:30px;box-shadow:5px 5px 20px black;}\n\
	.callgraph article * {padding-left:28px;}\n\
	h1 {color:black;font:bold 30px Times;}\n\
	t0 {color:black;font:bold 30px Times;}\n\
	t1 {color:black;font:30px Times;}\n\
	t2 {color:black;font:25px Times;}\n\
	t3 {color:black;font:20px Times;white-space:nowrap;}\n\
	t4 {color:black;font:bold 30px Times;line-height:60px;white-space:nowrap;}\n\
	cS {font:bold 13px Times;}\n\
	table {width:100%;}\n\
	.gray {background:rgba(80,80,80,0.1);}\n\
	.green {background:rgba(204,255,204,0.4);}\n\
	.purple {background:rgba(128,0,128,0.2);}\n\
	.yellow {background:rgba(255,255,204,0.4);}\n\
	.blue {background:rgba(169,208,245,0.4);}\n\
	.time1 {font:22px Arial;border:1px solid;}\n\
	.time2 {font:15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\
	.testfail {font:bold 22px Arial;color:red;border:1px dashed;}\n\
	td {text-align:center;}\n\
	r {color:#500000;font:15px Tahoma;}\n\
	n {color:#505050;font:15px Tahoma;}\n\
	.tdhl {color:red;}\n\
	.hide {display:none;}\n\
	.pf {display:none;}\n\
	.pf:'+cgchk+' + label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
	.pf:'+cgnchk+' ~ label {background:url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\
	.pf:'+cgchk+' ~ *:not(:nth-child(2)) {display:none;}\n\
	.zoombox {position:relative;width:100%;overflow-x:scroll;-webkit-user-select:none;-moz-user-select:none;user-select:none;}\n\
	.timeline {position:relative;font-size:14px;cursor:pointer;width:100%; overflow:hidden;background:linear-gradient(#cccccc, white);}\n\
	.thread {position:absolute;height:0%;overflow:hidden;z-index:7;line-height:30px;font-size:14px;border:1px solid;text-align:center;white-space:nowrap;}\n\
	.thread.ps {border-radius:3px;background:linear-gradient(to top, #ccc, #eee);}\n\
	.thread:hover {background:white;border:1px solid red;'+hoverZ+'}\n\
	.thread.sec,.thread.sec:hover {background:black;border:0;color:white;line-height:15px;font-size:10px;}\n\
	.hover {background:white;border:1px solid red;'+hoverZ+'}\n\
	.hover.sync {background:white;}\n\
	.hover.bg,.hover.kth,.hover.sync,.hover.ps {background:white;}\n\
	.jiffie {position:absolute;pointer-events: none;z-index:8;}\n\
	.traceevent {position:absolute;font-size:10px;z-index:7;overflow:hidden;color:black;text-align:center;white-space:nowrap;border-radius:5px;border:1px solid black;background:linear-gradient(to bottom right,#CCC,#969696);}\n\
	.traceevent:hover {color:white;font-weight:bold;border:1px solid white;}\n\
	.phase {position:absolute;overflow:hidden;border:0px;text-align:center;}\n\
	.phaselet {float:left;overflow:hidden;border:0px;text-align:center;min-height:100px;font-size:24px;}\n\
	.t {position:absolute;line-height:'+('%d'%scaleTH)+'px;pointer-events:none;top:0;height:100%;border-right:1px solid black;z-index:6;}\n\
	.err {position:absolute;top:0%;height:100%;border-right:3px solid red;color:red;font:bold 14px Times;line-height:18px;}\n\
	.legend {position:relative; width:100%; height:40px; text-align:center;margin-bottom:20px}\n\
	.legend .square {position:absolute;cursor:pointer;top:10px; width:0px;height:20px;border:1px solid;padding-left:20px;}\n\
	button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\
	.btnfmt {position:relative;float:right;height:25px;width:auto;margin-top:3px;margin-bottom:0;font-size:10px;text-align:center;}\n\
	.devlist {position:'+devlistpos+';width:190px;}\n\
	a:link {color:white;text-decoration:none;}\n\
	a:visited {color:white;}\n\
	a:hover {color:white;}\n\
	a:active {color:white;}\n\
	.version {position:relative;float:left;color:white;font-size:10px;line-height:30px;margin-left:10px;}\n\
	#devicedetail {min-height:100px;box-shadow:5px 5px 20px black;}\n\
	.tblock {position:absolute;height:100%;background:#ddd;}\n\
	.tback {position:absolute;width:100%;background:linear-gradient(#ccc, #ddd);}\n\
	.bg {z-index:1;}\n\
	'+extra+'\
	</style>\n</head>\n<body>\n'
	hf.write(html_header)
# Function: addScriptCode
# Description:
#	 Adds the javascript code to the output html, which implements the
#	 interactive timeline (zoom, hover, device detail, log popup windows)
# Arguments:
#	 hf: the open html file pointer
#	 testruns: array of Data objects from parseKernelLog or parseTraceLog
def addScriptCode(hf, testruns):
	# timeline bounds in ms; exported to the browser via the "bounds" array
	t0 = testruns[0].start * 1000
	tMax = testruns[-1].end * 1000
	# create an array in javascript memory with the device details
	detail = '	var devtable = [];\n'
	for data in testruns:
		topo = data.deviceTopology()
		detail += '	devtable[%d] = "%s";\n' % (data.testnumber, topo)
	detail += '	var bounds = [%f,%f];\n' % (t0, tMax)
	# add the code which will manipulate the data in the browser
	# (the whole script is one string literal; it cannot be annotated inline)
	script_code = \
	'<script type="text/javascript">\n'+detail+\
	'	var resolution = -1;\n'\
	'	var dragval = [0, 0];\n'\
	'	function redrawTimescale(t0, tMax, tS) {\n'\
	'		var rline = \'<div class="t" style="left:0;border-left:1px solid black;border-right:0;">\';\n'\
	'		var tTotal = tMax - t0;\n'\
	'		var list = document.getElementsByClassName("tblock");\n'\
	'		for (var i = 0; i < list.length; i++) {\n'\
	'			var timescale = list[i].getElementsByClassName("timescale")[0];\n'\
	'			var m0 = t0 + (tTotal*parseFloat(list[i].style.left)/100);\n'\
	'			var mTotal = tTotal*parseFloat(list[i].style.width)/100;\n'\
	'			var mMax = m0 + mTotal;\n'\
	'			var html = "";\n'\
	'			var divTotal = Math.floor(mTotal/tS) + 1;\n'\
	'			if(divTotal > 1000) continue;\n'\
	'			var divEdge = (mTotal - tS*(divTotal-1))*100/mTotal;\n'\
	'			var pos = 0.0, val = 0.0;\n'\
	'			for (var j = 0; j < divTotal; j++) {\n'\
	'				var htmlline = "";\n'\
	'				var mode = list[i].id[5];\n'\
	'				if(mode == "s") {\n'\
	'					pos = 100 - (((j)*tS*100)/mTotal) - divEdge;\n'\
	'					val = (j-divTotal+1)*tS;\n'\
	'					if(j == divTotal - 1)\n'\
	'						htmlline = \'<div class="t" style="right:\'+pos+\'%"><cS>S→</cS></div>\';\n'\
	'					else\n'\
	'						htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
	'				} else {\n'\
	'					pos = 100 - (((j)*tS*100)/mTotal);\n'\
	'					val = (j)*tS;\n'\
	'					htmlline = \'<div class="t" style="right:\'+pos+\'%">\'+val+\'ms</div>\';\n'\
	'					if(j == 0)\n'\
	'						if(mode == "r")\n'\
	'							htmlline = rline+"<cS>←R</cS></div>";\n'\
	'						else\n'\
	'							htmlline = rline+"<cS>0ms</div>";\n'\
	'				}\n'\
	'				html += htmlline;\n'\
	'			}\n'\
	'			timescale.innerHTML = html;\n'\
	'		}\n'\
	'	}\n'\
	'	function zoomTimeline() {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var zoombox = document.getElementById("dmesgzoombox");\n'\
	'		var left = zoombox.scrollLeft;\n'\
	'		var val = parseFloat(dmesg.style.width);\n'\
	'		var newval = 100;\n'\
	'		var sh = window.outerWidth / 2;\n'\
	'		if(this.id == "zoomin") {\n'\
	'			newval = val * 1.2;\n'\
	'			if(newval > 910034) newval = 910034;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
	'		} else if (this.id == "zoomout") {\n'\
	'			newval = val / 1.2;\n'\
	'			if(newval < 100) newval = 100;\n'\
	'			dmesg.style.width = newval+"%";\n'\
	'			zoombox.scrollLeft = ((left + sh) * newval / val) - sh;\n'\
	'		} else {\n'\
	'			zoombox.scrollLeft = 0;\n'\
	'			dmesg.style.width = "100%";\n'\
	'		}\n'\
	'		var tS = [10000, 5000, 2000, 1000, 500, 200, 100, 50, 20, 10, 5, 2, 1];\n'\
	'		var t0 = bounds[0];\n'\
	'		var tMax = bounds[1];\n'\
	'		var tTotal = tMax - t0;\n'\
	'		var wTotal = tTotal * 100.0 / newval;\n'\
	'		var idx = 7*window.innerWidth/1100;\n'\
	'		for(var i = 0; (i < tS.length)&&((wTotal / tS[i]) < idx); i++);\n'\
	'		if(i >= tS.length) i = tS.length - 1;\n'\
	'		if(tS[i] == resolution) return;\n'\
	'		resolution = tS[i];\n'\
	'		redrawTimescale(t0, tMax, tS[i]);\n'\
	'	}\n'\
	'	function deviceName(title) {\n'\
	'		var name = title.slice(0, title.indexOf(" ("));\n'\
	'		return name;\n'\
	'	}\n'\
	'	function deviceHover() {\n'\
	'		var name = deviceName(this.title);\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = deviceName(dev[i].title);\n'\
	'			var cname = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				dev[i].className = "hover "+cname;\n'\
	'			} else {\n'\
	'				dev[i].className = cname;\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceUnhover() {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].className = dev[i].className.slice(dev[i].className.indexOf("thread"));\n'\
	'		}\n'\
	'	}\n'\
	'	function deviceTitle(title, total, cpu) {\n'\
	'		var prefix = "Total";\n'\
	'		if(total.length > 3) {\n'\
	'			prefix = "Average";\n'\
	'			total[1] = (total[1]+total[3])/2;\n'\
	'			total[2] = (total[2]+total[4])/2;\n'\
	'		}\n'\
	'		var devtitle = document.getElementById("devicedetailtitle");\n'\
	'		var name = deviceName(title);\n'\
	'		if(cpu >= 0) name = "CPU"+cpu;\n'\
	'		var driver = "";\n'\
	'		var tS = "<t2>(</t2>";\n'\
	'		var tR = "<t2>)</t2>";\n'\
	'		if(total[1] > 0)\n'\
	'			tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\
	'		if(total[2] > 0)\n'\
	'			tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\
	'		var s = title.indexOf("{");\n'\
	'		var e = title.indexOf("}");\n'\
	'		if((s >= 0) && (e >= 0))\n'\
	'			driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\
	'		if(total[1] > 0 && total[2] > 0)\n'\
	'			devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\
	'		else\n'\
	'			devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\
	'		return name;\n'\
	'	}\n'\
	'	function deviceDetail() {\n'\
	'		var devinfo = document.getElementById("devicedetail");\n'\
	'		devinfo.style.display = "block";\n'\
	'		var name = deviceName(this.title);\n'\
	'		var cpu = -1;\n'\
	'		if(name.match("CPU_ON\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(7));\n'\
	'		else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\
	'			cpu = parseInt(name.slice(8));\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		var idlist = [];\n'\
	'		var pdata = [[]];\n'\
	'		if(document.getElementById("devicedetail1"))\n'\
	'			pdata = [[], []];\n'\
	'		var pd = pdata[0];\n'\
	'		var total = [0.0, 0.0, 0.0];\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dname = deviceName(dev[i].title);\n'\
	'			if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\
	'				(name == dname))\n'\
	'			{\n'\
	'				idlist[idlist.length] = dev[i].id;\n'\
	'				var tidx = 1;\n'\
	'				if(dev[i].id[0] == "a") {\n'\
	'					pd = pdata[0];\n'\
	'				} else {\n'\
	'					if(pdata.length == 1) pdata[1] = [];\n'\
	'					if(total.length == 3) total[3]=total[4]=0.0;\n'\
	'					pd = pdata[1];\n'\
	'					tidx = 3;\n'\
	'				}\n'\
	'				var info = dev[i].title.split(" ");\n'\
	'				var pname = info[info.length-1];\n'\
	'				pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\
	'				total[0] += pd[pname];\n'\
	'				if(pname.indexOf("suspend") >= 0)\n'\
	'					total[tidx] += pd[pname];\n'\
	'				else\n'\
	'					total[tidx+1] += pd[pname];\n'\
	'			}\n'\
	'		}\n'\
	'		var devname = deviceTitle(this.title, total, cpu);\n'\
	'		var left = 0.0;\n'\
	'		for (var t = 0; t < pdata.length; t++) {\n'\
	'			pd = pdata[t];\n'\
	'			devinfo = document.getElementById("devicedetail"+t);\n'\
	'			var phases = devinfo.getElementsByClassName("phaselet");\n'\
	'			for (var i = 0; i < phases.length; i++) {\n'\
	'				if(phases[i].id in pd) {\n'\
	'					var w = 100.0*pd[phases[i].id]/total[0];\n'\
	'					var fs = 32;\n'\
	'					if(w < 8) fs = 4*w | 0;\n'\
	'					var fs2 = fs*3/4;\n'\
	'					phases[i].style.width = w+"%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'					phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\
	'					left += w;\n'\
	'					var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\
	'					var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace(new RegExp("_", "g"), " ")+"</t3>";\n'\
	'					phases[i].innerHTML = time+pname;\n'\
	'				} else {\n'\
	'					phases[i].style.width = "0%";\n'\
	'					phases[i].style.left = left+"%";\n'\
	'				}\n'\
	'			}\n'\
	'		}\n'\
	'		if(typeof devstats !== \'undefined\')\n'\
	'			callDetail(this.id, this.title);\n'\
	'		var cglist = document.getElementById("callgraphs");\n'\
	'		if(!cglist) return;\n'\
	'		var cg = cglist.getElementsByClassName("atop");\n'\
	'		if(cg.length < 10) return;\n'\
	'		for (var i = 0; i < cg.length; i++) {\n'\
	'			cgid = cg[i].id.split("x")[0]\n'\
	'			if(idlist.indexOf(cgid) >= 0) {\n'\
	'				cg[i].style.display = "block";\n'\
	'			} else {\n'\
	'				cg[i].style.display = "none";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function callDetail(devid, devtitle) {\n'\
	'		if(!(devid in devstats) || devstats[devid].length < 1)\n'\
	'			return;\n'\
	'		var list = devstats[devid];\n'\
	'		var tmp = devtitle.split(" ");\n'\
	'		var name = tmp[0], phase = tmp[tmp.length-1];\n'\
	'		var dd = document.getElementById(phase);\n'\
	'		var total = parseFloat(tmp[1].slice(1));\n'\
	'		var mlist = [];\n'\
	'		var maxlen = 0;\n'\
	'		var info = []\n'\
	'		for(var i in list) {\n'\
	'			if(list[i][0] == "@") {\n'\
	'				info = list[i].split("|");\n'\
	'				continue;\n'\
	'			}\n'\
	'			var tmp = list[i].split("|");\n'\
	'			var t = parseFloat(tmp[0]), f = tmp[1], c = parseInt(tmp[2]);\n'\
	'			var p = (t*100.0/total).toFixed(2);\n'\
	'			mlist[mlist.length] = [f, c, t.toFixed(2), p+"%"];\n'\
	'			if(f.length > maxlen)\n'\
	'				maxlen = f.length;\n'\
	'		}\n'\
	'		var pad = 5;\n'\
	'		if(mlist.length == 0) pad = 30;\n'\
	'		var html = \'<div style="padding-top:\'+pad+\'px"><t3> <b>\'+name+\':</b>\';\n'\
	'		if(info.length > 2)\n'\
	'			html += " start=<b>"+info[1]+"</b>, end=<b>"+info[2]+"</b>";\n'\
	'		if(info.length > 3)\n'\
	'			html += ", length<i>(w/o overhead)</i>=<b>"+info[3]+" ms</b>";\n'\
	'		if(info.length > 4)\n'\
	'			html += ", return=<b>"+info[4]+"</b>";\n'\
	'		html += "</t3></div>";\n'\
	'		if(mlist.length > 0) {\n'\
	'			html += \'<table class=fstat style="padding-top:\'+(maxlen*5)+\'px;"><tr><th>Function</th>\';\n'\
	'			for(var i in mlist)\n'\
	'				html += "<td class=vt>"+mlist[i][0]+"</td>";\n'\
	'			html += "</tr><tr><th>Calls</th>";\n'\
	'			for(var i in mlist)\n'\
	'				html += "<td>"+mlist[i][1]+"</td>";\n'\
	'			html += "</tr><tr><th>Time(ms)</th>";\n'\
	'			for(var i in mlist)\n'\
	'				html += "<td>"+mlist[i][2]+"</td>";\n'\
	'			html += "</tr><tr><th>Percent</th>";\n'\
	'			for(var i in mlist)\n'\
	'				html += "<td>"+mlist[i][3]+"</td>";\n'\
	'			html += "</tr></table>";\n'\
	'		}\n'\
	'		dd.innerHTML = html;\n'\
	'		var height = (maxlen*5)+100;\n'\
	'		dd.style.height = height+"px";\n'\
	'		document.getElementById("devicedetail").style.height = height+"px";\n'\
	'	}\n'\
	'	function callSelect() {\n'\
	'		var cglist = document.getElementById("callgraphs");\n'\
	'		if(!cglist) return;\n'\
	'		var cg = cglist.getElementsByClassName("atop");\n'\
	'		for (var i = 0; i < cg.length; i++) {\n'\
	'			if(this.id == cg[i].id) {\n'\
	'				cg[i].style.display = "block";\n'\
	'			} else {\n'\
	'				cg[i].style.display = "none";\n'\
	'			}\n'\
	'		}\n'\
	'	}\n'\
	'	function devListWindow(e) {\n'\
	'		var win = window.open();\n'\
	'		var html = "<title>"+e.target.innerHTML+"</title>"+\n'\
	'			"<style type=\\"text/css\\">"+\n'\
	'			"   ul {list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\
	'			"</style>"\n'\
	'		var dt = devtable[0];\n'\
	'		if(e.target.id != "devlist1")\n'\
	'			dt = devtable[1];\n'\
	'		win.document.write(html+dt);\n'\
	'	}\n'\
	'	function errWindow() {\n'\
	'		var range = this.id.split("_");\n'\
	'		var idx1 = parseInt(range[0]);\n'\
	'		var idx2 = parseInt(range[1]);\n'\
	'		var win = window.open();\n'\
	'		var log = document.getElementById("dmesglog");\n'\
	'		var title = "<title>dmesg log</title>";\n'\
	'		var text = log.innerHTML.split("\\n");\n'\
	'		var html = "";\n'\
	'		for(var i = 0; i < text.length; i++) {\n'\
	'			if(i == idx1) {\n'\
	'				html += "<e id=target>"+text[i]+"</e>\\n";\n'\
	'			} else if(i > idx1 && i <= idx2) {\n'\
	'				html += "<e>"+text[i]+"</e>\\n";\n'\
	'			} else {\n'\
	'				html += text[i]+"\\n";\n'\
	'			}\n'\
	'		}\n'\
	'		win.document.write("<style>e{color:red}</style>"+title+"<pre>"+html+"</pre>");\n'\
	'		win.location.hash = "#target";\n'\
	'		win.document.close();\n'\
	'	}\n'\
	'	function logWindow(e) {\n'\
	'		var name = e.target.id.slice(4);\n'\
	'		var win = window.open();\n'\
	'		var log = document.getElementById(name+"log");\n'\
	'		var title = "<title>"+document.title.split(" ")[0]+" "+name+" log</title>";\n'\
	'		win.document.write(title+"<pre>"+log.innerHTML+"</pre>");\n'\
	'		win.document.close();\n'\
	'	}\n'\
	'	function onMouseDown(e) {\n'\
	'		dragval[0] = e.clientX;\n'\
	'		dragval[1] = document.getElementById("dmesgzoombox").scrollLeft;\n'\
	'		document.onmousemove = onMouseMove;\n'\
	'	}\n'\
	'	function onMouseMove(e) {\n'\
	'		var zoombox = document.getElementById("dmesgzoombox");\n'\
	'		zoombox.scrollLeft = dragval[1] + dragval[0] - e.clientX;\n'\
	'	}\n'\
	'	function onMouseUp(e) {\n'\
	'		document.onmousemove = null;\n'\
	'	}\n'\
	'	function onKeyPress(e) {\n'\
	'		var c = e.charCode;\n'\
	'		if(c != 42 && c != 43 && c != 45) return;\n'\
	'		var click = document.createEvent("Events");\n'\
	'		click.initEvent("click", true, false);\n'\
	'		if(c == 43) \n'\
	'			document.getElementById("zoomin").dispatchEvent(click);\n'\
	'		else if(c == 45)\n'\
	'			document.getElementById("zoomout").dispatchEvent(click);\n'\
	'		else if(c == 42)\n'\
	'			document.getElementById("zoomdef").dispatchEvent(click);\n'\
	'	}\n'\
	'	window.addEventListener("resize", function () {zoomTimeline();});\n'\
	'	window.addEventListener("load", function () {\n'\
	'		var dmesg = document.getElementById("dmesg");\n'\
	'		dmesg.style.width = "100%"\n'\
	'		dmesg.onmousedown = onMouseDown;\n'\
	'		document.onmouseup = onMouseUp;\n'\
	'		document.onkeypress = onKeyPress;\n'\
	'		document.getElementById("zoomin").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomout").onclick = zoomTimeline;\n'\
	'		document.getElementById("zoomdef").onclick = zoomTimeline;\n'\
	'		var list = document.getElementsByClassName("err");\n'\
	'		for (var i = 0; i < list.length; i++)\n'\
	'			list[i].onclick = errWindow;\n'\
	'		var list = document.getElementsByClassName("logbtn");\n'\
	'		for (var i = 0; i < list.length; i++)\n'\
	'			list[i].onclick = logWindow;\n'\
	'		list = document.getElementsByClassName("devlist");\n'\
	'		for (var i = 0; i < list.length; i++)\n'\
	'			list[i].onclick = devListWindow;\n'\
	'		var dev = dmesg.getElementsByClassName("thread");\n'\
	'		for (var i = 0; i < dev.length; i++) {\n'\
	'			dev[i].onclick = deviceDetail;\n'\
	'			dev[i].onmouseover = deviceHover;\n'\
	'			dev[i].onmouseout = deviceUnhover;\n'\
	'		}\n'\
	'		var dev = dmesg.getElementsByClassName("srccall");\n'\
	'		for (var i = 0; i < dev.length; i++)\n'\
	'			dev[i].onclick = callSelect;\n'\
	'		zoomTimeline();\n'\
	'	});\n'\
	'</script>\n'
	hf.write(script_code);
# Function: executeSuspend
# Description:
#	 Execute system suspend through the sysfs interface, then copy the output
#	 dmesg and ftrace files to the test output directory.
# Arguments:
#	 quiet: True to suppress most of the progress messages
def executeSuspend(quiet=False):
	sv, tp, pm = sysvals, sysvals.tpath, ProcessMonitor()
	# capture the connected wifi device now; it is polled again after resume
	if sv.wifi:
		wifi = sv.checkWifi()
		sv.dlog('wifi check, connected device is "%s"' % wifi)
	testdata = []
	# run these commands to prepare the system for suspend
	if sv.display:
		if not quiet:
			pprint('SET DISPLAY TO %s' % sv.display.upper())
		ret = sv.displayControl(sv.display)
		sv.dlog('xset display %s, ret = %d' % (sv.display, ret))
		time.sleep(1)
	if sv.sync:
		if not quiet:
			pprint('SYNCING FILESYSTEMS')
		sv.dlog('syncing filesystems')
		call('sync', shell=True)
	sv.dlog('read dmesg')
	sv.initdmesg()
	sv.dlog('cmdinfo before')
	sv.cmdinfo(True)
	sv.start(pm)
	# execute however many s/r runs requested
	for count in range(1,sv.execcount+1):
		# x2delay in between test runs (marked in the trace with WAIT markers)
		if(count > 1 and sv.x2delay > 0):
			sv.fsetVal('WAIT %d' % sv.x2delay, 'trace_marker')
			time.sleep(sv.x2delay/1000.0)
			sv.fsetVal('WAIT END', 'trace_marker')
		# start message
		if sv.testcommand != '':
			pprint('COMMAND START')
		else:
			if(sv.rtcwake):
				pprint('SUSPEND START')
			else:
				pprint('SUSPEND START (press a key to resume)')
		# set rtcwake so the system wakes itself back up
		if(sv.rtcwake):
			if not quiet:
				pprint('will issue an rtcwake in %d seconds' % sv.rtcwaketime)
			sv.dlog('enable RTC wake alarm')
			sv.rtcWakeAlarmOn()
		# start of suspend trace marker
		sv.fsetVal(datetime.now().strftime(sv.tmstart), 'trace_marker')
		# predelay delay (only before the first run)
		if(count == 1 and sv.predelay > 0):
			sv.fsetVal('WAIT %d' % sv.predelay, 'trace_marker')
			time.sleep(sv.predelay/1000.0)
			sv.fsetVal('WAIT END', 'trace_marker')
		# initiate suspend or command
		sv.dlog('system executing a suspend')
		tdata = {'error': ''}
		if sv.testcommand != '':
			res = call(sv.testcommand+' 2>&1', shell=True);
			if res != 0:
				tdata['error'] = 'cmd returned %d' % res
		else:
			s0ixready = sv.s0ixSupport()
			mode = sv.suspendmode
			# mem/disk sub-modes are selected via their own sysfs files
			if sv.memmode and os.path.exists(sv.mempowerfile):
				mode = 'mem'
				sv.testVal(sv.mempowerfile, 'radio', sv.memmode)
			if sv.diskmode and os.path.exists(sv.diskpowerfile):
				mode = 'disk'
				sv.testVal(sv.diskpowerfile, 'radio', sv.diskmode)
			if sv.acpidebug:
				sv.testVal(sv.acpipath, 'acpi', '0xe')
			if ((mode == 'freeze') or (sv.memmode == 's2idle')) \
				and sv.haveTurbostat():
				# execution will pause here
				turbo = sv.turbostat(s0ixready)
				if turbo:
					tdata['turbo'] = turbo
			else:
				pf = open(sv.powerfile, 'w')
				pf.write(mode)
				# execution will pause here
				try:
					pf.close()
				except Exception as e:
					tdata['error'] = str(e)
		sv.fsetVal('CMD COMPLETE', 'trace_marker')
		sv.dlog('system returned')
		# reset everything
		sv.testVal('restoreall')
		if(sv.rtcwake):
			sv.dlog('disable RTC wake alarm')
			sv.rtcWakeAlarmOff()
		# postdelay delay (only after the final run)
		if(count == sv.execcount and sv.postdelay > 0):
			sv.fsetVal('WAIT %d' % sv.postdelay, 'trace_marker')
			time.sleep(sv.postdelay/1000.0)
			sv.fsetVal('WAIT END', 'trace_marker')
		# return from suspend
		pprint('RESUME COMPLETE')
		# end-of-test marker; when wifitrace is set it is deferred until
		# after the wifi poll below so the reconnect shows up in the trace
		if(count < sv.execcount):
			sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
		elif(not sv.wifitrace):
			sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
			sv.stop(pm)
		if sv.wifi and wifi:
			tdata['wifi'] = sv.pollWifi(wifi)
			sv.dlog('wifi check, %s' % tdata['wifi'])
		if(count == sv.execcount and sv.wifitrace):
			sv.fsetVal(datetime.now().strftime(sv.tmend), 'trace_marker')
			sv.stop(pm)
		if sv.netfix:
			tdata['netfix'] = sv.netfixon()
			sv.dlog('netfix, %s' % tdata['netfix'])
		if(sv.suspendmode == 'mem' or sv.suspendmode == 'command'):
			sv.dlog('read the ACPI FPDT')
			tdata['fw'] = getFPDT(False)
		testdata.append(tdata)
	sv.dlog('cmdinfo after')
	cmdafter = sv.cmdinfo(False)
	# grab a copy of the dmesg output
	if not quiet:
		pprint('CAPTURING DMESG')
	sv.getdmesg(testdata)
	# grab a copy of the ftrace output
	if sv.useftrace:
		if not quiet:
			pprint('CAPTURING TRACE')
		op = sv.writeDatafileHeader(sv.ftracefile, testdata)
		fp = open(tp+'trace', 'r')
		for line in fp:
			op.write(line)
		op.close()
		sv.fsetVal('', 'trace')
	sv.platforminfo(cmdafter)
def readFile(file):
	# For a symlink, report only the final component of its target
	# (e.g. a driver link resolves to the driver name); for a regular
	# file, return its contents via sysvals, stripped of whitespace.
	if not os.path.islink(file):
		return sysvals.getVal(file).strip()
	target = os.readlink(file)
	return target.split('/')[-1]
# Function: ms2nice
# Description:
#	 Convert a millisecond count into a short clock-style string
# Output:
#	 "H:MM:SS" when hours are present, "MM:SS" when only minutes,
#	 otherwise "Ns" (seconds only)
def ms2nice(val):
	seconds = int(val) // 1000
	hours, rem = divmod(seconds, 3600)
	mins, secs = divmod(rem, 60)
	if hours > 0:
		return '%d:%02d:%02d' % (hours, mins, secs)
	if mins > 0:
		return '%02d:%02d' % (mins, secs)
	return '%ds' % secs
def yesno(val):
	# Map a power sysfs attribute value to the single-letter status code
	# shown in the deviceInfo table (A/S/E/D); unknown values map to a
	# blank so the column stays aligned.
	# Fixes: the original bound its lookup table to the name 'list',
	# shadowing the builtin, and hand-rolled dict.get().
	codes = {'enabled':'A', 'disabled':'S', 'auto':'E', 'on':'D',
		'active':'A', 'suspended':'S', 'suspending':'S'}
	return codes.get(val, ' ')
# Function: deviceInfo
# Description:
#	 Detect all the USB hosts and devices currently connected and add
#	 a list of USB device names to sysvals for better timeline readability
# Arguments:
#	 output: when empty, print the runtime-PM status table of all devices
#		 and return []; when set (e.g. 'auto'/'on'), collect and return the
#		 power/control file paths whose current value matches it
def deviceInfo(output=''):
	if not output:
		pprint('LEGEND\n'\
		'---------------------------------------------------------------------------------------------\n'\
		'  A = async/sync PM queue (A/S)               C = runtime active children\n'\
		'  R = runtime suspend enabled/disabled (E/D)  rACTIVE = runtime active (min/sec)\n'\
		'  S = runtime status active/suspended (A/S)   rSUSPEND = runtime suspend (min/sec)\n'\
		'  U = runtime usage count\n'\
		'---------------------------------------------------------------------------------------------\n'\
		'DEVICE NAME                       A R S U C    rACTIVE   rSUSPEND\n'\
		'---------------------------------------------------------------------------------------------')
	res = []
	tgtval = 'runtime_status'
	lines = dict()
	# scan every device that exposes a power/ directory with runtime PM files
	for dirname, dirnames, filenames in os.walk('/sys/devices'):
		if(not re.match('.*/power', dirname) or
			'control' not in filenames or
			tgtval not in filenames):
			continue
		name = ''
		# strip the trailing '/power' to get the device directory
		dirname = dirname[:-6]
		device = dirname.split('/')[-1]
		power = dict()
		power[tgtval] = readFile('%s/power/%s' % (dirname, tgtval))
		# only list devices which support runtime suspend
		if power[tgtval] not in ['active', 'suspended', 'suspending']:
			continue
		# pick the most descriptive name available for the device
		for i in ['product', 'driver', 'subsystem']:
			file = '%s/%s' % (dirname, i)
			if os.path.exists(file):
				name = readFile(file)
				break
		for i in ['async', 'control', 'runtime_status', 'runtime_usage',
			'runtime_active_kids', 'runtime_active_time',
			'runtime_suspended_time']:
			if i in filenames:
				power[i] = readFile('%s/power/%s' % (dirname, i))
		if output:
			# collection mode: gather matching power/control paths, no table
			if power['control'] == output:
				res.append('%s/power/control' % dirname)
			continue
		lines[dirname] = '%-26s %-26s %1s %1s %1s %1s %1s %10s %10s' % \
			(device[:26], name[:26],
			yesno(power['async']), \
			yesno(power['control']), \
			yesno(power['runtime_status']), \
			power['runtime_usage'], \
			power['runtime_active_kids'], \
			ms2nice(power['runtime_active_time']), \
			ms2nice(power['runtime_suspended_time']))
	# print the table sorted by device path
	for i in sorted(lines):
		print(lines[i])
	return res
# Function: getModes
# Description:
#	 Determine the supported power modes on this system
# Output:
#	 A string list of the available modes, with 'mem' and 'disk' expanded
#	 into their sub-modes as 'mem-<variant>' / 'disk-<variant>' entries
def getModes():
	modes = []
	# the power file holds a space-separated list of supported modes
	if(os.path.exists(sysvals.powerfile)):
		fp = open(sysvals.powerfile, 'r')
		modes = fp.read().split()
		fp.close()
	# the mem power file lists the 'mem' variants; the currently selected
	# one is wrapped in brackets, which strip('[]') removes
	if(os.path.exists(sysvals.mempowerfile)):
		deep = False
		fp = open(sysvals.mempowerfile, 'r')
		for m in fp.read().split():
			memmode = m.strip('[]')
			if memmode == 'deep':
				deep = True
			else:
				modes.append('mem-%s' % memmode)
		fp.close()
		# without 'deep' support, plain 'mem' is dropped from the list
		if 'mem' in modes and not deep:
			modes.remove('mem')
	# expand 'disk' into its variants the same way
	if('disk' in modes and os.path.exists(sysvals.diskpowerfile)):
		fp = open(sysvals.diskpowerfile, 'r')
		for m in fp.read().split():
			modes.append('disk-%s' % m.strip('[]'))
		fp.close()
	return modes
# Function: dmidecode
# Description:
#	 Read the bios tables and pull out system info
# Arguments:
#	 mempath: /dev/mem or custom mem path
#	 fatal: True to exit on error, False to return empty dict
# Output:
#	 A dict object with all available key/values
def dmidecode(mempath, fatal=False):
	out = dict()
	# the list of values to retrieve, with hardcoded (type, idx)
	# type is the SMBIOS structure type, idx the byte offset of the
	# string-number field within that structure
	info = {
		'bios-vendor': (0, 4),
		'bios-version': (0, 5),
		'bios-release-date': (0, 8),
		'system-manufacturer': (1, 4),
		'system-product-name': (1, 5),
		'system-version': (1, 6),
		'system-serial-number': (1, 7),
		'baseboard-manufacturer': (2, 4),
		'baseboard-product-name': (2, 5),
		'baseboard-version': (2, 6),
		'baseboard-serial-number': (2, 7),
		'chassis-manufacturer': (3, 4),
		'chassis-type': (3, 5),
		'chassis-version': (3, 6),
		'chassis-serial-number': (3, 7),
		'processor-manufacturer': (4, 7),
		'processor-version': (4, 16),
	}
	if(not os.path.exists(mempath)):
		if(fatal):
			doError('file does not exist: %s' % mempath)
		return out
	if(not os.access(mempath, os.R_OK)):
		if(fatal):
			doError('file is not readable: %s' % mempath)
		return out
	# by default use legacy scan, but try to use EFI first
	memaddr = 0xf0000
	memsize = 0x10000
	for ep in ['/sys/firmware/efi/systab', '/proc/efi/systab']:
		if not os.path.exists(ep) or not os.access(ep, os.R_OK):
			continue
		fp = open(ep, 'r')
		buf = fp.read()
		fp.close()
		# the EFI systab exposes the SMBIOS entry point address directly
		i = buf.find('SMBIOS=')
		if i >= 0:
			try:
				memaddr = int(buf[i+7:], 16)
				memsize = 0x20
			except:
				continue
	# read in the memory for scanning
	try:
		fp = open(mempath, 'rb')
		fp.seek(memaddr)
		buf = fp.read(memsize)
	except:
		if(fatal):
			doError('DMI table is unreachable, sorry')
		else:
			pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
			return out
	fp.close()
	# search for either an SM table or DMI table
	# anchors are on 16-byte boundaries; the table base address, byte
	# length, and structure count come from the entry point structure
	i = base = length = num = 0
	while(i < memsize):
		if buf[i:i+4] == b'_SM_' and i < memsize - 16:
			length = struct.unpack('H', buf[i+22:i+24])[0]
			base, num = struct.unpack('IH', buf[i+24:i+30])
			break
		elif buf[i:i+5] == b'_DMI_':
			length = struct.unpack('H', buf[i+6:i+8])[0]
			base, num = struct.unpack('IH', buf[i+8:i+14])
			break
		i += 16
	if base == 0 and length == 0 and num == 0:
		if(fatal):
			doError('Neither SMBIOS nor DMI were found')
		else:
			return out
	# read in the SM or DMI table
	try:
		fp = open(mempath, 'rb')
		fp.seek(base)
		buf = fp.read(length)
	except:
		if(fatal):
			doError('DMI table is unreachable, sorry')
		else:
			pprint('WARNING: /dev/mem is not readable, ignoring DMI data')
			return out
	fp.close()
	# scan the table for the values we want
	count = i = 0
	while(count < num and i <= len(buf) - 4):
		# NOTE: 'type' shadows the builtin within this loop
		type, size, handle = struct.unpack('BBH', buf[i:i+4])
		# the formatted area is followed by nul-terminated strings,
		# ending with a double nul; find that terminator
		n = i + size
		while n < len(buf) - 1:
			if 0 == struct.unpack('H', buf[n:n+2])[0]:
				break
			n += 1
		data = buf[i+size:n+2].split(b'\0')
		for name in info:
			itype, idxadr = info[name]
			if itype == type:
				# the byte at idxadr is a 1-based index into the
				# structure's string list (0 means no string)
				idx = struct.unpack('B', buf[i+idxadr:i+idxadr+1])[0]
				if idx > 0 and idx < len(data) - 1:
					s = data[idx-1].decode('utf-8')
					if s.strip() and s.strip().lower() != 'to be filled by o.e.m.':
						out[name] = s
		i = n + 2
		count += 1
	return out
# Function: getFPDT
# Description:
# Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
# output: True to output the info to stdout, False otherwise
def getFPDT(output):
	"""Read the ACPI FPDT table and pull out the firmware timing data.

	Reads the Firmware Performance Data Table via sysvals.fpdtpath, then
	follows its record pointers into physical memory (sysvals.mempath) to
	extract the FBPT boot record and S3PT suspend/resume records.

	Arguments:
		output: True to print the table contents to stdout, False otherwise
	Returns:
		[suspendtime, resumetime] in ns on success, False if the table is
		missing, unreadable or invalid, [0, 0] if a record address is bad.
	"""
	# names of the two top-level FPDT record types we care about
	rectype = {}
	rectype[0] = 'Firmware Basic Boot Performance Record'
	rectype[1] = 'S3 Performance Table Record'
	# names of the sub-record types inside the S3 performance table
	prectype = {}
	prectype[0] = 'Basic S3 Resume Performance Record'
	prectype[1] = 'Basic S3 Suspend Performance Record'
	sysvals.rootCheck(True)
	if(not os.path.exists(sysvals.fpdtpath)):
		if(output):
			doError('file does not exist: %s' % sysvals.fpdtpath)
		return False
	if(not os.access(sysvals.fpdtpath, os.R_OK)):
		if(output):
			doError('file is not readable: %s' % sysvals.fpdtpath)
		return False
	if(not os.path.exists(sysvals.mempath)):
		if(output):
			doError('file does not exist: %s' % sysvals.mempath)
		return False
	if(not os.access(sysvals.mempath, os.R_OK)):
		if(output):
			doError('file is not readable: %s' % sysvals.mempath)
		return False
	fp = open(sysvals.fpdtpath, 'rb')
	buf = fp.read()
	fp.close()
	# a valid ACPI table header is 36 bytes
	if(len(buf) < 36):
		if(output):
			doError('Invalid FPDT table data, should '+\
				'be at least 36 bytes')
		return False
	table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
	if(output):
		pprint('\n'\
		'Firmware Performance Data Table (%s)\n'\
		' Signature : %s\n'\
		' Table Length : %u\n'\
		' Revision : %u\n'\
		' Checksum : 0x%x\n'\
		' OEM ID : %s\n'\
		' OEM Table ID : %s\n'\
		' OEM Revision : %u\n'\
		' Creator ID : %s\n'\
		' Creator Revision : 0x%x\n'\
		'' % (ascii(table[0]), ascii(table[0]), table[1], table[2],
			table[3], ascii(table[4]), ascii(table[5]), table[6],
			ascii(table[7]), table[8]))
	if(table[0] != b'FPDT'):
		if(output):
			doError('Invalid FPDT table')
		return False
	# no records after the header means nothing to extract
	if(len(buf) <= 36):
		return False
	i = 0
	fwData = [0, 0]
	records = buf[36:]
	try:
		fp = open(sysvals.mempath, 'rb')
	except:
		pprint('WARNING: /dev/mem is not readable, ignoring the FPDT data')
		return False
	# walk the FPDT records; each header is (type, length, revision)
	while(i + 4 <= len(records)):
		header = struct.unpack('HBB', records[i:i+4])
		# a record length below the 4-byte header is invalid and would
		# stall the loop; bail out on corrupt data
		if(header[1] < 4):
			break
		if(header[0] not in rectype):
			i += header[1]
			continue
		if(header[1] != 16):
			i += header[1]
			continue
		addr = struct.unpack('Q', records[i+8:i+16])[0]
		try:
			fp.seek(addr)
			first = fp.read(8)
		except:
			if(output):
				pprint('Bad address 0x%x in %s' % (addr, sysvals.mempath))
			return [0, 0]
		rechead = struct.unpack('4sI', first)
		recdata = fp.read(rechead[1]-8)
		if(rechead[0] == b'FBPT'):
			record = struct.unpack('HBBIQQQQQ', recdata[:48])
			if(output):
				pprint('%s (%s)\n'\
				' Reset END : %u ns\n'\
				' OS Loader LoadImage Start : %u ns\n'\
				' OS Loader StartImage Start : %u ns\n'\
				' ExitBootServices Entry : %u ns\n'\
				' ExitBootServices Exit : %u ns'\
				'' % (rectype[header[0]], ascii(rechead[0]), record[4], record[5],
					record[6], record[7], record[8]))
		elif(rechead[0] == b'S3PT'):
			if(output):
				pprint('%s (%s)' % (rectype[header[0]], ascii(rechead[0])))
			j = 0
			while(j + 4 <= len(recdata)):
				prechead = struct.unpack('HBB', recdata[j:j+4])
				# guard against zero/short sub-record lengths (corrupt table)
				if(prechead[1] < 4):
					break
				if(prechead[0] not in prectype):
					# FIX: the original used a bare 'continue' here without
					# advancing j, spinning forever on any unknown type
					j += prechead[1]
					continue
				if(prechead[0] == 0):
					record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
					fwData[1] = record[2]
					if(output):
						pprint(' %s\n'\
						' Resume Count : %u\n'\
						' FullResume : %u ns\n'\
						' AverageResume : %u ns'\
						'' % (prectype[prechead[0]], record[1],
								record[2], record[3]))
				elif(prechead[0] == 1):
					record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
					fwData[0] = record[1] - record[0]
					if(output):
						pprint(' %s\n'\
						' SuspendStart : %u ns\n'\
						' SuspendEnd : %u ns\n'\
						' SuspendTime : %u ns'\
						'' % (prectype[prechead[0]], record[0],
								record[1], fwData[0]))
				j += prechead[1]
			if(output):
				pprint('')
		i += header[1]
	fp.close()
	return fwData
# Function: statusCheck
# Description:
# Verify that the requested command and options will work, and
# print the results to the terminal
# Output:
# True if the test will work, False if not
def statusCheck(probecheck=False):
	"""Verify that the requested command and options will work on this system.

	Prints a line-by-line capability report to the terminal and, as a side
	effect, updates several sysvals feature flags (useftrace, usekprobes,
	usetraceevents, usedevsrc) based on what is actually available.

	Arguments:
		probecheck: True to also pre-register and install the kprobes
	Returns:
		'' if the test will work, otherwise a string describing the problem
	"""
	status = ''
	pprint('Checking this system (%s)...' % platform.node())
	# check we have root access
	res = sysvals.colorText('NO (No features of this tool will work!)')
	if(sysvals.rootCheck(False)):
		res = 'YES'
	pprint(' have root access: %s' % res)
	if(res != 'YES'):
		pprint(' Try running this script with sudo')
		return 'missing root access'
	# check sysfs is mounted
	res = sysvals.colorText('NO (No features of this tool will work!)')
	if(os.path.exists(sysvals.powerfile)):
		res = 'YES'
	pprint(' is sysfs mounted: %s' % res)
	if(res != 'YES'):
		return 'sysfs is missing'
	# check target mode is a valid mode
	if sysvals.suspendmode != 'command':
		res = sysvals.colorText('NO')
		modes = getModes()
		if(sysvals.suspendmode in modes):
			res = 'YES'
		else:
			status = '%s mode is not supported' % sysvals.suspendmode
		pprint(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res))
		if(res == 'NO'):
			pprint(' valid power modes are: %s' % modes)
			pprint(' please choose one with -m')
	# check if ftrace is available
	if sysvals.useftrace:
		res = sysvals.colorText('NO')
		# verifyFtrace may disable ftrace use for the rest of the run
		sysvals.useftrace = sysvals.verifyFtrace()
		efmt = '"{0}" uses ftrace, and it is not properly supported'
		if sysvals.useftrace:
			res = 'YES'
		elif sysvals.usecallgraph:
			status = efmt.format('-f')
		elif sysvals.usedevsrc:
			status = efmt.format('-dev')
		elif sysvals.useprocmon:
			status = efmt.format('-proc')
		pprint(' is ftrace supported: %s' % res)
	# check if kprobes are available
	if sysvals.usekprobes:
		res = sysvals.colorText('NO')
		sysvals.usekprobes = sysvals.verifyKprobes()
		if(sysvals.usekprobes):
			res = 'YES'
		else:
			# dev mode requires kprobes, so disable it too
			sysvals.usedevsrc = False
		pprint(' are kprobes supported: %s' % res)
	# what data source are we using
	res = 'DMESG (very limited, ftrace is preferred)'
	if sysvals.useftrace:
		sysvals.usetraceevents = True
		# all trace events must exist for the ftrace data source
		for e in sysvals.traceevents:
			if not os.path.exists(sysvals.epath+e):
				sysvals.usetraceevents = False
		if(sysvals.usetraceevents):
			res = 'FTRACE (all trace events found)'
	pprint(' timeline data source: %s' % res)
	# check if rtcwake
	res = sysvals.colorText('NO')
	if(sysvals.rtcpath != ''):
		res = 'YES'
	elif(sysvals.rtcwake):
		status = 'rtcwake is not properly supported'
	pprint(' is rtcwake supported: %s' % res)
	# check info commands
	pprint(' optional commands this tool may use for info:')
	no = sysvals.colorText('MISSING')
	yes = sysvals.colorText('FOUND', 32)
	for c in ['turbostat', 'mcelog', 'lspci', 'lsusb', 'netfix']:
		if c == 'turbostat':
			res = yes if sysvals.haveTurbostat() else no
		else:
			res = yes if sysvals.getExec(c) else no
		pprint(' %s: %s' % (c, res))
	if not probecheck:
		return status
	# verify kprobes
	if sysvals.usekprobes:
		for name in sysvals.tracefuncs:
			sysvals.defaultKprobe(name, sysvals.tracefuncs[name])
		if sysvals.usedevsrc:
			for name in sysvals.dev_tracefuncs:
				sysvals.defaultKprobe(name, sysvals.dev_tracefuncs[name])
		sysvals.addKprobes(True)
	return status
# Function: doError
# Description:
# generic error function for catastrophic failures
# Arguments:
# msg: the error message to print
# help: True if printHelp should be called after, False otherwise
def doError(msg, help=False):
	"""Report a fatal error, record it in the result file, and exit.

	Arguments:
		msg: the error message to print
		help: True if printHelp should be called first, False otherwise
	"""
	if(help == True):
		printHelp()
	pprint('ERROR: %s\n' % msg)
	# make the failure visible to whoever consumes the result file
	sysvals.outputResult({'error': msg})
	sys.exit(1)
# Function: getArgInt
# Description:
# pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max, main=True):
	"""Pull an integer argument from the command line with range checks.

	Arguments:
		name: option name, used in error messages
		args: an argument iterator (main=True) or the raw value (main=False)
		min/max: inclusive bounds for the value
		main: True to pull the next value from the args iterator
	Returns:
		the parsed integer (calls doError and exits on any failure)
	"""
	if not main:
		raw = args
	else:
		try:
			raw = next(args)
		except:
			doError(name+': no argument supplied', True)
	try:
		val = int(raw)
	except:
		doError(name+': non-integer value given', True)
	if not (min <= val <= max):
		doError(name+': value should be between %d and %d' % (min, max), True)
	return val
# Function: getArgFloat
# Description:
# pull out a float argument from the command line with checks
def getArgFloat(name, args, min, max, main=True):
	"""Pull a float argument from the command line with range checks.

	Arguments:
		name: option name, used in error messages
		args: an argument iterator (main=True) or the raw value (main=False)
		min/max: inclusive bounds for the value
		main: True to pull the next value from the args iterator
	Returns:
		the parsed float (calls doError and exits on any failure)
	"""
	if not main:
		raw = args
	else:
		try:
			raw = next(args)
		except:
			doError(name+': no argument supplied', True)
	try:
		val = float(raw)
	except:
		doError(name+': non-numerical value given', True)
	if not (min <= val <= max):
		doError(name+': value should be between %f and %f' % (min, max), True)
	return val
def processData(live=False, quiet=False):
	"""Parse the captured test logs and generate the html timeline.

	Uses the ftrace event log when available, falling back to dmesg-only
	parsing otherwise, then renders the result with createHTML.

	Arguments:
		live: passed through to parseTraceLog (True during a live test run)
		quiet: True to suppress the PROCESSING/DONE status lines
	Returns:
		(testruns, stamp) tuple; on failure stamp is a dict with an
		'error' entry describing what went wrong
	"""
	if not quiet:
		pprint('PROCESSING: %s' % sysvals.htmlfile)
	sysvals.vprint('usetraceevents=%s, usetracemarkers=%s, usekprobes=%s' % \
		(sysvals.usetraceevents, sysvals.usetracemarkers, sysvals.usekprobes))
	error = ''
	if(sysvals.usetraceevents):
		# preferred path: full ftrace event data
		testruns, error = parseTraceLog(live)
		if sysvals.dmesgfile:
			for data in testruns:
				data.extractErrorInfo()
	else:
		# fallback: dmesg only, optionally augmented with partial ftrace data
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.ftracefile and (sysvals.usecallgraph or sysvals.usetraceevents)):
			appendIncompleteTraceLog(testruns)
	if not sysvals.stamp:
		pprint('ERROR: data does not include the expected stamp')
		return (testruns, {'error': 'timeline generation failed'})
	# verbose-print the system info fields we consider interesting
	shown = ['os', 'bios', 'biosdate', 'cpu', 'host', 'kernel', 'man', 'memfr',
			'memsz', 'mode', 'numcpu', 'plat', 'time', 'wifi']
	sysvals.vprint('System Info:')
	for key in sorted(sysvals.stamp):
		if key in shown:
			sysvals.vprint(' %-8s : %s' % (key.upper(), sysvals.stamp[key]))
	sysvals.vprint('Command:\n %s' % sysvals.cmdline)
	for data in testruns:
		if data.turbostat:
			# re-wrap the '|'-separated turbostat output at ~80 columns
			idx, s = 0, 'Turbostat:\n '
			for val in data.turbostat.split('|'):
				idx += len(val) + 1
				if idx >= 80:
					idx = 0
					s += '\n '
				s += val + ' '
			sysvals.vprint(s)
		data.printDetails()
	if len(sysvals.platinfo) > 0:
		sysvals.vprint('\nPlatform Info:')
		for info in sysvals.platinfo:
			sysvals.vprint('[%s - %s]' % (info[0], info[1]))
			sysvals.vprint(info[2])
		sysvals.vprint('')
	if sysvals.cgdump:
		# callgraph dump mode: print and exit without generating html
		for data in testruns:
			data.debugPrint()
		sys.exit(0)
	if len(testruns) < 1:
		pprint('ERROR: Not enough test data to build a timeline')
		return (testruns, {'error': 'timeline generation failed'})
	sysvals.vprint('Creating the html timeline (%s)...' % sysvals.htmlfile)
	createHTML(testruns, error)
	if not quiet:
		pprint('DONE: %s' % sysvals.htmlfile)
	# summarize the first test run's timing in the stamp
	data = testruns[0]
	stamp = data.stamp
	stamp['suspend'], stamp['resume'] = data.getTimeValues()
	if data.fwValid:
		stamp['fwsuspend'], stamp['fwresume'] = data.fwSuspend, data.fwResume
	if error:
		stamp['error'] = error
	return (testruns, stamp)
# Function: rerunTest
# Description:
# generate an output from an existing set of ftrace/dmesg logs
def rerunTest(htmlfile=''):
	"""Regenerate the html output from an existing set of ftrace/dmesg logs.

	Arguments:
		htmlfile: output file to write; '' to derive it from the log names
	Returns:
		the stamp dict produced by processData
	"""
	if sysvals.ftracefile:
		doesTraceLogHaveTraceEvents()
	if not (sysvals.dmesgfile or sysvals.usetraceevents):
		doError('recreating this html output requires a dmesg file')
	if htmlfile:
		sysvals.htmlfile = htmlfile
	else:
		sysvals.setOutputFile()
	out = sysvals.htmlfile
	if os.path.exists(out):
		# refuse to clobber a directory or an unwritable file
		if not os.path.isfile(out):
			doError('a directory already exists with this name: %s' % out)
		if not os.access(out, os.W_OK):
			doError('missing permission to write to %s' % out)
	testruns, stamp = processData()
	sysvals.resetlog()
	return stamp
# Function: runTest
# Description:
# execute a suspend/resume, gather the logs, and generate the output
def runTest(n=0, quiet=False):
	"""Execute a suspend/resume, gather the logs, and generate the output.

	Arguments:
		n: test number in a multi-test run; <= 1 triggers the one-time
			system setup (runtime suspend, display init, pm debug values)
		quiet: True to minimize console output
	Returns:
		None when html generation is skipped, 0 on success, 2 if the
		processed data contains an error
	"""
	# prepare for the test
	sysvals.initTestOutput('suspend')
	op = sysvals.writeDatafileHeader(sysvals.dmesgfile, [])
	op.write('# EXECUTION TRACE START\n')
	op.close()
	if n <= 1:
		# one-time setup for the first (or only) test
		if sysvals.rs != 0:
			sysvals.dlog('%sabling runtime suspend' % ('en' if sysvals.rs > 0 else 'dis'))
			sysvals.setRuntimeSuspend(True)
		if sysvals.display:
			ret = sysvals.displayControl('init')
			sysvals.dlog('xset display init, ret = %d' % ret)
	sysvals.testVal(sysvals.pmdpath, 'basic', '1')
	sysvals.testVal(sysvals.s0ixpath, 'basic', 'Y')
	sysvals.dlog('initialize ftrace')
	sysvals.initFtrace(quiet)

	# execute the test
	executeSuspend(quiet)
	sysvals.cleanupFtrace()
	if sysvals.skiphtml:
		# logs captured; skip timeline generation entirely
		sysvals.outputResult({}, n)
		sysvals.sudoUserchown(sysvals.testdir)
		return
	testruns, stamp = processData(True, quiet)
	for data in testruns:
		del data
	# hand ownership of the output back to the invoking (sudo) user
	sysvals.sudoUserchown(sysvals.testdir)
	sysvals.outputResult(stamp, n)
	if 'error' in stamp:
		return 2
	return 0
def find_in_html(html, start, end, firstonly=True):
	"""Extract the text between a start and end regex pattern in html.

	Arguments:
		html: the text to search
		start: regex pattern marking the beginning of the span
		end: regex pattern marking the end of the span; the special value
			'ms' additionally reduces the span to the first number in it
			(or 'NaN' if none is found)
		firstonly: True to return only the first match
	Returns:
		the matched string ('' if none) when firstonly is True,
		otherwise a list of all matched strings
	"""
	# NOTE: the original shadowed the builtins 'list' and 'str' here
	cnt, out, starts = len(html), [], []
	if firstonly:
		m = re.search(start, html)
		if m:
			starts.append(m)
	else:
		starts = re.finditer(start, html)
	for match in starts:
		s = match.end()
		# after the first hit, cap the search window at 10000 chars
		e = cnt if (len(out) < 1 or s + 10000 > cnt) else s + 10000
		m = re.search(end, html[s:e])
		if not m:
			break
		e = s + m.start()
		val = html[s:e]
		if end == 'ms':
			num = re.search(r'[-+]?\d*\.\d+|\d+', val)
			val = num.group() if num else 'NaN'
		if firstonly:
			return val
		out.append(val)
	return '' if firstonly else out
def data_from_html(file, outpath, issues, fulldetail=False):
	"""Scrape the summary data out of a previously generated timeline html.

	Arguments:
		file: path of the timeline html file to parse
		outpath: base folder used to relativize the file's url
		issues: shared list of issue dicts, updated in place with any
			errors found in this test's embedded dmesg log
		fulldetail: True to also extract the full traceevent list
	Returns:
		a dict of test attributes (mode, host, kernel, times, worst
		devices, etc), or False if the file doesn't parse as a timeline
	"""
	html = open(file, 'r').read()
	sysvals.htmlfile = os.path.relpath(file, outpath)
	# extract general info
	suspend = find_in_html(html, 'Kernel Suspend', 'ms')
	resume = find_in_html(html, 'Kernel Resume', 'ms')
	sysinfo = find_in_html(html, '<div class="stamp sysinfo">', '</div>')
	line = find_in_html(html, '<div class="stamp">', '</div>')
	# stamp format assumed: host kernel mode <date words...> (8 tokens)
	stmp = line.split()
	if not suspend or not resume or len(stmp) != 8:
		return False
	try:
		dt = datetime.strptime(' '.join(stmp[3:]), '%B %d %Y, %I:%M:%S %p')
	except:
		return False
	sysvals.hostname = stmp[0]
	tstr = dt.strftime('%Y/%m/%d %H:%M:%S')
	error = find_in_html(html, '<table class="testfail"><tr><td>', '</td>')
	if error:
		m = re.match('[a-z0-9]* failed in (?P<p>\S*).*', error)
		if m:
			result = 'fail in %s' % m.group('p')
		else:
			result = 'fail'
	else:
		result = 'pass'
	# extract error info
	tp, ilist = False, []
	extra = dict()
	log = find_in_html(html, '<div id="dmesglog" style="display:none;">',
		'</div>').strip()
	if log:
		# reuse the Data parser on the embedded dmesg text
		d = Data(0)
		d.end = 999999999
		d.dmesgtext = log.split('\n')
		tp = d.extractErrorInfo()
		for msg in tp.msglist:
			sysvals.errorSummary(issues, msg)
		if stmp[2] == 'freeze':
			extra = d.turbostatInfo()
		# count each distinct error and render as NAME or NAMExCOUNT
		elist = dict()
		for dir in d.errorinfo:
			for err in d.errorinfo[dir]:
				if err[0] not in elist:
					elist[err[0]] = 0
				elist[err[0]] += 1
		for i in elist:
			ilist.append('%sx%d' % (i, elist[i]) if elist[i] > 1 else i)
		line = find_in_html(log, '# wifi ', '\n')
		if line:
			extra['wifi'] = line
		line = find_in_html(log, '# netfix ', '\n')
		if line:
			extra['netfix'] = line
	# detect s2idle loop/wake issues from the freeze time annotation
	low = find_in_html(html, 'freeze time: <b>', ' ms</b>')
	for lowstr in ['waking', '+']:
		if not low:
			break
		if lowstr not in low:
			continue
		if lowstr == '+':
			issue = 'S2LOOPx%d' % len(low.split('+'))
		else:
			m = re.match('.*waking *(?P<n>[0-9]*) *times.*', low)
			issue = 'S2WAKEx%s' % m.group('n') if m else 'S2WAKExNaN'
		# merge into the shared issues list, tracking urls per host
		match = [i for i in issues if i['match'] == issue]
		if len(match) > 0:
			match[0]['count'] += 1
			if sysvals.hostname not in match[0]['urls']:
				match[0]['urls'][sysvals.hostname] = [sysvals.htmlfile]
			elif sysvals.htmlfile not in match[0]['urls'][sysvals.hostname]:
				match[0]['urls'][sysvals.hostname].append(sysvals.htmlfile)
		else:
			issues.append({
				'match': issue, 'count': 1, 'line': issue,
				'urls': {sysvals.hostname: [sysvals.htmlfile]},
			})
		ilist.append(issue)
	# extract device info
	devices = dict()
	for line in html.split('\n'):
		m = re.match(' *<div id=\"[a,0-9]*\" *title=\"(?P<title>.*)\" class=\"thread.*', line)
		if not m or 'thread kth' in line or 'thread sec' in line:
			continue
		m = re.match('(?P<n>.*) \((?P<t>[0-9,\.]*) ms\) (?P<p>.*)', m.group('title'))
		if not m:
			continue
		name, time, phase = m.group('n'), m.group('t'), m.group('p')
		if name == 'async_synchronize_full':
			continue
		# fold async/sync variants of a device into one name
		if ' async' in name or ' sync' in name:
			name = ' '.join(name.split(' ')[:-1])
		if phase.startswith('suspend'):
			d = 'suspend'
		elif phase.startswith('resume'):
			d = 'resume'
		else:
			continue
		if d not in devices:
			devices[d] = dict()
		if name not in devices[d]:
			devices[d][name] = 0.0
		devices[d][name] += float(time)
	# create worst device info
	worst = dict()
	for d in ['suspend', 'resume']:
		worst[d] = {'name':'', 'time': 0.0}
		dev = devices[d] if d in devices else 0
		if dev and len(dev.keys()) > 0:
			# highest accumulated time wins; name breaks ties
			n = sorted(dev, key=lambda k:(dev[k], k), reverse=True)[0]
			worst[d]['name'], worst[d]['time'] = n, dev[n]
	data = {
		'mode': stmp[2],
		'host': stmp[0],
		'kernel': stmp[1],
		'sysinfo': sysinfo,
		'time': tstr,
		'result': result,
		'issues': ' '.join(ilist),
		'suspend': suspend,
		'resume': resume,
		'devlist': devices,
		'sus_worst': worst['suspend']['name'],
		'sus_worsttime': worst['suspend']['time'],
		'res_worst': worst['resume']['name'],
		'res_worsttime': worst['resume']['time'],
		'url': sysvals.htmlfile,
	}
	for key in extra:
		data[key] = extra[key]
	if fulldetail:
		data['funclist'] = find_in_html(html, '<div title="', '" class="traceevent"', False)
	if tp:
		# pull the -multi/-info target name out of the recorded cmdline
		for arg in ['-multi ', '-info ']:
			if arg in tp.cmdline:
				data['target'] = tp.cmdline[tp.cmdline.find(arg):].split()[1]
				break
	return data
def genHtml(subdir, force=False):
	"""Walk subdir and regenerate the html timeline for each log set found.

	Arguments:
		subdir: folder to scan recursively for *_dmesg.txt/*_ftrace.txt logs
		force: True to regenerate even when a usable html file exists
	"""
	for dirname, dirnames, filenames in os.walk(subdir):
		sysvals.dmesgfile = sysvals.ftracefile = sysvals.htmlfile = ''
		# pick up the dmesg/ftrace log pair in this directory, if any
		for fname in filenames:
			fpath = os.path.join(dirname, fname)
			if not sysvals.usable(fpath):
				continue
			if re.match('.*_dmesg.txt', fname):
				sysvals.dmesgfile = fpath
			elif re.match('.*_ftrace.txt', fname):
				sysvals.ftracefile = fpath
		sysvals.setOutputFile()
		if (sysvals.dmesgfile or sysvals.ftracefile) and sysvals.htmlfile and \
			(force or not sysvals.usable(sysvals.htmlfile, True)):
			pprint('FTRACE: %s' % sysvals.ftracefile)
			if sysvals.dmesgfile:
				pprint('DMESG : %s' % sysvals.dmesgfile)
			rerunTest()
# Function: runSummary
# Description:
# create a summary of tests in a sub-directory
def runSummary(subdir, local=True, genhtml=False):
	"""Create summary html files from all the tests found in a sub-directory.

	Arguments:
		subdir: folder to scan (recursively) for timeline html files
		local: True to write the summaries into the current directory,
			False to write them into subdir itself
		genhtml: True to first regenerate any missing timeline html
	"""
	inpath = os.path.abspath(subdir)
	outpath = os.path.abspath('.') if local else inpath
	pprint('Generating a summary of folder:\n %s' % inpath)
	if genhtml:
		genHtml(subdir)
	target, issues, testruns = '', [], []
	desc = {'host':[],'mode':[],'kernel':[]}
	for dirname, dirnames, filenames in os.walk(subdir):
		for filename in filenames:
			# FIX: was re.match('.*.html', filename) whose unescaped '.'
			# and missing anchor matched any name merely containing "html";
			# check the extension exactly instead
			if not filename.endswith('.html'):
				continue
			data = data_from_html(os.path.join(dirname, filename), outpath, issues)
			if(not data):
				continue
			if 'target' in data:
				target = data['target']
			testruns.append(data)
			# collect the distinct hosts/modes/kernels seen
			for key in desc:
				if data[key] not in desc[key]:
					desc[key].append(data[key])
	pprint('Summary files:')
	# use a descriptive title only when all runs share one host/mode/kernel
	if len(desc['host']) == len(desc['mode']) == len(desc['kernel']) == 1:
		title = '%s %s %s' % (desc['host'][0], desc['kernel'][0], desc['mode'][0])
		if target:
			title += ' %s' % target
	else:
		title = inpath
	createHTMLSummarySimple(testruns, os.path.join(outpath, 'summary.html'), title)
	pprint(' summary.html - tabular list of test data found')
	createHTMLDeviceSummary(testruns, os.path.join(outpath, 'summary-devices.html'), title)
	pprint(' summary-devices.html - kernel device list sorted by total execution time')
	createHTMLIssuesSummary(testruns, issues, os.path.join(outpath, 'summary-issues.html'), title)
	pprint(' summary-issues.html - kernel issues found sorted by frequency')
# Function: checkArgBool
# Description:
# check if a boolean string value is true or false
def checkArgBool(name, value):
	"""Interpret a boolean config string; exit with an error if unrecognized.

	Arguments:
		name: option name, used in the error message
		value: the string to interpret against switchvalues/switchoff
	Returns:
		True/False for recognized values (doError exits otherwise)
	"""
	if value not in switchvalues:
		doError('invalid boolean --> (%s: %s), use "true/false" or "1/0"' % (name, value), True)
		return False
	return value not in switchoff
# Function: configFromFile
# Description:
# Configure the script via the info in a config file
def configFromFile(file):
	"""Configure the script via the info in a config file.

	Reads the 'Settings' section for command-line-equivalent options and
	the [dev_]timeline_functions_<arch> sections for kprobe definitions,
	storing everything into sysvals.

	Arguments:
		file: path of the config file to read
	"""
	Config = configparser.ConfigParser()
	Config.read(file)
	sections = Config.sections()
	overridekprobes = False
	overridedevkprobes = False
	if 'Settings' in sections:
		for opt in Config.options('Settings'):
			value = Config.get('Settings', opt).lower()
			option = opt.lower()
			if(option == 'verbose'):
				sysvals.verbose = checkArgBool(option, value)
			elif(option == 'addlogs'):
				sysvals.dmesglog = sysvals.ftracelog = checkArgBool(option, value)
			elif(option == 'dev'):
				sysvals.usedevsrc = checkArgBool(option, value)
			elif(option == 'proc'):
				sysvals.useprocmon = checkArgBool(option, value)
			elif(option == 'x2'):
				if checkArgBool(option, value):
					sysvals.execcount = 2
			elif(option == 'callgraph'):
				sysvals.usecallgraph = checkArgBool(option, value)
			elif(option == 'override-timeline-functions'):
				overridekprobes = checkArgBool(option, value)
			elif(option == 'override-dev-timeline-functions'):
				overridedevkprobes = checkArgBool(option, value)
			elif(option == 'skiphtml'):
				sysvals.skiphtml = checkArgBool(option, value)
			elif(option == 'sync'):
				sysvals.sync = checkArgBool(option, value)
			elif(option == 'rs' or option == 'runtimesuspend'):
				if value in switchvalues:
					if value in switchoff:
						sysvals.rs = -1
					else:
						sysvals.rs = 1
				else:
					doError('invalid value --> (%s: %s), use "enable/disable"' % (option, value), True)
			elif(option == 'display'):
				disopt = ['on', 'off', 'standby', 'suspend']
				if value not in disopt:
					doError('invalid value --> (%s: %s), use %s' % (option, value, disopt), True)
				sysvals.display = value
			elif(option == 'gzip'):
				sysvals.gzip = checkArgBool(option, value)
			elif(option == 'cgfilter'):
				sysvals.setCallgraphFilter(value)
			elif(option == 'cgskip'):
				if value in switchoff:
					sysvals.cgskip = ''
				else:
					# FIX: was sysvals.configFile(val) with undefined
					# name 'val' -> NameError whenever cgskip was set
					sysvals.cgskip = sysvals.configFile(value)
					if(not sysvals.cgskip):
						# FIX: report the requested file name, not the
						# (empty) lookup result
						doError('%s does not exist' % value)
			elif(option == 'cgtest'):
				sysvals.cgtest = getArgInt('cgtest', value, 0, 1, False)
			elif(option == 'cgphase'):
				d = Data(0)
				if value not in d.phasedef:
					doError('invalid phase --> (%s: %s), valid phases are %s'\
						% (option, value, d.phasedef.keys()), True)
				sysvals.cgphase = value
			elif(option == 'fadd'):
				file = sysvals.configFile(value)
				if(not file):
					doError('%s does not exist' % value)
				sysvals.addFtraceFilterFunctions(file)
			elif(option == 'result'):
				sysvals.result = value
			elif(option == 'multi'):
				nums = value.split()
				if len(nums) != 2:
					doError('multi requires 2 integers (exec_count and delay)', True)
				sysvals.multiinit(nums[0], nums[1])
			elif(option == 'devicefilter'):
				sysvals.setDeviceFilter(value)
			elif(option == 'expandcg'):
				sysvals.cgexp = checkArgBool(option, value)
			elif(option == 'srgap'):
				if checkArgBool(option, value):
					sysvals.srgap = 5
			elif(option == 'mode'):
				sysvals.suspendmode = value
			elif(option == 'command' or option == 'cmd'):
				sysvals.testcommand = value
			elif(option == 'x2delay'):
				sysvals.x2delay = getArgInt('x2delay', value, 0, 60000, False)
			elif(option == 'predelay'):
				sysvals.predelay = getArgInt('predelay', value, 0, 60000, False)
			elif(option == 'postdelay'):
				sysvals.postdelay = getArgInt('postdelay', value, 0, 60000, False)
			elif(option == 'maxdepth'):
				sysvals.max_graph_depth = getArgInt('maxdepth', value, 0, 1000, False)
			elif(option == 'rtcwake'):
				if value in switchoff:
					sysvals.rtcwake = False
				else:
					sysvals.rtcwake = True
					sysvals.rtcwaketime = getArgInt('rtcwake', value, 0, 3600, False)
			elif(option == 'timeprec'):
				sysvals.setPrecision(getArgInt('timeprec', value, 0, 6, False))
			elif(option == 'mindev'):
				sysvals.mindevlen = getArgFloat('mindev', value, 0.0, 10000.0, False)
			elif(option == 'callloop-maxgap'):
				sysvals.callloopmaxgap = getArgFloat('callloop-maxgap', value, 0.0, 1.0, False)
			elif(option == 'callloop-maxlen'):
				# FIX: previously overwrote callloopmaxgap (copy/paste bug)
				sysvals.callloopmaxlen = getArgFloat('callloop-maxlen', value, 0.0, 1.0, False)
			elif(option == 'mincg'):
				sysvals.mincglen = getArgFloat('mincg', value, 0.0, 10000.0, False)
			elif(option == 'bufsize'):
				sysvals.bufsize = getArgInt('bufsize', value, 1, 1024*1024*8, False)
			elif(option == 'output-dir'):
				sysvals.outdir = sysvals.setOutputFolder(value)
	if sysvals.suspendmode == 'command' and not sysvals.testcommand:
		doError('No command supplied for mode "command"')
	# compatibility errors
	if sysvals.usedevsrc and sysvals.usecallgraph:
		doError('-dev is not compatible with -f')
	if sysvals.usecallgraph and sysvals.useprocmon:
		doError('-proc is not compatible with -f')
	if overridekprobes:
		sysvals.tracefuncs = dict()
	if overridedevkprobes:
		sysvals.dev_tracefuncs = dict()
	# collect kprobe definitions, dev functions first so name clashes error
	kprobes = dict()
	kprobesec = 'dev_timeline_functions_'+platform.machine()
	if kprobesec in sections:
		for name in Config.options(kprobesec):
			text = Config.get(kprobesec, name)
			kprobes[name] = (text, True)
	kprobesec = 'timeline_functions_'+platform.machine()
	if kprobesec in sections:
		for name in Config.options(kprobesec):
			if name in kprobes:
				doError('Duplicate timeline function found "%s"' % (name))
			text = Config.get(kprobesec, name)
			kprobes[name] = (text, False)
	for name in kprobes:
		function = name
		format = name
		color = ''
		args = dict()
		text, dev = kprobes[name]
		data = text.split()
		i = 0
		for val in data:
			# bracketed strings are special formatting, read them separately
			if val[0] == '[' and val[-1] == ']':
				for prop in val[1:-1].split(','):
					p = prop.split('=')
					if p[0] == 'color':
						try:
							# a valid hex value becomes '#RRGGBB',
							# anything else is used verbatim
							color = int(p[1], 16)
							color = '#'+p[1]
						except:
							color = p[1]
				continue
			# first real arg should be the format string
			if i == 0:
				format = val
			# all other args are actual function args
			else:
				d = val.split('=')
				args[d[0]] = d[1]
			i += 1
		if not function or not format:
			doError('Invalid kprobe: %s' % name)
		# every {arg} referenced in the format must have a definition
		for arg in re.findall('{(?P<n>[a-z,A-Z,0-9]*)}', format):
			if arg not in args:
				doError('Kprobe "%s" is missing argument "%s"' % (name, arg))
		if (dev and name in sysvals.dev_tracefuncs) or (not dev and name in sysvals.tracefuncs):
			doError('Duplicate timeline function found "%s"' % (name))
		kp = {
			'name': name,
			'func': function,
			'format': format,
			sysvals.archargs: args
		}
		if color:
			kp['color'] = color
		if dev:
			sysvals.dev_tracefuncs[name] = kp
		else:
			sysvals.tracefuncs[name] = kp
# Function: printHelp
# Description:
#	 print out the help text
def printHelp():
	"""Print the sleepgraph usage/help text to stdout.

	The text is rendered through the tool's pprint() helper and is filled
	in with the current defaults (title, version, suspend mode and ftop
	function) taken from the global sysvals object.  Always returns True.
	"""
	pprint('\n%s v%s\n'\
	'Usage: sudo sleepgraph <options> <commands>\n'\
	'\n'\
	'Description:\n'\
	'  This tool is designed to assist kernel and OS developers in optimizing\n'\
	'  their linux stack\'s suspend/resume time. Using a kernel image built\n'\
	'  with a few extra options enabled, the tool will execute a suspend and\n'\
	'  capture dmesg and ftrace data until resume is complete. This data is\n'\
	'  transformed into a device timeline and an optional callgraph to give\n'\
	'  a detailed view of which devices/subsystems are taking the most\n'\
	'  time in suspend/resume.\n'\
	'\n'\
	'  If no specific command is given, the default behavior is to initiate\n'\
	'  a suspend/resume and capture the dmesg/ftrace output as an html timeline.\n'\
	'\n'\
	'  Generates output files in subdirectory: suspend-yymmdd-HHMMSS\n'\
	'   HTML output:                    <hostname>_<mode>.html\n'\
	'   raw dmesg output:               <hostname>_<mode>_dmesg.txt\n'\
	'   raw ftrace output:              <hostname>_<mode>_ftrace.txt\n'\
	'\n'\
	'Options:\n'\
	'   -h           Print this help text\n'\
	'   -v           Print the current tool version\n'\
	'   -config fn   Pull arguments and config options from file fn\n'\
	'   -verbose     Print extra information during execution and analysis\n'\
	'   -m mode      Mode to initiate for suspend (default: %s)\n'\
	'   -o name      Overrides the output subdirectory name when running a new test\n'\
	'                default: suspend-{date}-{time}\n'\
	'   -rtcwake t   Wakeup t seconds after suspend, set t to "off" to disable (default: 15)\n'\
	'   -addlogs     Add the dmesg and ftrace logs to the html output\n'\
	'   -noturbostat Dont use turbostat in freeze mode (default: disabled)\n'\
	'   -srgap       Add a visible gap in the timeline between sus/res (default: disabled)\n'\
	'   -skiphtml    Run the test and capture the trace logs, but skip the timeline (default: disabled)\n'\
	'   -result fn   Export a results table to a text file for parsing.\n'\
	'   -wifi        If a wifi connection is available, check that it reconnects after resume.\n'\
	'   -wifitrace   Trace kernel execution through wifi reconnect.\n'\
	'   -netfix      Use netfix to reset the network in the event it fails to resume.\n'\
	'  [testprep]\n'\
	'   -sync        Sync the filesystems before starting the test\n'\
	'   -rs on/off   Enable/disable runtime suspend for all devices, restore all after test\n'\
	'   -display m   Change the display mode to m for the test (on/off/standby/suspend)\n'\
	'  [advanced]\n'\
	'   -gzip        Gzip the trace and dmesg logs to save space\n'\
	'   -cmd {s}     Run the timeline over a custom command, e.g. "sync -d"\n'\
	'   -proc        Add usermode process info into the timeline (default: disabled)\n'\
	'   -dev         Add kernel function calls and threads to the timeline (default: disabled)\n'\
	'   -x2          Run two suspend/resumes back to back (default: disabled)\n'\
	'   -x2delay t   Include t ms delay between multiple test runs (default: 0 ms)\n'\
	'   -predelay t  Include t ms delay before 1st suspend (default: 0 ms)\n'\
	'   -postdelay t Include t ms delay after last resume (default: 0 ms)\n'\
	'   -mindev ms   Discard all device blocks shorter than ms milliseconds (e.g. 0.001 for us)\n'\
	'   -multi n d   Execute <n> consecutive tests at <d> seconds intervals. If <n> is followed\n'\
	'                by a "d", "h", or "m" execute for <n> days, hours, or mins instead.\n'\
	'                The outputs will be created in a new subdirectory with a summary page.\n'\
	'   -maxfail n   Abort a -multi run after n consecutive fails (default is 0 = never abort)\n'\
	'  [debug]\n'\
	'   -f           Use ftrace to create device callgraphs (default: disabled)\n'\
	'   -ftop        Use ftrace on the top level call: "%s" (default: disabled)\n'\
	'   -maxdepth N  limit the callgraph data to N call levels (default: 0=all)\n'\
	'   -expandcg    pre-expand the callgraph data in the html output (default: disabled)\n'\
	'   -fadd file   Add functions to be graphed in the timeline from a list in a text file\n'\
	'   -filter "d1,d2,..." Filter out all but this comma-delimited list of device names\n'\
	'   -mincg  ms   Discard all callgraphs shorter than ms milliseconds (e.g. 0.001 for us)\n'\
	'   -cgphase P   Only show callgraph data for phase P (e.g. suspend_late)\n'\
	'   -cgtest N    Only show callgraph data for test N (e.g. 0 or 1 in an x2 run)\n'\
	'   -timeprec N  Number of significant digits in timestamps (0:S, [3:ms], 6:us)\n'\
	'   -cgfilter S  Filter the callgraph output in the timeline\n'\
	'   -cgskip file Callgraph functions to skip, off to disable (default: cgskip.txt)\n'\
	'   -bufsize N   Set trace buffer size to N kilo-bytes (default: all of free memory)\n'\
	'   -devdump     Print out all the raw device data for each phase\n'\
	'   -cgdump      Print out all the raw callgraph data\n'\
	'\n'\
	'Other commands:\n'\
	'   -modes       List available suspend modes\n'\
	'   -status      Test to see if the system is enabled to run this tool\n'\
	'   -fpdt        Print out the contents of the ACPI Firmware Performance Data Table\n'\
	'   -wificheck   Print out wifi connection info\n'\
	'   -x<mode>     Test xset by toggling the given mode (on/off/standby/suspend)\n'\
	'   -sysinfo     Print out system info extracted from BIOS\n'\
	'   -devinfo     Print out the pm settings of all devices which support runtime suspend\n'\
	'   -cmdinfo     Print out all the platform info collected before and after suspend/resume\n'\
	'   -flist       Print the list of functions currently being captured in ftrace\n'\
	'   -flistall    Print all functions capable of being captured in ftrace\n'\
	'   -summary dir Create a summary of tests in this dir [-genhtml builds missing html]\n'\
	'  [redo]\n'\
	'   -ftrace ftracefile  Create HTML output using ftrace input (used with -dmesg)\n'\
	'   -dmesg dmesgfile    Create HTML output using dmesg (used with -ftrace)\n'\
	'' % (sysvals.title, sysvals.version, sysvals.suspendmode, sysvals.ftopfunc))
	return True
# ----------------- MAIN --------------------
# exec start (skipped if script is loaded as library)
if __name__ == '__main__':
	genhtml = False
	cmd = ''
	# single-shot utility commands: run one action and exit, no suspend test
	simplecmds = ['-sysinfo', '-modes', '-fpdt', '-flist', '-flistall',
		'-devinfo', '-status', '-xon', '-xoff', '-xstandby', '-xsuspend',
		'-xinit', '-xreset', '-xstat', '-wificheck', '-cmdinfo']
	# callgraph mode (-f) defaults to the cgskip.txt filter file if present
	if '-f' in sys.argv:
		sysvals.cgskip = sysvals.configFile('cgskip.txt')
	# loop through the command line arguments
	# (manual iterator so each option can pull its value with next())
	args = iter(sys.argv[1:])
	for arg in args:
		if(arg == '-m'):
			try:
				val = next(args)
			except:
				doError('No mode supplied', True)
			if val == 'command' and not sysvals.testcommand:
				doError('No command supplied for mode "command"', True)
			sysvals.suspendmode = val
		elif(arg in simplecmds):
			# remember the utility command; it runs after all args are parsed
			cmd = arg[1:]
		elif(arg == '-h'):
			printHelp()
			sys.exit(0)
		elif(arg == '-v'):
			pprint("Version %s" % sysvals.version)
			sys.exit(0)
		elif(arg == '-debugtiming'):
			debugtiming = True
		elif(arg == '-x2'):
			sysvals.execcount = 2
		elif(arg == '-x2delay'):
			sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000)
		elif(arg == '-predelay'):
			sysvals.predelay = getArgInt('-predelay', args, 0, 60000)
		elif(arg == '-postdelay'):
			sysvals.postdelay = getArgInt('-postdelay', args, 0, 60000)
		elif(arg == '-f'):
			sysvals.usecallgraph = True
		elif(arg == '-ftop'):
			sysvals.usecallgraph = True
			sysvals.ftop = True
			sysvals.usekprobes = False
		elif(arg == '-skiphtml'):
			sysvals.skiphtml = True
		elif(arg == '-cgdump'):
			sysvals.cgdump = True
		elif(arg == '-devdump'):
			sysvals.devdump = True
		elif(arg == '-genhtml'):
			genhtml = True
		elif(arg == '-addlogs'):
			sysvals.dmesglog = sysvals.ftracelog = True
		elif(arg == '-nologs'):
			sysvals.dmesglog = sysvals.ftracelog = False
		elif(arg == '-addlogdmesg'):
			sysvals.dmesglog = True
		elif(arg == '-addlogftrace'):
			sysvals.ftracelog = True
		elif(arg == '-noturbostat'):
			sysvals.tstat = False
		elif(arg == '-verbose'):
			sysvals.verbose = True
		elif(arg == '-proc'):
			sysvals.useprocmon = True
		elif(arg == '-dev'):
			sysvals.usedevsrc = True
		elif(arg == '-sync'):
			sysvals.sync = True
		elif(arg == '-wifi'):
			sysvals.wifi = True
		elif(arg == '-wifitrace'):
			sysvals.wifitrace = True
		elif(arg == '-netfix'):
			sysvals.netfix = True
		elif(arg == '-gzip'):
			sysvals.gzip = True
		elif(arg == '-info'):
			# value is consumed but intentionally unused here
			try:
				val = next(args)
			except:
				doError('-info requires one string argument', True)
		elif(arg == '-desc'):
			# value is consumed but intentionally unused here
			try:
				val = next(args)
			except:
				doError('-desc requires one string argument', True)
		elif(arg == '-rs'):
			try:
				val = next(args)
			except:
				doError('-rs requires "enable" or "disable"', True)
			if val.lower() in switchvalues:
				if val.lower() in switchoff:
					sysvals.rs = -1
				else:
					sysvals.rs = 1
			else:
				doError('invalid option: %s, use "enable/disable" or "on/off"' % val, True)
		elif(arg == '-display'):
			try:
				val = next(args)
			except:
				doError('-display requires an mode value', True)
			disopt = ['on', 'off', 'standby', 'suspend']
			if val.lower() not in disopt:
				doError('valid display mode values are %s' % disopt, True)
			sysvals.display = val.lower()
		elif(arg == '-maxdepth'):
			sysvals.max_graph_depth = getArgInt('-maxdepth', args, 0, 1000)
		elif(arg == '-rtcwake'):
			try:
				val = next(args)
			except:
				doError('No rtcwake time supplied', True)
			if val.lower() in switchoff:
				sysvals.rtcwake = False
			else:
				sysvals.rtcwake = True
				# the value was already read above, so it is passed directly
				# (not the args iterator)
				sysvals.rtcwaketime = getArgInt('-rtcwake', val, 0, 3600, False)
		elif(arg == '-timeprec'):
			sysvals.setPrecision(getArgInt('-timeprec', args, 0, 6))
		elif(arg == '-mindev'):
			sysvals.mindevlen = getArgFloat('-mindev', args, 0.0, 10000.0)
		elif(arg == '-mincg'):
			sysvals.mincglen = getArgFloat('-mincg', args, 0.0, 10000.0)
		elif(arg == '-bufsize'):
			sysvals.bufsize = getArgInt('-bufsize', args, 1, 1024*1024*8)
		elif(arg == '-cgtest'):
			sysvals.cgtest = getArgInt('-cgtest', args, 0, 1)
		elif(arg == '-cgphase'):
			try:
				val = next(args)
			except:
				doError('No phase name supplied', True)
			d = Data(0)
			if val not in d.phasedef:
				doError('invalid phase --> (%s: %s), valid phases are %s'\
					% (arg, val, d.phasedef.keys()), True)
			sysvals.cgphase = val
		elif(arg == '-cgfilter'):
			try:
				val = next(args)
			except:
				doError('No callgraph functions supplied', True)
			sysvals.setCallgraphFilter(val)
		elif(arg == '-skipkprobe'):
			try:
				val = next(args)
			except:
				doError('No kprobe functions supplied', True)
			sysvals.skipKprobes(val)
		elif(arg == '-cgskip'):
			try:
				val = next(args)
			except:
				doError('No file supplied', True)
			if val.lower() in switchoff:
				sysvals.cgskip = ''
			else:
				sysvals.cgskip = sysvals.configFile(val)
				if(not sysvals.cgskip):
					doError('%s does not exist' % sysvals.cgskip)
		elif(arg == '-callloop-maxgap'):
			sysvals.callloopmaxgap = getArgFloat('-callloop-maxgap', args, 0.0, 1.0)
		elif(arg == '-callloop-maxlen'):
			sysvals.callloopmaxlen = getArgFloat('-callloop-maxlen', args, 0.0, 1.0)
		elif(arg == '-cmd'):
			try:
				val = next(args)
			except:
				doError('No command string supplied', True)
			sysvals.testcommand = val
			sysvals.suspendmode = 'command'
		elif(arg == '-expandcg'):
			sysvals.cgexp = True
		elif(arg == '-srgap'):
			sysvals.srgap = 5
		elif(arg == '-maxfail'):
			sysvals.maxfail = getArgInt('-maxfail', args, 0, 1000000)
		elif(arg == '-multi'):
			try:
				c, d = next(args), next(args)
			except:
				doError('-multi requires two values', True)
			sysvals.multiinit(c, d)
		elif(arg == '-o'):
			try:
				val = next(args)
			except:
				doError('No subdirectory name supplied', True)
			sysvals.outdir = sysvals.setOutputFolder(val)
		elif(arg == '-config'):
			try:
				val = next(args)
			except:
				doError('No text file supplied', True)
			file = sysvals.configFile(val)
			if(not file):
				doError('%s does not exist' % val)
			configFromFile(file)
		elif(arg == '-fadd'):
			try:
				val = next(args)
			except:
				doError('No text file supplied', True)
			file = sysvals.configFile(val)
			if(not file):
				doError('%s does not exist' % val)
			sysvals.addFtraceFilterFunctions(file)
		elif(arg == '-dmesg'):
			# re-analysis mode: use an existing dmesg log instead of testing
			try:
				val = next(args)
			except:
				doError('No dmesg file supplied', True)
			sysvals.notestrun = True
			sysvals.dmesgfile = val
			if(os.path.exists(sysvals.dmesgfile) == False):
				doError('%s does not exist' % sysvals.dmesgfile)
		elif(arg == '-ftrace'):
			# re-analysis mode: use an existing ftrace log instead of testing
			try:
				val = next(args)
			except:
				doError('No ftrace file supplied', True)
			sysvals.notestrun = True
			sysvals.ftracefile = val
			if(os.path.exists(sysvals.ftracefile) == False):
				doError('%s does not exist' % sysvals.ftracefile)
		elif(arg == '-summary'):
			try:
				val = next(args)
			except:
				doError('No directory supplied', True)
			cmd = 'summary'
			sysvals.outdir = val
			sysvals.notestrun = True
			if(os.path.isdir(val) == False):
				doError('%s is not accesible' % val)
		elif(arg == '-filter'):
			try:
				val = next(args)
			except:
				doError('No devnames supplied', True)
			sysvals.setDeviceFilter(val)
		elif(arg == '-result'):
			try:
				val = next(args)
			except:
				doError('No result file supplied', True)
			sysvals.result = val
			sysvals.signalHandlerInit()
		else:
			doError('Invalid argument: '+arg, True)
	# compatibility errors
	if(sysvals.usecallgraph and sysvals.usedevsrc):
		doError('-dev is not compatible with -f')
	if(sysvals.usecallgraph and sysvals.useprocmon):
		doError('-proc is not compatible with -f')
	# load the callgraph skip list only when callgraphs are requested
	if sysvals.usecallgraph and sysvals.cgskip:
		sysvals.vprint('Using cgskip file: %s' % sysvals.cgskip)
		sysvals.setCallgraphBlacklist(sysvals.cgskip)
	# callgraph size cannot exceed device size
	if sysvals.mincglen < sysvals.mindevlen:
		sysvals.mincglen = sysvals.mindevlen
	# remove existing buffers before calculating memory
	if(sysvals.usecallgraph or sysvals.usedevsrc):
		sysvals.fsetVal('16', 'buffer_size_kb')
	sysvals.cpuInfo()
	# just run a utility command and exit
	# (the process exit code reflects the result of the command)
	if(cmd != ''):
		ret = 0
		if(cmd == 'status'):
			if not statusCheck(True):
				ret = 1
		elif(cmd == 'fpdt'):
			if not getFPDT(True):
				ret = 1
		elif(cmd == 'sysinfo'):
			sysvals.printSystemInfo(True)
		elif(cmd == 'devinfo'):
			deviceInfo()
		elif(cmd == 'modes'):
			pprint(getModes())
		elif(cmd == 'flist'):
			sysvals.getFtraceFilterFunctions(True)
		elif(cmd == 'flistall'):
			sysvals.getFtraceFilterFunctions(False)
		elif(cmd == 'summary'):
			runSummary(sysvals.outdir, True, genhtml)
		elif(cmd in ['xon', 'xoff', 'xstandby', 'xsuspend', 'xinit', 'xreset']):
			# strip the leading 'x' to get the display mode to toggle
			sysvals.verbose = True
			ret = sysvals.displayControl(cmd[1:])
		elif(cmd == 'xstat'):
			pprint('Display Status: %s' % sysvals.displayControl('stat').upper())
		elif(cmd == 'wificheck'):
			dev = sysvals.checkWifi()
			if dev:
				print('%s is connected' % sysvals.wifiDetails(dev))
			else:
				print('No wifi connection found')
		elif(cmd == 'cmdinfo'):
			for out in sysvals.cmdinfo(False, True):
				print('[%s - %s]\n%s\n' % out)
		sys.exit(ret)
	# if instructed, re-analyze existing data files
	if(sysvals.notestrun):
		stamp = rerunTest(sysvals.outdir)
		sysvals.outputResult(stamp)
		sys.exit(0)
	# verify that we can run a test
	error = statusCheck()
	if(error):
		doError(error)
	# extract mem/disk extra modes and convert
	# e.g. 'mem-shallow' -> standby, 'mem-s2idle' -> freeze, 'mem' -> deep/mem
	mode = sysvals.suspendmode
	if mode.startswith('mem'):
		memmode = mode.split('-', 1)[-1] if '-' in mode else 'deep'
		if memmode == 'shallow':
			mode = 'standby'
		elif memmode == 's2idle':
			mode = 'freeze'
		else:
			mode = 'mem'
		sysvals.memmode = memmode
		sysvals.suspendmode = mode
	if mode.startswith('disk-'):
		sysvals.diskmode = mode.split('-', 1)[-1]
		sysvals.suspendmode = 'disk'
	sysvals.systemInfo(dmidecode(sysvals.mempath))
	# failcnt counts consecutive failures for -maxfail; ret is the exit code
	failcnt, ret = 0, 0
	if sysvals.multitest['run']:
		# run multiple tests in a separate subdirectory
		if not sysvals.outdir:
			if 'time' in sysvals.multitest:
				s = '-%dm' % sysvals.multitest['time']
			else:
				s = '-x%d' % sysvals.multitest['count']
			sysvals.outdir = datetime.now().strftime('suspend-%y%m%d-%H%M%S'+s)
		if not os.path.isdir(sysvals.outdir):
			os.makedirs(sysvals.outdir)
		sysvals.sudoUserchown(sysvals.outdir)
		# 'time'-limited runs stop at the computed finish timestamp
		finish = datetime.now()
		if 'time' in sysvals.multitest:
			finish += timedelta(minutes=sysvals.multitest['time'])
		for i in range(sysvals.multitest['count']):
			sysvals.multistat(True, i, finish)
			if i != 0 and sysvals.multitest['delay'] > 0:
				pprint('Waiting %d seconds...' % (sysvals.multitest['delay']))
				time.sleep(sysvals.multitest['delay'])
			fmt = 'suspend-%y%m%d-%H%M%S'
			sysvals.testdir = os.path.join(sysvals.outdir, datetime.now().strftime(fmt))
			ret = runTest(i+1, not sysvals.verbose)
			# a success resets the consecutive-failure counter
			failcnt = 0 if not ret else failcnt + 1
			if sysvals.maxfail > 0 and failcnt >= sysvals.maxfail:
				pprint('Maximum fail count of %d reached, aborting multitest' % (sysvals.maxfail))
				break
			sysvals.resetlog()
			sysvals.multistat(False, i, finish)
			if 'time' in sysvals.multitest and datetime.now() >= finish:
				break
		if not sysvals.skiphtml:
			runSummary(sysvals.outdir, False, False)
		sysvals.sudoUserchown(sysvals.outdir)
	else:
		if sysvals.outdir:
			sysvals.testdir = sysvals.outdir
		# run the test in the current directory
		ret = runTest()
	# reset to default values after testing
	if sysvals.display:
		sysvals.displayControl('reset')
	if sysvals.rs != 0:
		sysvals.setRuntimeSuspend(False)
	sys.exit(ret)
| grace-kernel-grace-kernel-6.1.y | tools/power/pm-graph/sleepgraph.py |
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0-only
# -*- coding: utf-8 -*-
#
""" This utility can be used to debug and tune the performance of the
intel_pstate driver. This utility can be used in two ways:
- If there is Linux trace file with pstate_sample events enabled, then
this utility can parse the trace file and generate performance plots.
- If user has not specified a trace file as input via command line parameters,
then this utility enables and collects trace data for a user specified interval
and generates performance plots.
Prerequisites:
Python version 2.7.x or higher
gnuplot 5.0 or higher
gnuplot-py 1.8 or higher
(Most of the distributions have these required packages. They may be called
gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
HWP (Hardware P-States are disabled)
Kernel config for Linux trace is enabled
see print_help(): for Usage and Output details
"""
from __future__ import print_function
from datetime import datetime
import subprocess
import os
import time
import re
import signal
import sys
import getopt
import Gnuplot
from numpy import *
from decimal import *
__author__ = "Srinivas Pandruvada"
__copyright__ = " Copyright (c) 2017, Intel Corporation. "
__license__ = "GPL version 2"
# hard upper bound on the number of CPUs the parser will track
MAX_CPUS = 256
# Define the csv file columns
# (1-based indices, matching gnuplot's "using" column numbering)
C_COMM = 18
C_GHZ = 17
C_ELAPSED = 16
C_SAMPLE = 15
C_DURATION = 14
C_LOAD = 13
C_BOOST = 12
C_FREQ = 11
C_TSC = 10
C_APERF = 9
C_MPERF = 8
C_TO = 7
C_FROM = 6
C_SCALED = 5
C_CORE = 4
C_USEC = 3
C_SEC = 2
C_CPU = 1
# NOTE(review): 'global' at module scope is a no-op; kept as documentation
# of the state shared with the trace-parsing functions below.
global sample_num, last_sec_cpu, last_usec_cpu, start_time, testname, trace_file
# 11 digits covers uptime to 115 days
getcontext().prec = 11
sample_num =0
last_sec_cpu = [0] * MAX_CPUS
last_usec_cpu = [0] * MAX_CPUS
def print_help(driver_name):
    """Print usage, arguments and output description for the tracer script.

    driver_name: prefix used in the example command lines
    (e.g. 'intel_pstate').
    """
    print('%s_tracer.py:'%driver_name)
    print(' Usage:')
    print(' If the trace file is available, then to simply parse and plot, use (sudo not required):')
    print(' ./%s_tracer.py [-c cpus] -t <trace_file> -n <test_name>'%driver_name)
    print(' Or')
    # fixed typo: the long option is --trace_file, not ---trace_file
    print(' ./%s_tracer.py [--cpu cpus] --trace_file <trace_file> --name <test_name>'%driver_name)
    print(' To generate trace file, parse and plot, use (sudo required):')
    print(' sudo ./%s_tracer.py [-c cpus] -i <interval> -n <test_name> -m <kbytes>'%driver_name)
    print(' Or')
    print(' sudo ./%s_tracer.py [--cpu cpus] --interval <interval> --name <test_name> --memory <kbytes>'%driver_name)
    print(' Optional argument:')
    print(' cpus: comma separated list of CPUs')
    print(' kbytes: Kilo bytes of memory per CPU to allocate to the trace buffer. Default: 10240')
    print(' Output:')
    print(' If not already present, creates a "results/test_name" folder in the current working directory with:')
    # fixed typo: "seperated" -> "separated"
    print(' cpu.csv - comma separated values file with trace contents and some additional calculations.')
    print(' cpu???.csv - comma separated values file for CPU number ???.')
    print(' *.png - a variety of PNG format plot files created from the trace contents and the additional calculations.')
    print(' Notes:')
    print(' Avoid the use of _ (underscore) in test names, because in gnuplot it is a subscript directive.')
    print(' Maximum number of CPUs is {0:d}. If there are more the script will abort with an error.'.format(MAX_CPUS))
    print(' Off-line CPUs cause the script to list some warnings, and create some empty files. Use the CPU mask feature for a clean run.')
    print(' Empty y range warnings for autoscaled plots can occur and can be ignored.')
def plot_perf_busy_with_sample(cpu_index):
    """Per-CPU chart of P-state and busy/performance metrics vs. sample number."""
    csv_name = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_name):
        return
    gp = common_all_gnuplot_settings("cpu%03d_perf_busy_vs_samples.png" % cpu_index)
    # y1 (P-state) autoscales; y2 is the fixed percentage axis
    gp('set y2range [0:200]')
    gp('set y2tics 0, 10')
    gp('set title "{} : cpu perf busy vs. sample : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
    # Override common
    gp('set xlabel "Samples"')
    gp('set ylabel "P-State"')
    gp('set y2label "Scaled Busy/performance/io-busy(%)"')
    set_4_plot_linestyles(gp)
    gp('plot "' + csv_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_SAMPLE, C_CORE))
    gp('"' + csv_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_SAMPLE, C_SCALED))
    gp('"' + csv_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_SAMPLE, C_BOOST))
    gp('"' + csv_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_SAMPLE, C_TO))
def plot_perf_busy(cpu_index):
    """Per-CPU chart of P-state and busy/performance metrics over elapsed time."""
    csv_name = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_name):
        return
    gp = common_all_gnuplot_settings("cpu%03d_perf_busy.png" % cpu_index)
    # y1 (P-state) autoscales; y2 is the fixed percentage axis
    gp('set y2range [0:200]')
    gp('set y2tics 0, 10')
    gp('set title "{} : perf busy : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
    gp('set ylabel "P-State"')
    gp('set y2label "Scaled Busy/performance/io-busy(%)"')
    set_4_plot_linestyles(gp)
    gp('plot "' + csv_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y2 title "performance",\\'.format(C_ELAPSED, C_CORE))
    gp('"' + csv_name + '" using {:d}:{:d} with linespoints linestyle 2 axis x1y2 title "scaled-busy",\\'.format(C_ELAPSED, C_SCALED))
    gp('"' + csv_name + '" using {:d}:{:d} with linespoints linestyle 3 axis x1y2 title "io-boost",\\'.format(C_ELAPSED, C_BOOST))
    gp('"' + csv_name + '" using {:d}:{:d} with linespoints linestyle 4 axis x1y1 title "P-State"'.format(C_ELAPSED, C_TO))
def plot_durations(cpu_index):
    """Per-CPU chart of the timer duration of each sample."""
    csv_name = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_name):
        return
    gp = common_all_gnuplot_settings("cpu%03d_durations.png" % cpu_index)
    # autoscale this one, no set y range
    gp('set title "{} : durations : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
    gp('set ylabel "Timer Duration (MilliSeconds)"')
    # single curve, so the legend adds nothing
    gp('set key off')
    set_4_plot_linestyles(gp)
    gp('plot "' + csv_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_DURATION))
def plot_loads(cpu_index):
    """Per-CPU chart of CPU load percentage over elapsed time."""
    csv_name = 'cpu{:0>3}.csv'.format(cpu_index)
    if not os.path.exists(csv_name):
        return
    gp = common_all_gnuplot_settings("cpu%03d_loads.png" % cpu_index)
    gp('set yrange [0:100]')
    gp('set ytics 0, 10')
    gp('set title "{} : loads : CPU {:0>3} : {:%F %H:%M}"'.format(testname, cpu_index, datetime.now()))
    gp('set ylabel "CPU load (percent)"')
    # single curve, so the legend adds nothing
    gp('set key off')
    set_4_plot_linestyles(gp)
    gp('plot "' + csv_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_LOAD))
def plot_pstate_cpu_with_sample():
    """All-CPU P-state scatter plot, indexed by sample number."""
    if not os.path.exists('cpu.csv'):
        return
    gp = common_all_gnuplot_settings('all_cpu_pstates_vs_samples.png')
    # autoscale y; override the common x-axis labelling
    gp('set xlabel "Samples"')
    gp('set ylabel "P-State"')
    gp('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
    names = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'', shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO))
def plot_pstate_cpu():
    """All-CPU P-state scatter plot over elapsed time, from the per-CPU csv files."""
    gp = common_all_gnuplot_settings('all_cpu_pstates.png')
    # autoscale this one, no set y range
    gp('set ylabel "P-State"')
    gp('set title "{} : cpu pstates : {:%F %H:%M}"'.format(testname, datetime.now()))
    # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file.
    # plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s'
    #
    names = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'', shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO))
def plot_load_cpu():
    """All-CPU load-percentage scatter plot over elapsed time."""
    gp = common_all_gnuplot_settings('all_cpu_loads.png')
    gp('set yrange [0:100]')
    gp('set ylabel "CPU load (percent)"')
    gp('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
    names = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'', shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD))
def plot_frequency_cpu():
    """All-CPU frequency (GHz) scatter plot over elapsed time."""
    gp = common_all_gnuplot_settings('all_cpu_frequencies.png')
    # autoscale this one, no set y range
    gp('set ylabel "CPU Frequency (GHz)"')
    gp('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
    names = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'', shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ))
def plot_duration_cpu():
    """All-CPU timer-duration scatter plot over elapsed time."""
    gp = common_all_gnuplot_settings('all_cpu_durations.png')
    # autoscale this one, no set y range
    gp('set ylabel "Timer Duration (MilliSeconds)"')
    gp('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
    names = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'', shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION))
def plot_scaled_cpu():
    """All-CPU scaled-busy scatter plot over elapsed time."""
    gp = common_all_gnuplot_settings('all_cpu_scaled.png')
    # autoscale this one, no set y range
    gp('set ylabel "Scaled Busy (Unitless)"')
    gp('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
    names = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'', shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED))
def plot_boost_cpu():
    """All-CPU IO-boost-percentage scatter plot over elapsed time."""
    gp = common_all_gnuplot_settings('all_cpu_boost.png')
    gp('set yrange [0:100]')
    gp('set ylabel "CPU IO Boost (percent)"')
    gp('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
    names = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'', shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST))
def plot_ghz_cpu():
    """All-CPU TSC-derived frequency scatter plot (sanity check) over elapsed time."""
    gp = common_all_gnuplot_settings('all_cpu_ghz.png')
    # autoscale this one, no set y range
    gp('set ylabel "TSC Frequency (GHz)"')
    gp('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
    names = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'', shell=True).decode('utf-8').replace('\n', ' ')
    gp('title_list = "{}"'.format(names))
    gp("plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ))
def common_all_gnuplot_settings(output_png):
    """Return a gnuplot handle with the shared settings, directed at output_png."""
    gp = common_gnuplot_settings()
    gp('set output "{}"'.format(output_png))
    return gp
def common_gnuplot_settings():
    """Create a persistent Gnuplot handle preloaded with the settings shared by all charts."""
    gp = Gnuplot.Gnuplot(persist=1)
    # issued in the same order the settings were originally sent
    for setting in (
        # rigor only: csv input is assumed to be comma separated
        'set datafile separator \",\"',
        'set ytics nomirror',
        'set xtics nomirror',
        'set xtics font ", 10"',
        'set ytics font ", 10"',
        'set tics out scale 1.0',
        'set grid',
        'set key out horiz',
        'set key bot center',
        'set key samplen 2 spacing .8 font ", 9"',
        'set term png size 1200, 600',
        'set title font ", 11"',
        'set ylabel font ", 10"',
        'set xlabel font ", 10"',
        'set xlabel offset 0, 0.5',
        'set xlabel "Elapsed Time (Seconds)"',
    ):
        gp(setting)
    return gp
def set_4_plot_linestyles(g_plot):
    """Define line styles 1-4 (green/red/purple/blue) used by the 4-curve charts."""
    for style_num, rgb in enumerate(('green', 'red', 'purple', 'blue'), start=1):
        g_plot('set style line %d linetype 1 linecolor rgb "%s" pointtype -1' % (style_num, rgb))
def store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz, cpu_mask):
    """ Append one parsed sample to the master cpu.csv file.

    Skips CPUs that are masked out.  On success, sets the module-level
    graph_data_present flag so the caller knows there is data to plot.
    """
    global graph_data_present
    if cpu_mask[cpu_int] == 0:
        return
    try:
        # context manager guarantees the handle is closed even if write() fails
        with open('cpu.csv', 'a') as f_handle:
            f_handle.write("CPU_%03u, %05u, %06u, %u, %u, %u, %u, %u, %u, %u, %.4f, %u, %.2f, %.3f, %u, %.3f, %.3f, %s\n" % (cpu_int, int(time_pre_dec), int(time_post_dec), int(core_busy), int(scaled), int(_from), int(_to), int(mperf), int(aperf), int(tsc), freq_ghz, int(io_boost), load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm))
    except OSError:
        # narrowed from a bare except: only I/O failures are expected here
        print('IO error cpu.csv')
        return
    graph_data_present = True
def split_csv(current_max_cpu, cpu_mask):
    """Separate the master cpu.csv into one csv file per unmasked CPU."""
    if not os.path.exists('cpu.csv'):
        return
    for cpu_no in range(current_max_cpu + 1):
        if cpu_mask[int(cpu_no)] == 0:
            continue
        # header line first, then every row belonging to this CPU
        os.system('grep -m 1 common_cpu cpu.csv > cpu{:0>3}.csv'.format(cpu_no))
        os.system('grep CPU_{:0>3} cpu.csv >> cpu{:0>3}.csv'.format(cpu_no, cpu_no))
def fix_ownership(path):
    """Change the owner of *path* to SUDO_UID:SUDO_GID, if running under sudo.

    A no-op when either variable is absent; the original code would crash
    with int(None) if SUDO_UID was set but SUDO_GID was not.
    """
    uid = os.environ.get('SUDO_UID')
    gid = os.environ.get('SUDO_GID')
    if uid is not None and gid is not None:
        os.chown(path, int(uid), int(gid))
def cleanup_data_files():
    """Remove any stale cpu.csv and start a fresh one holding only the header row."""
    if os.path.exists('cpu.csv'):
        os.remove('cpu.csv')
    header = ('common_cpu, common_secs, common_usecs, core_busy, scaled_busy, '
        'from, to, mperf, aperf, tsc, freq, boost, '
        'load, duration_ms, sample_num, elapsed_time, tsc_ghz, common_comm')
    with open('cpu.csv', 'a') as f_handle:
        f_handle.write(header)
        f_handle.write('\n')
def clear_trace_file():
    """Truncate the kernel trace buffer; exit(2) on I/O failure.

    Opening the trace file for write empties its contents.
    """
    try:
        with open('/sys/kernel/debug/tracing/trace', 'w'):
            pass
    except OSError:
        # narrowed from a bare except: only I/O failures are expected here
        print('IO error clearing trace file ')
        sys.exit(2)
def enable_trace(trace_file):
    """Write "1" to *trace_file* to turn tracing on; exit(2) on I/O failure."""
    try:
        # context manager replaces the original leaked open().write() handle
        with open(trace_file, 'w') as fp:
            fp.write("1")
    except OSError:
        print('IO error enabling trace ')
        sys.exit(2)
def disable_trace(trace_file):
    """Write "0" to *trace_file* to turn tracing off; exit(2) on I/O failure."""
    try:
        # context manager replaces the original leaked open().write() handle
        with open(trace_file, 'w') as fp:
            fp.write("0")
    except OSError:
        print('IO error disabling trace ')
        sys.exit(2)
def set_trace_buffer_size(memory):
""" Set trace buffer size """
try:
with open('/sys/kernel/debug/tracing/buffer_size_kb', 'w') as fp:
fp.write(memory)
except:
print('IO error setting trace buffer size ')
sys.exit(2)
def free_trace_buffer():
""" Free the trace buffer memory """
try:
open('/sys/kernel/debug/tracing/buffer_size_kb'
, 'w').write("1")
except:
print('IO error freeing trace buffer ')
sys.exit(2)
def read_trace_data(filename, cpu_mask):
    """Read the raw ftrace text from *filename*, parse every pstate_sample
    event, derive per-sample load/frequency values and store them via
    store_csv(), then split the master csv into per-CPU files."""
    global current_max_cpu
    global sample_num, last_sec_cpu, last_usec_cpu, start_time

    try:
        data = open(filename, 'r').read()
    except:
        print('Error opening ', filename)
        sys.exit(2)

    for line in data.splitlines():
        # One trace event per line: capture the CPU number (group 3), the
        # seconds.microseconds timestamp (groups 6/8) and every key=value
        # field emitted by the pstate_sample tracepoint.
        search_obj = \
            re.search(r'(^(.*?)\[)((\d+)[^\]])(.*?)(\d+)([.])(\d+)(.*?core_busy=)(\d+)(.*?scaled=)(\d+)(.*?from=)(\d+)(.*?to=)(\d+)(.*?mperf=)(\d+)(.*?aperf=)(\d+)(.*?tsc=)(\d+)(.*?freq=)(\d+)'
                , line)

        if search_obj:
            cpu = search_obj.group(3)
            cpu_int = int(cpu)
            cpu = str(cpu_int)

            time_pre_dec = search_obj.group(6)
            time_post_dec = search_obj.group(8)
            core_busy = search_obj.group(10)
            scaled = search_obj.group(12)
            _from = search_obj.group(14)
            _to = search_obj.group(16)
            mperf = search_obj.group(18)
            aperf = search_obj.group(20)
            tsc = search_obj.group(22)
            freq = search_obj.group(24)
            common_comm = search_obj.group(2).replace(' ', '')

            # Not all kernel versions have io_boost field
            io_boost = '0'
            search_obj = re.search(r'.*?io_boost=(\d+)', line)
            if search_obj:
                io_boost = search_obj.group(1)

            if sample_num == 0 :
                start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000)
            sample_num += 1

            if last_sec_cpu[cpu_int] == 0 :
                # First sample seen for this CPU only seeds the timestamps;
                # no row can be stored until a duration is computable.
                last_sec_cpu[cpu_int] = time_pre_dec
                last_usec_cpu[cpu_int] = time_post_dec
            else :
                duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int]))
                duration_ms = Decimal(duration_us) / Decimal(1000)
                last_sec_cpu[cpu_int] = time_pre_dec
                last_usec_cpu[cpu_int] = time_post_dec
                elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time
                # load: mperf cycles as a percentage of tsc cycles.
                load = Decimal(int(mperf)*100)/ Decimal(tsc)
                freq_ghz = Decimal(freq)/Decimal(1000000)
                # Sanity check calculation, typically anomalies indicate missed samples
                # However, check for 0 (should never occur)
                tsc_ghz = Decimal(0)
                if duration_ms != Decimal(0) :
                    tsc_ghz = Decimal(tsc)/duration_ms/Decimal(1000000)
                store_csv(cpu_int, time_pre_dec, time_post_dec, core_busy, scaled, _from, _to, mperf, aperf, tsc, freq_ghz, io_boost, common_comm, load, duration_ms, sample_num, elapsed_time, tsc_ghz, cpu_mask)

            if cpu_int > current_max_cpu:
                current_max_cpu = cpu_int
    # End of for each trace line loop
    # Now separate the main overall csv file into per CPU csv files.
    split_csv(current_max_cpu, cpu_mask)
def signal_handler(signal, frame):
    """SIGINT handler: stop tracing and release the trace buffer before exit."""
    # NOTE(review): the 'signal' parameter shadows the signal module here.
    print(' SIGINT: Forcing cleanup before exit.')
    # Only a live-capture run (-i) changed tracing state; restore it.
    if interval:
        disable_trace(trace_file)
        clear_trace_file()
        # Free the memory
        free_trace_buffer()
    sys.exit(0)
if __name__ == "__main__":
trace_file = "/sys/kernel/debug/tracing/events/power/pstate_sample/enable"
signal.signal(signal.SIGINT, signal_handler)
interval = ""
filename = ""
cpu_list = ""
testname = ""
memory = "10240"
graph_data_present = False;
valid1 = False
valid2 = False
cpu_mask = zeros((MAX_CPUS,), dtype=int)
try:
opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:m:",["help","trace_file=","interval=","cpu=","name=","memory="])
except getopt.GetoptError:
print_help('intel_pstate')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help('intel_pstate')
sys.exit()
elif opt in ("-t", "--trace_file"):
valid1 = True
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
filename = os.path.join(location, arg)
elif opt in ("-i", "--interval"):
valid1 = True
interval = arg
elif opt in ("-c", "--cpu"):
cpu_list = arg
elif opt in ("-n", "--name"):
valid2 = True
testname = arg
elif opt in ("-m", "--memory"):
memory = arg
if not (valid1 and valid2):
print_help('intel_pstate')
sys.exit()
if cpu_list:
for p in re.split("[,]", cpu_list):
if int(p) < MAX_CPUS :
cpu_mask[int(p)] = 1
else:
for i in range (0, MAX_CPUS):
cpu_mask[i] = 1
if not os.path.exists('results'):
os.mkdir('results')
# The regular user needs to own the directory, not root.
fix_ownership('results')
os.chdir('results')
if os.path.exists(testname):
print('The test name directory already exists. Please provide a unique test name. Test re-run not supported, yet.')
sys.exit()
os.mkdir(testname)
# The regular user needs to own the directory, not root.
fix_ownership(testname)
os.chdir(testname)
# Temporary (or perhaps not)
cur_version = sys.version_info
print('python version (should be >= 2.7):')
print(cur_version)
# Left as "cleanup" for potential future re-run ability.
cleanup_data_files()
if interval:
filename = "/sys/kernel/debug/tracing/trace"
clear_trace_file()
set_trace_buffer_size(memory)
enable_trace(trace_file)
print('Sleeping for ', interval, 'seconds')
time.sleep(int(interval))
disable_trace(trace_file)
current_max_cpu = 0
read_trace_data(filename, cpu_mask)
if interval:
clear_trace_file()
# Free the memory
free_trace_buffer()
if graph_data_present == False:
print('No valid data to plot')
sys.exit(2)
for cpu_no in range(0, current_max_cpu + 1):
plot_perf_busy_with_sample(cpu_no)
plot_perf_busy(cpu_no)
plot_durations(cpu_no)
plot_loads(cpu_no)
plot_pstate_cpu_with_sample()
plot_pstate_cpu()
plot_load_cpu()
plot_frequency_cpu()
plot_duration_cpu()
plot_scaled_cpu()
plot_boost_cpu()
plot_ghz_cpu()
# It is preferrable, but not necessary, that the regular user owns the files, not root.
for root, dirs, files in os.walk('.'):
for f in files:
fix_ownership(f)
os.chdir('../../')
| grace-kernel-grace-kernel-6.1.y | tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-only
# -*- coding: utf-8 -*-
#
""" This utility can be used to debug and tune the performance of the
AMD P-State driver. It imports intel_pstate_tracer to analyze AMD P-State
trace event.
Prerequisites:
Python version 2.7.x or higher
gnuplot 5.0 or higher
gnuplot-py 1.8 or higher
(Most of the distributions have these required packages. They may be called
gnuplot-py, python-gnuplot or python3-gnuplot, gnuplot-nox, ... )
Kernel config for Linux trace is enabled
see print_help(): for Usage and Output details
"""
from __future__ import print_function
from datetime import datetime
import subprocess
import os
import time
import re
import signal
import sys
import getopt
import Gnuplot
from numpy import *
from decimal import *
sys.path.append('../intel_pstate_tracer')
#import intel_pstate_tracer
import intel_pstate_tracer as ipt
__license__ = "GPL version 2"
MAX_CPUS = 256
# Define the csv file columns
C_COMM = 15
C_ELAPSED = 14
C_SAMPLE = 13
C_DURATION = 12
C_LOAD = 11
C_TSC = 10
C_APERF = 9
C_MPERF = 8
C_FREQ = 7
C_MAX_PERF = 6
C_DES_PERF = 5
C_MIN_PERF = 4
C_USEC = 3
C_SEC = 2
C_CPU = 1
global sample_num, last_sec_cpu, last_usec_cpu, start_time, test_name, trace_file
getcontext().prec = 11
sample_num =0
last_sec_cpu = [0] * MAX_CPUS
last_usec_cpu = [0] * MAX_CPUS
def plot_per_cpu_freq(cpu_index):
    """Plot the frequency-vs-time graph for one CPU into cpuNNN_frequency.png."""
    file_name = 'cpu{:0>3}.csv'.format(cpu_index)
    if os.path.exists(file_name):
        output_png = "cpu%03d_frequency.png" % cpu_index
        g_plot = ipt.common_gnuplot_settings()
        g_plot('set output "' + output_png + '"')
        g_plot('set yrange [0:7]')
        g_plot('set ytics 0, 1')
        # Fix: the original issued 'set ylabel' twice with different text;
        # the later, less descriptive one ("CPU frequency") overrode this
        # one.  Keep only the descriptive label.
        g_plot('set ylabel "CPU Frequency (GHz)"')
        g_plot('set title "{} : frequency : CPU {:0>3} : {:%F %H:%M}"'.format(test_name, cpu_index, datetime.now()))
        g_plot('set key off')
        ipt.set_4_plot_linestyles(g_plot)
        g_plot('plot "' + file_name + '" using {:d}:{:d} with linespoints linestyle 1 axis x1y1'.format(C_ELAPSED, C_FREQ))
def plot_per_cpu_des_perf(cpu_index):
    """Plot desired-perf versus elapsed time for one CPU."""
    csv_name = f'cpu{cpu_index:0>3}.csv'
    if not os.path.exists(csv_name):
        # No samples were captured for this CPU.
        return
    g_plot = ipt.common_gnuplot_settings()
    g_plot(f'set output "cpu{cpu_index:03d}_des_perf.png"')
    g_plot('set yrange [0:255]')
    g_plot('set ylabel "des perf"')
    g_plot(f'set title "{test_name} : cpu des perf : CPU {cpu_index:0>3} : {datetime.now():%F %H:%M}"')
    g_plot('set key off')
    ipt.set_4_plot_linestyles(g_plot)
    g_plot(f'plot "{csv_name}" using {C_ELAPSED:d}:{C_DES_PERF:d} with linespoints linestyle 1 axis x1y1')
def plot_per_cpu_load(cpu_index):
    """Plot CPU load (percent) versus elapsed time for one CPU."""
    csv_name = f'cpu{cpu_index:0>3}.csv'
    if not os.path.exists(csv_name):
        # No samples were captured for this CPU.
        return
    g_plot = ipt.common_gnuplot_settings()
    g_plot(f'set output "cpu{cpu_index:03d}_load.png"')
    g_plot('set yrange [0:100]')
    g_plot('set ytics 0, 10')
    g_plot('set ylabel "CPU load (percent)"')
    g_plot(f'set title "{test_name} : cpu load : CPU {cpu_index:0>3} : {datetime.now():%F %H:%M}"')
    g_plot('set key off')
    ipt.set_4_plot_linestyles(g_plot)
    g_plot(f'plot "{csv_name}" using {C_ELAPSED:d}:{C_LOAD:d} with linespoints linestyle 1 axis x1y1')
def plot_all_cpu_frequency():
    """Plot all CPU frequencies on one graph."""
    output_png = 'all_cpu_frequencies.png'
    g_plot = ipt.common_gnuplot_settings()
    g_plot('set output "' + output_png + '"')
    g_plot('set ylabel "CPU Frequency (GHz)"')
    g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(test_name, datetime.now()))

    # Build the gnuplot iteration list from the per-CPU csv file names
    # (shell 'ls' + 'sed' strips the .csv suffix).
    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ)
    g_plot('title_list = "{}"'.format(title_list))
    g_plot(plot_str)

def plot_all_cpu_des_perf():
    """Plot all CPUs' desired perf on one graph."""
    output_png = 'all_cpu_des_perf.png'
    g_plot = ipt.common_gnuplot_settings()
    g_plot('set output "' + output_png + '"')
    g_plot('set ylabel "des perf"')
    g_plot('set title "{} : cpu des perf : {:%F %H:%M}"'.format(test_name, datetime.now()))

    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 255 ps 1 title i".format(C_ELAPSED, C_DES_PERF)
    g_plot('title_list = "{}"'.format(title_list))
    g_plot(plot_str)

def plot_all_cpu_load():
    """Plot all CPUs' load on one graph."""
    output_png = 'all_cpu_load.png'
    g_plot = ipt.common_gnuplot_settings()
    g_plot('set output "' + output_png + '"')
    g_plot('set yrange [0:100]')
    g_plot('set ylabel "CPU load (percent)"')
    g_plot('set title "{} : cpu load : {:%F %H:%M}"'.format(test_name, datetime.now()))

    title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
    plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 255 ps 1 title i".format(C_ELAPSED, C_LOAD)
    g_plot('title_list = "{}"'.format(title_list))
    g_plot(plot_str)
def store_csv(cpu_int, time_pre_dec, time_post_dec, min_perf, des_perf, max_perf, freq_ghz, mperf, aperf, tsc, common_comm, load, duration_ms, sample_num, elapsed_time, cpu_mask):
    """Append one amd_pstate sample row for *cpu_int* to the master cpu.csv.

    Rows for CPUs outside the requested cpu_mask are silently dropped.
    Sets the module-global graph_data_present once any row is written.
    """
    global graph_data_present
    if cpu_mask[cpu_int] == 0:
        return
    try:
        # 'with' guarantees the handle is closed even when the write fails
        # (the original leaked the handle on a failed write()).
        with open('cpu.csv', 'a') as f_handle:
            string_buffer = "CPU_%03u, %05u, %06u, %u, %u, %u, %.4f, %u, %u, %u, %.2f, %.3f, %u, %.3f, %s\n" % (cpu_int, int(time_pre_dec), int(time_post_dec), int(min_perf), int(des_perf), int(max_perf), freq_ghz, int(mperf), int(aperf), int(tsc), load, duration_ms, sample_num, elapsed_time, common_comm)
            f_handle.write(string_buffer)
    except OSError:
        # Narrowed from a bare 'except' so programming errors are not hidden.
        print('IO error cpu.csv')
        return

    graph_data_present = True
def cleanup_data_files():
    """Remove any stale master csv file and recreate it with only the header row."""
    if os.path.exists('cpu.csv'):
        os.remove('cpu.csv')
    # 'with' closes the handle even on a failed write; a single write in 'w'
    # mode replaces the original open('a') + two writes + close sequence.
    with open('cpu.csv', 'w') as f_handle:
        f_handle.write('common_cpu, common_secs, common_usecs, min_perf, des_perf, max_perf, freq, mperf, aperf, tsc, load, duration_ms, sample_num, elapsed_time, common_comm\n')
def read_trace_data(file_name, cpu_mask):
    """Read the raw ftrace text from *file_name*, parse every amd_pstate
    event, derive per-sample load/frequency values and store them via
    store_csv(), then split the master csv into per-CPU files."""
    global current_max_cpu
    global sample_num, last_sec_cpu, last_usec_cpu, start_time

    try:
        data = open(file_name, 'r').read()
    except:
        print('Error opening ', file_name)
        sys.exit(2)

    for line in data.splitlines():
        # One trace event per line: capture the CPU number (group 3), the
        # seconds.microseconds timestamp (groups 6/8) and every key=value
        # field emitted by the amd_pstate tracepoint.
        search_obj = \
            re.search(r'(^(.*?)\[)((\d+)[^\]])(.*?)(\d+)([.])(\d+)(.*?amd_min_perf=)(\d+)(.*?amd_des_perf=)(\d+)(.*?amd_max_perf=)(\d+)(.*?freq=)(\d+)(.*?mperf=)(\d+)(.*?aperf=)(\d+)(.*?tsc=)(\d+)'
                , line)

        if search_obj:
            cpu = search_obj.group(3)
            cpu_int = int(cpu)
            cpu = str(cpu_int)

            time_pre_dec = search_obj.group(6)
            time_post_dec = search_obj.group(8)
            min_perf = search_obj.group(10)
            des_perf = search_obj.group(12)
            max_perf = search_obj.group(14)
            freq = search_obj.group(16)
            mperf = search_obj.group(18)
            aperf = search_obj.group(20)
            tsc = search_obj.group(22)
            common_comm = search_obj.group(2).replace(' ', '')

            if sample_num == 0 :
                start_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000)
            sample_num += 1

            if last_sec_cpu[cpu_int] == 0 :
                # First sample seen for this CPU only seeds the timestamps;
                # no row can be stored until a duration is computable.
                last_sec_cpu[cpu_int] = time_pre_dec
                last_usec_cpu[cpu_int] = time_post_dec
            else :
                duration_us = (int(time_pre_dec) - int(last_sec_cpu[cpu_int])) * 1000000 + (int(time_post_dec) - int(last_usec_cpu[cpu_int]))
                duration_ms = Decimal(duration_us) / Decimal(1000)
                last_sec_cpu[cpu_int] = time_pre_dec
                last_usec_cpu[cpu_int] = time_post_dec
                elapsed_time = Decimal(time_pre_dec) + Decimal(time_post_dec) / Decimal(1000000) - start_time
                # load: mperf cycles as a percentage of tsc cycles.
                load = Decimal(int(mperf)*100)/ Decimal(tsc)
                freq_ghz = Decimal(freq)/Decimal(1000000)
                store_csv(cpu_int, time_pre_dec, time_post_dec, min_perf, des_perf, max_perf, freq_ghz, mperf, aperf, tsc, common_comm, load, duration_ms, sample_num, elapsed_time, cpu_mask)

            if cpu_int > current_max_cpu:
                current_max_cpu = cpu_int

    # Now separate the main overall csv file into per CPU csv files.
    ipt.split_csv(current_max_cpu, cpu_mask)
def signal_handler(signal, frame):
    """SIGINT handler: stop tracing and free the trace buffer before exit."""
    # NOTE(review): the 'signal' parameter shadows the signal module here.
    print(' SIGINT: Forcing cleanup before exit.')
    # Only a live-capture run (-i) changed tracing state; restore it.
    if interval:
        ipt.disable_trace(trace_file)
        ipt.clear_trace_file()
        ipt.free_trace_buffer()
    sys.exit(0)
trace_file = "/sys/kernel/debug/tracing/events/amd_cpu/enable"
signal.signal(signal.SIGINT, signal_handler)
interval = ""
file_name = ""
cpu_list = ""
test_name = ""
memory = "10240"
graph_data_present = False;
valid1 = False
valid2 = False
cpu_mask = zeros((MAX_CPUS,), dtype=int)
try:
opts, args = getopt.getopt(sys.argv[1:],"ht:i:c:n:m:",["help","trace_file=","interval=","cpu=","name=","memory="])
except getopt.GetoptError:
ipt.print_help('amd_pstate')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print()
sys.exit()
elif opt in ("-t", "--trace_file"):
valid1 = True
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
file_name = os.path.join(location, arg)
elif opt in ("-i", "--interval"):
valid1 = True
interval = arg
elif opt in ("-c", "--cpu"):
cpu_list = arg
elif opt in ("-n", "--name"):
valid2 = True
test_name = arg
elif opt in ("-m", "--memory"):
memory = arg
if not (valid1 and valid2):
ipt.print_help('amd_pstate')
sys.exit()
if cpu_list:
for p in re.split("[,]", cpu_list):
if int(p) < MAX_CPUS :
cpu_mask[int(p)] = 1
else:
for i in range (0, MAX_CPUS):
cpu_mask[i] = 1
if not os.path.exists('results'):
os.mkdir('results')
ipt.fix_ownership('results')
os.chdir('results')
if os.path.exists(test_name):
print('The test name directory already exists. Please provide a unique test name. Test re-run not supported, yet.')
sys.exit()
os.mkdir(test_name)
ipt.fix_ownership(test_name)
os.chdir(test_name)
cur_version = sys.version_info
print('python version (should be >= 2.7):')
print(cur_version)
cleanup_data_files()
if interval:
file_name = "/sys/kernel/debug/tracing/trace"
ipt.clear_trace_file()
ipt.set_trace_buffer_size(memory)
ipt.enable_trace(trace_file)
time.sleep(int(interval))
ipt.disable_trace(trace_file)
current_max_cpu = 0
read_trace_data(file_name, cpu_mask)
if interval:
ipt.clear_trace_file()
ipt.free_trace_buffer()
if graph_data_present == False:
print('No valid data to plot')
sys.exit(2)
for cpu_no in range(0, current_max_cpu + 1):
plot_per_cpu_freq(cpu_no)
plot_per_cpu_des_perf(cpu_no)
plot_per_cpu_load(cpu_no)
plot_all_cpu_des_perf()
plot_all_cpu_frequency()
plot_all_cpu_load()
for root, dirs, files in os.walk('.'):
for f in files:
ipt.fix_ownership(f)
os.chdir('../../')
| grace-kernel-grace-kernel-6.1.y | tools/power/x86/amd_pstate_tracer/amd_pstate_trace.py |
#!/usr/bin/env python3
#
# Copyright (C) 2022 Roman Gushchin <[email protected]>
# Copyright (C) 2022 Meta
import os
import argparse
import sys
def scan_cgroups(cgroup_root):
    """Walk the cgroup hierarchy and map each directory inode to its path."""
    # (memcg ino -> cgroup directory path)
    mapping = {}
    for parent, child_dirs, _files in os.walk(cgroup_root):
        paths = (os.path.join(parent, name) for name in child_dirs)
        mapping.update({os.stat(p).st_ino: p for p in paths})
    return mapping
def scan_shrinkers(shrinker_debugfs):
    """Collect (object count, shrinker name, memcg inode) triples.

    Each shrinker directory's 'count' file holds one "<memcg ino> <count>"
    pair per line.
    """
    result = []
    for parent, child_dirs, _files in os.walk(shrinker_debugfs):
        for shrinker_name in child_dirs:
            with open(os.path.join(parent, shrinker_name, "count")) as count_file:
                for line in count_file:
                    fields = line.split(' ')
                    memcg_ino = int(fields[0])
                    result.append((int(fields[1]), shrinker_name, memcg_ino))
    return result
def main():
    """Print the shrinkers with the largest object counts plus their cgroup paths."""
    parser = argparse.ArgumentParser(description='Display biggest shrinkers')
    parser.add_argument('-n', '--lines', type=int, help='Number of lines to print')
    args = parser.parse_args()

    cgroups = scan_cgroups("/sys/fs/cgroup/")
    shrinkers = scan_shrinkers("/sys/kernel/debug/shrinker/")
    # Sort by object count, largest first.
    shrinkers = sorted(shrinkers, reverse = True, key = lambda x: x[0])

    n = 0
    for s in shrinkers:
        count, name, ino = (s[0], s[1], s[2])
        # The list is sorted, so the first zero count ends the useful output.
        if count == 0:
            break
        # Inodes 0 and 1 are treated as the root cgroup.
        if ino == 0 or ino == 1:
            cg = "/"
        else:
            try:
                cg = cgroups[ino]
            except KeyError:
                # The cgroup may have disappeared since the scan.
                cg = "unknown (%d)" % ino

        print("%-8s %-20s %s" % (count, name, cg))

        n += 1
        if args.lines and n >= args.lines:
            break

if __name__ == '__main__':
    main()
| grace-kernel-grace-kernel-6.1.y | tools/cgroup/memcg_shrinker.py |
#!/usr/bin/env drgn
#
# Copyright (C) 2020 Roman Gushchin <[email protected]>
# Copyright (C) 2020 Facebook
from os import stat
import argparse
import sys
from drgn.helpers.linux import list_for_each_entry, list_empty
from drgn.helpers.linux import for_each_page
from drgn.helpers.linux.cpumask import for_each_online_cpu
from drgn.helpers.linux.percpu import per_cpu_ptr
from drgn import container_of, FaultError, Object, cast
DESC = """
This is a drgn script to provide slab statistics for memory cgroups.
It supports cgroup v2 and v1 and can emulate memory.kmem.slabinfo
interface of cgroup v1.
For drgn, visit https://github.com/osandov/drgn.
"""
MEMCGS = {}
OO_SHIFT = 16
OO_MASK = ((1 << OO_SHIFT) - 1)
def err(s):
    """Write an error line to stderr and terminate with exit status 1."""
    print(f'slabinfo.py: error: {s}', file=sys.stderr, flush=True)
    sys.exit(1)
def find_memcg_ids(css=prog['root_mem_cgroup'].css, prefix=''):
    """Recursively fill MEMCGS with kernfs id -> mem_cgroup for every cgroup.

    NOTE: the default argument is evaluated through drgn's 'prog' at import
    time, so this script only runs under drgn with kernel debug info.
    """
    if not list_empty(css.children.address_of_()):
        for css in list_for_each_entry('struct cgroup_subsys_state',
                                       css.children.address_of_(),
                                       'sibling'):
            name = prefix + '/' + css.cgroup.kn.name.string_().decode('utf-8')
            memcg = container_of(css, 'struct mem_cgroup', 'css')
            MEMCGS[css.cgroup.kn.id.value_()] = memcg
            find_memcg_ids(css, name)
def is_root_cache(s):
    """Return True if *s* is a root kmem_cache (not a per-memcg child cache)."""
    try:
        # Idiom fix: 'not x' replaces the redundant 'False if x else True'.
        return not s.memcg_params.root_cache
    except AttributeError:
        # Kernels without per-memcg caches have no memcg_params member;
        # every cache is then a root cache.
        return True
def cache_name(s):
    """Return the printable name of a cache, resolving child caches to their root."""
    target = s if is_root_cache(s) else s.memcg_params.root_cache
    return target.name.string_().decode('utf-8')
# SLUB packs allocation order and objects-per-slab into the single
# kmem_cache.oo word; these helpers unpack it.
def oo_order(s):
    """Extract the page allocation order from kmem_cache.oo."""
    return s.oo.x >> OO_SHIFT
def oo_objects(s):
    """Extract the objects-per-slab count from kmem_cache.oo."""
    return s.oo.x & OO_MASK
def count_partial(n, fn):
    """Sum fn(slab) over every slab on node *n*'s partial list."""
    nr_objs = 0
    for slab in list_for_each_entry('struct slab', n.partial.address_of_(),
                                    'slab_list'):
        nr_objs += fn(slab)
    return nr_objs
def count_free(slab):
    """Return the number of free (unused) object slots on *slab*."""
    free_slots = slab.objects - slab.inuse
    return free_slots
def slub_get_slabinfo(s, cfg):
    """Gather /proc/slabinfo-equivalent counters for a SLUB cache *s*."""
    nr_slabs = 0
    nr_objs = 0
    nr_free = 0

    # Sum the per-NUMA-node counters; free objects are counted by walking
    # each node's partial list.
    for node in range(cfg['nr_nodes']):
        n = s.node[node]
        nr_slabs += n.nr_slabs.counter.value_()
        nr_objs += n.total_objects.counter.value_()
        nr_free += count_partial(n, count_free)

    return {'active_objs': nr_objs - nr_free,
            'num_objs': nr_objs,
            'active_slabs': nr_slabs,
            'num_slabs': nr_slabs,
            'objects_per_slab': oo_objects(s),
            'cache_order': oo_order(s),
            # The tunables below do not apply to SLUB; report zeros.
            'limit': 0,
            'batchcount': 0,
            'shared': 0,
            'shared_avail': 0}
def cache_show(s, cfg, objs):
    """Print one /proc/slabinfo-style line for kmem_cache *s*."""
    if cfg['allocator'] == 'SLUB':
        sinfo = slub_get_slabinfo(s, cfg)
    else:
        err('SLAB isn\'t supported yet')

    # With shared slab pages the per-cgroup object count was computed by
    # the caller, so it overrides the cache-wide numbers.
    if cfg['shared_slab_pages']:
        sinfo['active_objs'] = objs
        sinfo['num_objs'] = objs

    print('%-17s %6lu %6lu %6u %4u %4d'
          ' : tunables %4u %4u %4u'
          ' : slabdata %6lu %6lu %6lu' % (
              cache_name(s), sinfo['active_objs'], sinfo['num_objs'],
              s.size, sinfo['objects_per_slab'], 1 << sinfo['cache_order'],
              sinfo['limit'], sinfo['batchcount'], sinfo['shared'],
              sinfo['active_slabs'], sinfo['num_slabs'],
              sinfo['shared_avail']))
def detect_kernel_config():
    """Probe the running kernel for node count, allocator type and memcg slab layout."""
    cfg = {}

    cfg['nr_nodes'] = prog['nr_online_nodes'].value_()

    # Distinguish SLUB from SLAB by the second member of struct kmem_cache.
    if prog.type('struct kmem_cache').members[1].name == 'flags':
        cfg['allocator'] = 'SLUB'
    elif prog.type('struct kmem_cache').members[1].name == 'batchcount':
        cfg['allocator'] = 'SLAB'
    else:
        err('Can\'t determine the slab allocator')

    # Kernels that define 'struct obj_cgroup' share slab pages between
    # cgroups; older kernels used separate per-memcg caches.
    cfg['shared_slab_pages'] = False
    try:
        if prog.type('struct obj_cgroup'):
            cfg['shared_slab_pages'] = True
    except:
        pass

    return cfg
def for_each_slab(prog):
    """Yield every slab in the system by scanning all pages for PG_slab."""
    PGSlab = 1 << prog.constant('PG_slab')
    PGHead = 1 << prog.constant('PG_head')

    for page in for_each_page(prog):
        try:
            if page.flags.value_() & PGSlab:
                yield cast('struct slab *', page)
        except FaultError:
            # Holes in the physical memory map are expected; skip them.
            pass
def main():
    """Emit memory.kmem.slabinfo-style output for one target memory cgroup."""
    parser = argparse.ArgumentParser(description=DESC,
                                     formatter_class=
                                     argparse.RawTextHelpFormatter)
    parser.add_argument('cgroup', metavar='CGROUP',
                        help='Target memory cgroup')
    args = parser.parse_args()

    try:
        # The cgroup's kernfs id equals the inode number of its directory.
        cgroup_id = stat(args.cgroup).st_ino
        find_memcg_ids()
        memcg = MEMCGS[cgroup_id]
    except KeyError:
        err('Can\'t find the memory cgroup')

    cfg = detect_kernel_config()

    print('# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>'
          ' : tunables <limit> <batchcount> <sharedfactor>'
          ' : slabdata <active_slabs> <num_slabs> <sharedavail>')

    if cfg['shared_slab_pages']:
        obj_cgroups = set()
        stats = {}
        caches = {}

        # find memcg pointers belonging to the specified cgroup
        obj_cgroups.add(memcg.objcg.value_())
        for ptr in list_for_each_entry('struct obj_cgroup',
                                       memcg.objcg_list.address_of_(),
                                       'list'):
            obj_cgroups.add(ptr.value_())

        # look over all slab folios and look for objects belonging
        # to the given memory cgroup
        for slab in for_each_slab(prog):
            objcg_vec_raw = slab.memcg_data.value_()
            if objcg_vec_raw == 0:
                continue
            cache = slab.slab_cache
            if not cache:
                continue
            addr = cache.value_()
            caches[addr] = cache
            # clear the lowest bit to get the true obj_cgroups
            objcg_vec = Object(prog, 'struct obj_cgroup **',
                               value=objcg_vec_raw & ~1)

            if addr not in stats:
                stats[addr] = 0

            for i in range(oo_objects(cache)):
                if objcg_vec[i].value_() in obj_cgroups:
                    stats[addr] += 1

        for addr in caches:
            if stats[addr] > 0:
                cache_show(caches[addr], cfg, stats[addr])

    else:
        # Older layout: each memcg keeps its own list of child caches.
        for s in list_for_each_entry('struct kmem_cache',
                                     memcg.kmem_caches.address_of_(),
                                     'memcg_params.kmem_caches_node'):
            cache_show(s, cfg, None)


main()
| grace-kernel-grace-kernel-6.1.y | tools/cgroup/memcg_slabinfo.py |
#!/usr/bin/env python3
#
# Copyright (C) 2019 Tejun Heo <[email protected]>
# Copyright (C) 2019 Andy Newell <[email protected]>
# Copyright (C) 2019 Facebook
desc = """
Generate linear IO cost model coefficients used by the blk-iocost
controller. If the target raw testdev is specified, destructive tests
are performed against the whole device; otherwise, on
./iocost-coef-fio.testfile. The result can be written directly to
/sys/fs/cgroup/io.cost.model.
On high performance devices, --numjobs > 1 is needed to achieve
saturation.
See Documentation/admin-guide/cgroup-v2.rst and block/blk-iocost.c
for more details.
"""
import argparse
import re
import json
import glob
import os
import sys
import atexit
import shutil
import tempfile
import subprocess
parser = argparse.ArgumentParser(description=desc,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--testdev', metavar='DEV',
help='Raw block device to use for testing, ignores --testfile-size')
parser.add_argument('--testfile-size-gb', type=float, metavar='GIGABYTES', default=16,
help='Testfile size in gigabytes (default: %(default)s)')
parser.add_argument('--duration', type=int, metavar='SECONDS', default=120,
help='Individual test run duration in seconds (default: %(default)s)')
parser.add_argument('--seqio-block-mb', metavar='MEGABYTES', type=int, default=128,
help='Sequential test block size in megabytes (default: %(default)s)')
parser.add_argument('--seq-depth', type=int, metavar='DEPTH', default=64,
help='Sequential test queue depth (default: %(default)s)')
parser.add_argument('--rand-depth', type=int, metavar='DEPTH', default=64,
help='Random test queue depth (default: %(default)s)')
parser.add_argument('--numjobs', type=int, metavar='JOBS', default=1,
help='Number of parallel fio jobs to run (default: %(default)s)')
parser.add_argument('--quiet', action='store_true')
parser.add_argument('--verbose', action='store_true')
def info(msg):
    """Print progress output unless --quiet was given."""
    if args.quiet:
        return
    print(msg)
def dbg(msg):
    """Print debug output when --verbose is set (suppressed by --quiet)."""
    verbose_enabled = args.verbose and not args.quiet
    if verbose_enabled:
        print(msg)
# determine ('DEVNAME', 'MAJ:MIN') for @path
def dir_to_dev(path):
    """Resolve *path* to the whole block device backing its filesystem."""
    # find the block device the current directory is on
    devname = subprocess.run(f'findmnt -nvo SOURCE -T{path}',
                             stdout=subprocess.PIPE, shell=True).stdout
    devname = os.path.basename(devname).decode('utf-8').strip()
    # partition -> whole device: a partition appears as a subdirectory of
    # its parent device under /sys/block.
    parents = glob.glob('/sys/block/*/' + devname)
    if len(parents):
        devname = os.path.basename(os.path.dirname(parents[0]))
    rdev = os.stat(f'/dev/{devname}').st_rdev
    return (devname, f'{os.major(rdev)}:{os.minor(rdev)}')
def create_testfile(path, size):
    """Create (or reuse) a *size*-byte random-content test file at *path*."""
    global args

    # Reuse an existing testfile of exactly the right size.
    if os.path.isfile(path) and os.stat(path).st_size == size:
        return

    info(f'Creating testfile {path}')
    subprocess.check_call(f'rm -f {path}', shell=True)
    subprocess.check_call(f'touch {path}', shell=True)
    # chattr +C disables copy-on-write where supported; best-effort, so the
    # return code is deliberately ignored (subprocess.call, not check_call).
    subprocess.call(f'chattr +C {path}', shell=True)
    # Fill with random data; oflag=direct bypasses the page cache so the
    # later fio runs measure the device, not cached pages.
    subprocess.check_call(
        f'pv -s {size} -pr /dev/urandom {"-q" if args.quiet else ""} | '
        f'dd of={path} count={size} '
        f'iflag=count_bytes,fullblock oflag=direct bs=16M status=none',
        shell=True)
def run_fio(testfile, duration, iotype, iodepth, blocksize, jobs):
    """Run one fio workload and return total read+write bandwidth in bytes/sec."""
    global args

    eta = 'never' if args.quiet else 'always'
    outfile = tempfile.NamedTemporaryFile()
    cmd = (f'fio --direct=1 --ioengine=libaio --name=coef '
           f'--filename={testfile} --runtime={round(duration)} '
           f'--readwrite={iotype} --iodepth={iodepth} --blocksize={blocksize} '
           f'--eta={eta} --output-format json --output={outfile.name} '
           f'--time_based --numjobs={jobs}')
    if args.verbose:
        dbg(f'Running {cmd}')
    subprocess.check_call(cmd, shell=True)
    with open(outfile.name, 'r') as f:
        d = json.loads(f.read())
    # Sum bandwidth across all parallel fio jobs.
    return sum(j['read']['bw_bytes'] + j['write']['bw_bytes'] for j in d['jobs'])
def restore_elevator_nomerges():
    """Restore the I/O scheduler and nomerges settings saved at startup."""
    global elevator_path, nomerges_path, elevator, nomerges

    info(f'Restoring elevator to {elevator} and nomerges to {nomerges}')
    with open(elevator_path, 'w') as f:
        f.write(elevator)
    with open(nomerges_path, 'w') as f:
        f.write(nomerges)
args = parser.parse_args()
missing = False
for cmd in [ 'findmnt', 'pv', 'dd', 'fio' ]:
if not shutil.which(cmd):
print(f'Required command "{cmd}" is missing', file=sys.stderr)
missing = True
if missing:
sys.exit(1)
if args.testdev:
devname = os.path.basename(args.testdev)
rdev = os.stat(f'/dev/{devname}').st_rdev
devno = f'{os.major(rdev)}:{os.minor(rdev)}'
testfile = f'/dev/{devname}'
info(f'Test target: {devname}({devno})')
else:
devname, devno = dir_to_dev('.')
testfile = 'iocost-coef-fio.testfile'
testfile_size = int(args.testfile_size_gb * 2 ** 30)
create_testfile(testfile, testfile_size)
info(f'Test target: {testfile} on {devname}({devno})')
elevator_path = f'/sys/block/{devname}/queue/scheduler'
nomerges_path = f'/sys/block/{devname}/queue/nomerges'
with open(elevator_path, 'r') as f:
elevator = re.sub(r'.*\[(.*)\].*', r'\1', f.read().strip())
with open(nomerges_path, 'r') as f:
nomerges = f.read().strip()
info(f'Temporarily disabling elevator and merges')
atexit.register(restore_elevator_nomerges)
with open(elevator_path, 'w') as f:
f.write('none')
with open(nomerges_path, 'w') as f:
f.write('1')
info('Determining rbps...')
rbps = run_fio(testfile, args.duration, 'read',
1, args.seqio_block_mb * (2 ** 20), args.numjobs)
info(f'\nrbps={rbps}, determining rseqiops...')
rseqiops = round(run_fio(testfile, args.duration, 'read',
args.seq_depth, 4096, args.numjobs) / 4096)
info(f'\nrseqiops={rseqiops}, determining rrandiops...')
rrandiops = round(run_fio(testfile, args.duration, 'randread',
args.rand_depth, 4096, args.numjobs) / 4096)
info(f'\nrrandiops={rrandiops}, determining wbps...')
wbps = run_fio(testfile, args.duration, 'write',
1, args.seqio_block_mb * (2 ** 20), args.numjobs)
info(f'\nwbps={wbps}, determining wseqiops...')
wseqiops = round(run_fio(testfile, args.duration, 'write',
args.seq_depth, 4096, args.numjobs) / 4096)
info(f'\nwseqiops={wseqiops}, determining wrandiops...')
wrandiops = round(run_fio(testfile, args.duration, 'randwrite',
args.rand_depth, 4096, args.numjobs) / 4096)
info(f'\nwrandiops={wrandiops}')
restore_elevator_nomerges()
atexit.unregister(restore_elevator_nomerges)
info('')
print(f'{devno} rbps={rbps} rseqiops={rseqiops} rrandiops={rrandiops} '
f'wbps={wbps} wseqiops={wseqiops} wrandiops={wrandiops}')
| grace-kernel-grace-kernel-6.1.y | tools/cgroup/iocost_coef_gen.py |
#!/usr/bin/env drgn
#
# Copyright (C) 2019 Tejun Heo <[email protected]>
# Copyright (C) 2019 Facebook
desc = """
This is a drgn script to monitor the blk-iocost cgroup controller.
See the comment at the top of block/blk-iocost.c for more details.
For drgn, visit https://github.com/osandov/drgn.
"""
import sys
import re
import time
import json
import math
import drgn
from drgn import container_of
from drgn.helpers.linux.list import list_for_each_entry,list_empty
from drgn.helpers.linux.radixtree import radix_tree_for_each,radix_tree_lookup
import argparse
parser = argparse.ArgumentParser(description=desc,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('devname', metavar='DEV',
help='Target block device name (e.g. sda)')
parser.add_argument('--cgroup', action='append', metavar='REGEX',
help='Regex for target cgroups, ')
parser.add_argument('--interval', '-i', metavar='SECONDS', type=float, default=1,
help='Monitoring interval in seconds (0 exits immediately '
'after checking requirements)')
parser.add_argument('--json', action='store_true',
help='Output in json')
args = parser.parse_args()
def err(s):
    """Print *s* to stderr and abort with exit status 1."""
    print(s, file=sys.stderr, flush=True)
    raise SystemExit(1)
try:
blkcg_root = prog['blkcg_root']
plid = prog['blkcg_policy_iocost'].plid.value_()
except:
err('The kernel does not have iocost enabled')
IOC_RUNNING = prog['IOC_RUNNING'].value_()
WEIGHT_ONE = prog['WEIGHT_ONE'].value_()
VTIME_PER_SEC = prog['VTIME_PER_SEC'].value_()
VTIME_PER_USEC = prog['VTIME_PER_USEC'].value_()
AUTOP_SSD_FAST = prog['AUTOP_SSD_FAST'].value_()
AUTOP_SSD_DFL = prog['AUTOP_SSD_DFL'].value_()
AUTOP_SSD_QD1 = prog['AUTOP_SSD_QD1'].value_()
AUTOP_HDD = prog['AUTOP_HDD'].value_()
autop_names = {
AUTOP_SSD_FAST: 'ssd_fast',
AUTOP_SSD_DFL: 'ssd_dfl',
AUTOP_SSD_QD1: 'ssd_qd1',
AUTOP_HDD: 'hdd',
}
class BlkgIterator:
    """Depth-first iterator over (cgroup path, blkcg_gq) pairs for one request queue."""
    def __init__(self, root_blkcg, q_id, include_dying=False):
        self.include_dying = include_dying
        self.blkgs = []
        self.walk(root_blkcg, q_id, '')

    # NOTE(review): defined without @staticmethod but always invoked as
    # BlkgIterator.blkcg_name(...), which works in Python 3.
    def blkcg_name(blkcg):
        return blkcg.css.cgroup.kn.name.string_().decode('utf-8')

    def walk(self, blkcg, q_id, parent_path):
        # Skip offline cgroups unless explicitly asked to include them.
        if not self.include_dying and \
           not (blkcg.css.flags.value_() & prog['CSS_ONLINE'].value_()):
            return

        name = BlkgIterator.blkcg_name(blkcg)
        path = parent_path + '/' + name if parent_path else name
        # Look up this cgroup's blkg for the target queue in its radix tree.
        blkg = drgn.Object(prog, 'struct blkcg_gq',
                           address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
        if not blkg.address_:
            return

        self.blkgs.append((path if path else '/', blkg))

        for c in list_for_each_entry('struct blkcg',
                                     blkcg.css.children.address_of_(), 'css.sibling'):
            self.walk(c, q_id, path)

    def __iter__(self):
        return iter(self.blkgs)
class IocStat:
    """Snapshot of a device-wide struct ioc for one monitoring pass."""

    def __init__(self, ioc):
        # NOTE: the original declared 'global autop_names' here; that is
        # unnecessary for a name that is only read, so it was dropped.
        self.enabled = ioc.enabled.value_()
        self.running = ioc.running.value_() == IOC_RUNNING
        self.period_ms = ioc.period_us.value_() / 1_000
        self.period_at = ioc.period_at.value_() / 1_000_000
        self.vperiod_at = ioc.period_at_vtime.value_() / VTIME_PER_SEC
        self.vrate_pct = ioc.vtime_base_rate.value_() * 100 / VTIME_PER_USEC
        self.busy_level = ioc.busy_level.value_()
        self.autop_idx = ioc.autop_idx.value_()
        self.user_cost_model = ioc.user_cost_model.value_()
        self.user_qos_params = ioc.user_qos_params.value_()

        if self.autop_idx in autop_names:
            self.autop_name = autop_names[self.autop_idx]
        else:
            self.autop_name = '?'

    def dict(self, now):
        """Return the device-level stats as a dict for JSON output."""
        return { 'device'               : devname,
                 'timestamp'            : now,
                 'enabled'              : self.enabled,
                 'running'              : self.running,
                 'period_ms'            : self.period_ms,
                 'period_at'            : self.period_at,
                 'period_vtime_at'      : self.vperiod_at,
                 'busy_level'           : self.busy_level,
                 'vrate_pct'            : self.vrate_pct, }

    def table_preamble_str(self):
        """One-line device summary printed above the per-cgroup table."""
        state = ('RUN' if self.running else 'IDLE') if self.enabled else 'OFF'
        output = f'{devname} {state:4} ' \
                 f'per={self.period_ms}ms ' \
                 f'cur_per={self.period_at:.3f}:v{self.vperiod_at:.3f} ' \
                 f'busy={self.busy_level:+3} ' \
                 f'vrate={self.vrate_pct:6.2f}% ' \
                 f'params={self.autop_name}'
        # Tag user-overridden cost model (C) and/or QoS params (Q).
        if self.user_cost_model or self.user_qos_params:
            output += f'({"C" if self.user_cost_model else ""}{"Q" if self.user_qos_params else ""})'
        return output

    def table_header_str(self):
        """Column headers for the per-cgroup table rows."""
        return f'{"":25} active {"weight":>9} {"hweight%":>13} {"inflt%":>6} ' \
               f'{"debt":>7} {"delay":>7} {"usage%"}'
class IocgStat:
    """Snapshot of one cgroup's struct ioc_gq (per-cgroup iocost state)."""

    def __init__(self, iocg):
        ioc = iocg.ioc
        blkg = iocg.pd.blkg

        self.is_active = not list_empty(iocg.active_list.address_of_())
        # Weights are stored scaled by WEIGHT_ONE; normalize for display.
        self.weight = iocg.weight.value_() / WEIGHT_ONE
        self.active = iocg.active.value_() / WEIGHT_ONE
        self.inuse = iocg.inuse.value_() / WEIGHT_ONE
        self.hwa_pct = iocg.hweight_active.value_() * 100 / WEIGHT_ONE
        self.hwi_pct = iocg.hweight_inuse.value_() * 100 / WEIGHT_ONE
        self.address = iocg.value_()

        # In-flight vtime = issued (vtime) minus completed (done_vtime),
        # expressed as a percentage of one period's worth of vtime.
        vdone = iocg.done_vtime.counter.value_()
        vtime = iocg.vtime.counter.value_()
        vrate = ioc.vtime_rate.counter.value_()
        period_vtime = ioc.period_us.value_() * vrate
        if period_vtime:
            self.inflight_pct = (vtime - vdone) * 100 / period_vtime
        else:
            self.inflight_pct = 0

        self.usage = (100 * iocg.usage_delta_us.value_() /
                      ioc.period_us.value_()) if self.active else 0
        self.debt_ms = iocg.abs_vdebt.value_() / VTIME_PER_USEC / 1000
        if blkg.use_delay.counter.value_() != 0:
            self.delay_ms = blkg.delay_nsec.counter.value_() / 1_000_000
        else:
            self.delay_ms = 0

    def dict(self, now, path):
        """Return the per-cgroup stats as a dict for JSON output."""
        out = { 'cgroup'                : path,
                'timestamp'             : now,
                'is_active'             : self.is_active,
                'weight'                : self.weight,
                'weight_active'         : self.active,
                'weight_inuse'          : self.inuse,
                'hweight_active_pct'    : self.hwa_pct,
                'hweight_inuse_pct'     : self.hwi_pct,
                'inflight_pct'          : self.inflight_pct,
                'debt_ms'               : self.debt_ms,
                'delay_ms'              : self.delay_ms,
                'usage_pct'             : self.usage,
                'address'               : self.address }
        return out

    def table_row_str(self, path):
        """Format one fixed-width table row for this cgroup."""
        out = f'{path[-28:]:28} ' \
              f'{"*" if self.is_active else " "} ' \
              f'{round(self.inuse):5}/{round(self.active):5} ' \
              f'{self.hwi_pct:6.2f}/{self.hwa_pct:6.2f} ' \
              f'{self.inflight_pct:6.2f} ' \
              f'{self.debt_ms:7.2f} ' \
              f'{self.delay_ms:7.2f} '\
              f'{min(self.usage, 999):6.2f}'
        # The original ended with "out = out.rstrip(':')" — a no-op,
        # since the row always ends in the usage digits; removed.
        return out
# handle args
table_fmt = not args.json
interval = args.interval
devname = args.devname

if args.json:
    table_fmt = False

# Combine all --cgroup regexes into one alternation pattern.
re_str = None
if args.cgroup:
    for r in args.cgroup:
        if re_str is None:
            re_str = r
        else:
            re_str += '|' + r

filter_re = re.compile(re_str) if re_str else None

# Locate the roots: scan the root blkcg's blkg radix tree for the blkg
# whose queue belongs to the requested device, then derive the queue id
# and the root ioc_gq / ioc from it.
q_id = None
root_iocg = None
ioc = None

for i, ptr in radix_tree_for_each(blkcg_root.blkg_tree.address_of_()):
    blkg = drgn.Object(prog, 'struct blkcg_gq', address=ptr)
    try:
        if devname == blkg.q.kobj.parent.name.string_().decode('utf-8'):
            q_id = blkg.q.id.value_()
            if blkg.pd[plid]:
                root_iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd')
                ioc = root_iocg.ioc
            break
    except:
        # A blkg can disappear while we read it on a live kernel; ignore.
        pass

if ioc is None:
    err(f'Could not find ioc for {devname}');

# --interval 0 means "only verify requirements and exit".
if interval == 0:
    sys.exit(0)
# Keep printing
while True:
    now = time.time()
    iocstat = IocStat(ioc)
    output = ''

    # Device-level line(s): table preamble+header, or one JSON object.
    if table_fmt:
        output += '\n' + iocstat.table_preamble_str()
        output += '\n' + iocstat.table_header_str()
    else:
        output += json.dumps(iocstat.dict(now))

    # One row / JSON object per cgroup with a blkg on this queue.
    for path, blkg in BlkgIterator(blkcg_root, q_id):
        if filter_re and not filter_re.match(path):
            continue
        if not blkg.pd[plid]:
            continue

        iocg = container_of(blkg.pd[plid], 'struct ioc_gq', 'pd')
        iocg_stat = IocgStat(iocg)

        # Without a filter, only show cgroups currently issuing IO.
        if not filter_re and not iocg_stat.is_active:
            continue

        if table_fmt:
            output += '\n' + iocg_stat.table_row_str(path)
        else:
            output += '\n' + json.dumps(iocg_stat.dict(now, path))

    print(output)
    sys.stdout.flush()
    time.sleep(interval)
| grace-kernel-grace-kernel-6.1.y | tools/cgroup/iocost_monitor.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
from subprocess import PIPE, Popen
import json
import time
import argparse
import collections
import sys
#
# Test port split configuration using devlink-port lanes attribute.
# The test is skipped in case the attribute is not available.
#
# First, check that all the ports with 1 lane fail to split.
# Second, check that all the ports with more than 1 lane can be split
# to all valid configurations (e.g., split to 2, split to 4 etc.)
#
# Kselftest framework requirement - SKIP code is 4
KSFT_SKIP=4
Port = collections.namedtuple('Port', 'bus_info name')
def run_command(cmd, should_fail=False):
    """
    Run a command in subprocess.
    Return: Tuple of (stdout, stderr).
    """
    proc = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
    raw_out, raw_err = proc.communicate()
    out = raw_out.decode()
    err = raw_err.decode()
    # A non-empty stderr is only reported when failure was not expected.
    if err and not should_fail:
        print("Error sending command: %s" % cmd)
        print(out)
        print(err)
    return out, err
class devlink_ports(object):
    """
    Class that holds information on the devlink ports, required to the tests;
    if_names: A list of interfaces in the devlink ports.
    """
    def get_if_names(dev):
        """
        Get a list of physical devlink ports.
        Return: Array of tuples (bus_info/port, if_name).
        """
        stdout, stderr = run_command("devlink -j port show")
        assert stderr == ""
        ports = json.loads(stdout)['port']
        validate_devlink_output(ports, 'flavour')
        # Keep only physical ports that belong to the requested device.
        return [Port(bus_info=p, name=ports[p]['netdev'])
                for p in ports
                if dev in p and ports[p]['flavour'] == 'physical']

    def __init__(self, dev):
        self.if_names = devlink_ports.get_if_names(dev)
def get_max_lanes(port):
    """
    Get the $port's maximum number of lanes.
    Return: number of lanes, e.g. 1, 2, 4 and 8; 0 when not reported.
    """
    stdout, stderr = run_command("devlink -j port show %s" % port)
    assert stderr == ""
    attrs = list(json.loads(stdout)['port'].values())[0]
    # Ports that do not expose the 'lanes' attribute report 0.
    return attrs.get('lanes', 0)
def get_split_ability(port):
    """
    Get the $port split ability.
    Return: split ability, true or false.
    """
    stdout, stderr = run_command("devlink -j port show %s" % port.name)
    assert stderr == ""
    attrs = list(json.loads(stdout)['port'].values())[0]
    return attrs['splittable']
def split(k, port, should_fail=False):
    """
    Split $port into $k ports.
    If should_fail == True, the split should fail. Otherwise, should pass.
    Return: Array of sub ports after splitting.
            If the $port wasn't split, the array will be empty.
    """
    cmd = "devlink port split %s count %s" % (port.bus_info, k)
    stdout, stderr = run_command(cmd, should_fail=should_fail)

    if should_fail:
        # A successful split of an unsplittable port is itself a failure.
        if not test(stderr != "", "%s is unsplittable" % port.name):
            print("split an unsplittable port %s" % port.name)
            return create_split_group(port, k)
        return []

    if stderr == "":
        return create_split_group(port, k)
    print("didn't split a splittable port %s" % port.name)
    return []
def unsplit(port):
    """
    Unsplit $port.
    """
    _, stderr = run_command("devlink port unsplit %s" % port)
    test(stderr == "", "Unsplit port %s" % port)
def exists(port, dev):
    """
    Check if $port exists in the devlink ports.
    Return: True is so, False otherwise.
    """
    for dev_port in devlink_ports.get_if_names(dev):
        if dev_port.name == port:
            return True
    return False
def exists_and_lanes(ports, lanes, dev):
    """
    Check if every port in the list $ports exists in the devlink ports and has
    $lanes number of lanes after splitting.
    Return: True if both are True, False otherwise.
    """
    for port in ports:
        max_lanes = get_max_lanes(port)
        if not exists(port, dev):
            print("port %s doesn't exist in devlink ports" % port)
            return False
        if max_lanes != lanes:
            # BUGFIX: the original printed the expected count as the
            # actual one and vice versa; report actual (max_lanes) first.
            print("port %s has %d lanes, but %s were expected"
                  % (port, max_lanes, lanes))
            return False
    return True
def test(cond, msg):
"""
Check $cond and print a message accordingly.
Return: True is pass, False otherwise.
"""
if cond:
print("TEST: %-60s [ OK ]" % msg)
else:
print("TEST: %-60s [FAIL]" % msg)
return cond
def create_split_group(port, k):
    """
    Create the split group for $port.
    Return: Array with $k elements, which are the split port group.
    """
    return [port.name + "s" + str(i) for i in range(k)]
def split_unsplittable_port(port, k):
    """
    Test that splitting of unsplittable port fails.
    """
    # split to max
    new_split_group = split(k, port, should_fail=True)

    # Clean up in case the split unexpectedly succeeded.
    if new_split_group:
        unsplit(port.bus_info)
def split_splittable_port(port, k, lanes, dev):
    """
    Test that splitting of splittable port passes correctly.
    """
    new_split_group = split(k, port)

    # Once the split command ends, it takes some time to the sub ifaces'
    # to get their names. Use udevadm to continue only when all current udev
    # events are handled.
    stdout, stderr = run_command("udevadm settle")
    assert stderr == ""

    if new_split_group:
        test(exists_and_lanes(new_split_group, lanes/k, dev),
             "split port %s into %s" % (port.name, k))
        unsplit(port.bus_info)
def validate_devlink_output(devlink_data, target_property=None):
    """
    Determine if test should be skipped by checking:
      1. devlink_data contains values
      2. The target_property exist in devlink_data
    Exits with KSFT_SKIP when either check fails.
    """
    if not any(devlink_data.values()):
        skip_reason = 'devlink output is empty, test skipped'
    else:
        skip_reason = None
        if target_property:
            present = any(target_property in devlink_data[key]
                          for key in devlink_data)
            if not present:
                skip_reason = "{} not found in devlink output, test skipped".format(target_property)

    if skip_reason:
        print(skip_reason)
        sys.exit(KSFT_SKIP)
def make_parser():
    """Build the argument parser for the port-split selftest."""
    p = argparse.ArgumentParser(description='A test for port splitting.')
    p.add_argument('--dev',
                   help='The devlink handle of the device under test. ' +
                        'The default is the first registered devlink ' +
                        'handle.')
    return p
def main(cmdline=None):
    """Run the port-split test over every physical port of one device.

    cmdline: optional argv list for argparse (None -> sys.argv[1:]).
    """
    parser = make_parser()
    args = parser.parse_args(cmdline)

    dev = args.dev
    if not dev:
        # No device given: pick the first registered devlink handle.
        cmd = "devlink -j dev show"
        stdout, stderr = run_command(cmd)
        assert stderr == ""

        validate_devlink_output(json.loads(stdout))
        devs = json.loads(stdout)['dev']
        dev = list(devs.keys())[0]

    cmd = "devlink dev show %s" % dev
    stdout, stderr = run_command(cmd)
    if stderr != "":
        print("devlink device %s can not be found" % dev)
        sys.exit(1)

    ports = devlink_ports(dev)

    found_max_lanes = False
    for port in ports.if_names:
        max_lanes = get_max_lanes(port.name)

        # If max lanes is 0, do not test port splitting at all
        if max_lanes == 0:
            continue

        # If 1 lane, shouldn't be able to split
        elif max_lanes == 1:
            test(not get_split_ability(port),
                 "%s should not be able to split" % port.name)
            split_unsplittable_port(port, max_lanes)

        # Else, splitting should pass and all the split ports should exist.
        else:
            lane = max_lanes
            test(get_split_ability(port),
                 "%s should be able to split" % port.name)
            while lane > 1:
                # Try every valid split count: max, max/2, ... down to 2.
                split_splittable_port(port, lane, max_lanes, dev)

                lane //= 2
        found_max_lanes = True

    if not found_max_lanes:
        print(f"Test not started, no port of device {dev} reports max_lanes")
        sys.exit(KSFT_SKIP)
if __name__ == "__main__":
main()
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/net/devlink_port_split.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
# Controls the openvswitch module. Part of the kselftest suite, but
# can be used for some diagnostic purpose as well.
import argparse
import errno
import sys
try:
from pyroute2 import NDB
from pyroute2.netlink import NLM_F_ACK
from pyroute2.netlink import NLM_F_REQUEST
from pyroute2.netlink import genlmsg
from pyroute2.netlink import nla
from pyroute2.netlink.exceptions import NetlinkError
from pyroute2.netlink.generic import GenericNetlinkSocket
except ModuleNotFoundError:
print("Need to install the python pyroute2 package.")
sys.exit(0)
OVS_DATAPATH_FAMILY = "ovs_datapath"
OVS_VPORT_FAMILY = "ovs_vport"
OVS_FLOW_FAMILY = "ovs_flow"
OVS_PACKET_FAMILY = "ovs_packet"
OVS_METER_FAMILY = "ovs_meter"
OVS_CT_LIMIT_FAMILY = "ovs_ct_limit"
OVS_DATAPATH_VERSION = 2
OVS_DP_CMD_NEW = 1
OVS_DP_CMD_DEL = 2
OVS_DP_CMD_GET = 3
OVS_DP_CMD_SET = 4
OVS_VPORT_CMD_NEW = 1
OVS_VPORT_CMD_DEL = 2
OVS_VPORT_CMD_GET = 3
OVS_VPORT_CMD_SET = 4
class ovs_dp_msg(genlmsg):
    """Base genetlink message carrying the OVS header (datapath ifindex)."""
    # include the OVS version
    # We need a custom header rather than just being able to rely on
    # genlmsg because fields ends up not expressing everything correctly
    # if we use the canonical example of setting fields = (('customfield',),)
    fields = genlmsg.fields + (("dpifindex", "I"),)
class OvsDatapath(GenericNetlinkSocket):
    """Netlink socket bound to the ovs_datapath genetlink family.

    Provides info/create/destroy operations on OVS datapaths.
    """
    # User-feature flags (OVS_DP_F_*).
    OVS_DP_F_VPORT_PIDS = 1 << 1
    OVS_DP_F_DISPATCH_UPCALL_PER_CPU = 1 << 3

    class dp_cmd_msg(ovs_dp_msg):
        """
        Message class that will be used to communicate with the kernel module.
        """
        # NOTE: entry order must match the kernel's OVS_DP_ATTR_* enum.
        nla_map = (
            ("OVS_DP_ATTR_UNSPEC", "none"),
            ("OVS_DP_ATTR_NAME", "asciiz"),
            ("OVS_DP_ATTR_UPCALL_PID", "array(uint32)"),
            ("OVS_DP_ATTR_STATS", "dpstats"),
            ("OVS_DP_ATTR_MEGAFLOW_STATS", "megaflowstats"),
            ("OVS_DP_ATTR_USER_FEATURES", "uint32"),
            ("OVS_DP_ATTR_PAD", "none"),
            ("OVS_DP_ATTR_MASKS_CACHE_SIZE", "uint32"),
            ("OVS_DP_ATTR_PER_CPU_PIDS", "array(uint32)"),
        )

        class dpstats(nla):
            # struct ovs_dp_stats layout.
            fields = (
                ("hit", "=Q"),
                ("missed", "=Q"),
                ("lost", "=Q"),
                ("flows", "=Q"),
            )

        class megaflowstats(nla):
            # struct ovs_dp_megaflow_stats layout.
            fields = (
                ("mask_hit", "=Q"),
                ("masks", "=I"),
                ("padding", "=I"),
                ("cache_hits", "=Q"),
                ("pad1", "=Q"),
            )

    def __init__(self):
        GenericNetlinkSocket.__init__(self)
        self.bind(OVS_DATAPATH_FAMILY, OvsDatapath.dp_cmd_msg)

    def info(self, dpname, ifindex=0):
        """Query one datapath by name; return the reply or None (ENODEV)."""
        msg = OvsDatapath.dp_cmd_msg()
        msg["cmd"] = OVS_DP_CMD_GET
        msg["version"] = OVS_DATAPATH_VERSION
        msg["reserved"] = 0
        msg["dpifindex"] = ifindex
        msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])

        try:
            reply = self.nlm_request(
                msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST
            )
            reply = reply[0]
        except NetlinkError as ne:
            # ENODEV simply means "no such datapath".
            if ne.code == errno.ENODEV:
                reply = None
            else:
                raise ne

        return reply

    def create(self, dpname, shouldUpcall=False, versionStr=None):
        """Create a datapath; versionStr is "<version>[:<feature bits>]".

        Returns the netlink reply, or None when the datapath exists.
        """
        msg = OvsDatapath.dp_cmd_msg()
        msg["cmd"] = OVS_DP_CMD_NEW
        if versionStr is None:
            msg["version"] = OVS_DATAPATH_VERSION
        else:
            msg["version"] = int(versionStr.split(":")[0], 0)
        msg["reserved"] = 0
        msg["dpifindex"] = 0
        msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])

        dpfeatures = 0
        if versionStr is not None and versionStr.find(":") != -1:
            dpfeatures = int(versionStr.split(":")[1], 0)
        else:
            dpfeatures = OvsDatapath.OVS_DP_F_VPORT_PIDS

        msg["attrs"].append(["OVS_DP_ATTR_USER_FEATURES", dpfeatures])
        if not shouldUpcall:
            # Upcall PID 0 disables upcalls for this datapath.
            msg["attrs"].append(["OVS_DP_ATTR_UPCALL_PID", 0])

        try:
            reply = self.nlm_request(
                msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
            )
            reply = reply[0]
        except NetlinkError as ne:
            if ne.code == errno.EEXIST:
                reply = None
            else:
                raise ne

        return reply

    def destroy(self, dpname):
        """Delete a datapath by name; None when it does not exist."""
        msg = OvsDatapath.dp_cmd_msg()
        msg["cmd"] = OVS_DP_CMD_DEL
        msg["version"] = OVS_DATAPATH_VERSION
        msg["reserved"] = 0
        msg["dpifindex"] = 0
        msg["attrs"].append(["OVS_DP_ATTR_NAME", dpname])

        try:
            reply = self.nlm_request(
                msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK
            )
            reply = reply[0]
        except NetlinkError as ne:
            if ne.code == errno.ENODEV:
                reply = None
            else:
                raise ne

        return reply
class OvsVport(GenericNetlinkSocket):
    """Netlink socket bound to the ovs_vport genetlink family."""

    class ovs_vport_msg(ovs_dp_msg):
        # NOTE: entry order must match the kernel's OVS_VPORT_ATTR_* enum.
        nla_map = (
            ("OVS_VPORT_ATTR_UNSPEC", "none"),
            ("OVS_VPORT_ATTR_PORT_NO", "uint32"),
            ("OVS_VPORT_ATTR_TYPE", "uint32"),
            ("OVS_VPORT_ATTR_NAME", "asciiz"),
            ("OVS_VPORT_ATTR_OPTIONS", "none"),
            ("OVS_VPORT_ATTR_UPCALL_PID", "array(uint32)"),
            ("OVS_VPORT_ATTR_STATS", "vportstats"),
            ("OVS_VPORT_ATTR_PAD", "none"),
            ("OVS_VPORT_ATTR_IFINDEX", "uint32"),
            ("OVS_VPORT_ATTR_NETNSID", "uint32"),
        )

        class vportstats(nla):
            # struct ovs_vport_stats layout.
            fields = (
                ("rx_packets", "=Q"),
                ("tx_packets", "=Q"),
                ("rx_bytes", "=Q"),
                ("tx_bytes", "=Q"),
                ("rx_errors", "=Q"),
                ("tx_errors", "=Q"),
                ("rx_dropped", "=Q"),
                ("tx_dropped", "=Q"),
            )

    def type_to_str(vport_type):
        # Map OVS_VPORT_TYPE_* numeric values to human-readable names.
        if vport_type == 1:
            return "netdev"
        elif vport_type == 2:
            return "internal"
        elif vport_type == 3:
            return "gre"
        elif vport_type == 4:
            return "vxlan"
        elif vport_type == 5:
            return "geneve"
        return "unknown:%d" % vport_type

    def __init__(self):
        GenericNetlinkSocket.__init__(self)
        self.bind(OVS_VPORT_FAMILY, OvsVport.ovs_vport_msg)

    def info(self, vport_name, dpifindex=0, portno=None):
        """Query one vport by name (or port number); None on ENODEV."""
        msg = OvsVport.ovs_vport_msg()

        msg["cmd"] = OVS_VPORT_CMD_GET
        msg["version"] = OVS_DATAPATH_VERSION
        msg["reserved"] = 0
        msg["dpifindex"] = dpifindex

        if portno is None:
            msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_name])
        else:
            msg["attrs"].append(["OVS_VPORT_ATTR_PORT_NO", portno])

        try:
            reply = self.nlm_request(
                msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST
            )
            reply = reply[0]
        except NetlinkError as ne:
            if ne.code == errno.ENODEV:
                reply = None
            else:
                raise ne
        return reply
def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=None):
    """Pretty-print one datapath's stats, features and ports.

    BUGFIX: the original default was 'ndb=NDB()' — a mutable default
    argument evaluated at import time, opening a netlink socket even when
    the function is never called.  Create the NDB lazily instead; callers
    that pass their own ndb are unaffected.
    """
    if ndb is None:
        ndb = NDB()

    dp_name = dp_lookup_rep.get_attr("OVS_DP_ATTR_NAME")
    base_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_STATS")
    megaflow_stats = dp_lookup_rep.get_attr("OVS_DP_ATTR_MEGAFLOW_STATS")
    user_features = dp_lookup_rep.get_attr("OVS_DP_ATTR_USER_FEATURES")
    masks_cache_size = dp_lookup_rep.get_attr("OVS_DP_ATTR_MASKS_CACHE_SIZE")

    print("%s:" % dp_name)
    print(
        "  lookups: hit:%d missed:%d lost:%d"
        % (base_stats["hit"], base_stats["missed"], base_stats["lost"])
    )
    print("  flows:%d" % base_stats["flows"])
    pkts = base_stats["hit"] + base_stats["missed"]
    # Guard against division by zero on an idle datapath.
    avg = (megaflow_stats["mask_hit"] / pkts) if pkts != 0 else 0.0
    print(
        "  masks: hit:%d total:%d hit/pkt:%f"
        % (megaflow_stats["mask_hit"], megaflow_stats["masks"], avg)
    )
    print("  caches:")
    print("    masks-cache: size:%d" % masks_cache_size)

    if user_features is not None:
        print("  features: 0x%X" % user_features)

    # port print out
    vpl = OvsVport()
    for iface in ndb.interfaces:
        rep = vpl.info(iface.ifname, ifindex)
        if rep is not None:
            print(
                "  port %d: %s (%s)"
                % (
                    rep.get_attr("OVS_VPORT_ATTR_PORT_NO"),
                    rep.get_attr("OVS_VPORT_ATTR_NAME"),
                    OvsVport.type_to_str(rep.get_attr("OVS_VPORT_ATTR_TYPE")),
                )
            )
def main(argv):
    """Entry point: parse argv and run the show/add-dp/del-dp command.

    BUGFIX: the original accepted 'argv' but called parser.parse_args()
    with no arguments, silently ignoring the parameter.  Parse argv[1:]
    explicitly; behavior is unchanged for the existing
    'main(sys.argv)' call site.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        help="Increment 'verbose' output counter.",
    )
    subparsers = parser.add_subparsers()

    showdpcmd = subparsers.add_parser("show")
    showdpcmd.add_argument(
        "showdp", metavar="N", type=str, nargs="?", help="Datapath Name"
    )

    adddpcmd = subparsers.add_parser("add-dp")
    adddpcmd.add_argument("adddp", help="Datapath Name")
    adddpcmd.add_argument(
        "-u",
        "--upcall",
        action="store_true",
        help="Leave open a reader for upcalls",
    )
    adddpcmd.add_argument(
        "-V",
        "--versioning",
        required=False,
        help="Specify a custom version / feature string",
    )

    deldpcmd = subparsers.add_parser("del-dp")
    deldpcmd.add_argument("deldp", help="Datapath Name")

    args = parser.parse_args(argv[1:])

    ovsdp = OvsDatapath()
    ndb = NDB()

    # argparse only defines the subcommand's attribute when that
    # subcommand was given, so dispatch with hasattr().
    if hasattr(args, "showdp"):
        found = False
        for iface in ndb.interfaces:
            rep = None
            if args.showdp is None:
                rep = ovsdp.info(iface.ifname, 0)
            elif args.showdp == iface.ifname:
                rep = ovsdp.info(iface.ifname, 0)

            if rep is not None:
                found = True
                print_ovsdp_full(rep, iface.index, ndb)

        if not found:
            msg = "No DP found"
            if args.showdp is not None:
                msg += ":'%s'" % args.showdp
            print(msg)
    elif hasattr(args, "adddp"):
        rep = ovsdp.create(args.adddp, args.upcall, args.versioning)
        if rep is None:
            print("DP '%s' already exists" % args.adddp)
        else:
            print("DP '%s' added" % args.adddp)
    elif hasattr(args, "deldp"):
        ovsdp.destroy(args.deldp)

    return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/net/openvswitch/ovs-dpctl.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
from struct import pack
from time import sleep
import errno
import glob
import os
import subprocess
try:
import pytest
except ImportError:
print("Unable to import pytest python module.")
print("\nIf not already installed, you may do so with:")
print("\t\tpip3 install pytest")
exit(1)
SOCKETS = glob.glob('/sys/bus/auxiliary/devices/intel_vsec.sdsi.*')
NUM_SOCKETS = len(SOCKETS)
MODULE_NAME = 'intel_sdsi'
DEV_PREFIX = 'intel_vsec.sdsi'
CLASS_DIR = '/sys/bus/auxiliary/devices'
GUID = "0x6dd191"
def read_bin_file(file):
    """Return the entire contents of *file* as bytes."""
    with open(file, mode='rb') as f:
        return f.read()
def get_dev_file_path(socket, file):
    """Return the sysfs path of *file* under the SDSi device for *socket*."""
    device = DEV_PREFIX + '.' + str(socket)
    return '/'.join((CLASS_DIR, device, file))
def kmemleak_enabled():
    """Return True when the kernel exposes the kmemleak debugfs file."""
    return os.path.isfile("/sys/kernel/debug/kmemleak")
class TestSDSiDriver:
    """Sanity check that the intel_sdsi kernel module is loaded."""
    def test_driver_loaded(self):
        # Equivalent of 'lsmod | grep -q intel_sdsi'; check_output raises
        # CalledProcessError (failing the test) when grep finds no match.
        lsmod_p = subprocess.Popen(('lsmod'), stdout=subprocess.PIPE)
        result = subprocess.check_output(('grep', '-q', MODULE_NAME), stdin=lsmod_p.stdout)
@pytest.mark.parametrize('socket', range(0, NUM_SOCKETS))
class TestSDSiFilesClass:
    """Per-socket checks of the SDSi sysfs files (existence, permissions,
    ownership, sizes, seek behavior)."""

    def read_value(self, file):
        # BUGFIX: use a context manager; the original opened the sysfs
        # file and never closed it.
        with open(file, "r") as f:
            return f.read().strip("\n")

    def get_dev_folder(self, socket):
        return CLASS_DIR + '/' + DEV_PREFIX + '.' + str(socket) + '/'

    def test_sysfs_files_exist(self, socket):
        folder = self.get_dev_folder(socket)
        print(folder)
        assert os.path.isfile(folder + "guid") == True
        assert os.path.isfile(folder + "provision_akc") == True
        assert os.path.isfile(folder + "provision_cap") == True
        assert os.path.isfile(folder + "state_certificate") == True
        assert os.path.isfile(folder + "registers") == True

    def test_sysfs_file_permissions(self, socket):
        folder = self.get_dev_folder(socket)
        mode = os.stat(folder + "guid").st_mode & 0o777
        assert mode == 0o444    # Read all
        mode = os.stat(folder + "registers").st_mode & 0o777
        assert mode == 0o400    # Read owner
        # NOTE: the original comments labeled the two 0o200 modes below
        # "Read owner"; 0o200 is write-only for the owner.
        mode = os.stat(folder + "provision_akc").st_mode & 0o777
        assert mode == 0o200    # Write owner
        mode = os.stat(folder + "provision_cap").st_mode & 0o777
        assert mode == 0o200    # Write owner
        mode = os.stat(folder + "state_certificate").st_mode & 0o777
        assert mode == 0o400    # Read owner

    def test_sysfs_file_ownership(self, socket):
        folder = self.get_dev_folder(socket)

        # All attributes must be owned by root:root.
        for name in ("guid", "registers", "provision_akc",
                     "provision_cap", "state_certificate"):
            st = os.stat(folder + name)
            assert st.st_uid == 0
            assert st.st_gid == 0

    def test_sysfs_file_sizes(self, socket):
        folder = self.get_dev_folder(socket)

        if self.read_value(folder + "guid") == GUID:
            st = os.stat(folder + "registers")
            assert st.st_size == 72

        st = os.stat(folder + "provision_akc")
        assert st.st_size == 1024
        st = os.stat(folder + "provision_cap")
        assert st.st_size == 1024
        st = os.stat(folder + "state_certificate")
        assert st.st_size == 4096

    def test_no_seek_allowed(self, socket):
        folder = self.get_dev_folder(socket)
        rand_file = bytes(os.urandom(8))

        # Writes must start at offset 0; after a seek they fail with
        # ESPIPE.  Context managers close the files on every path.
        for name in ("provision_cap", "provision_akc"):
            with open(folder + name, "wb", 0) as f:
                f.seek(1)
                with pytest.raises(OSError) as error:
                    f.write(rand_file)
                assert error.value.errno == errno.ESPIPE

    def test_registers_seek(self, socket):
        folder = self.get_dev_folder(socket)

        # Check that the value read from an offset of the entire
        # file is none-zero and the same as the value read
        # from seeking to the same location
        with open(folder + "registers", "rb") as f:
            data = f.read()
            f.seek(64)
            id = f.read()
        assert id != bytes(0)
        assert data[64:] == id
@pytest.mark.parametrize('socket', range(0, NUM_SOCKETS))
class TestSDSiMailboxCmdsClass:
    """Per-socket checks of SDSi mailbox command size limits."""

    def test_provision_akc_eoverflow_1017_bytes(self, socket):
        # The buffer for writes is 1k, of with 8 bytes must be
        # reserved for the command, leaving 1016 bytes max.
        # Check that we get an overflow error for 1017 bytes.
        node = get_dev_file_path(socket, "provision_akc")
        rand_file = bytes(os.urandom(1017))

        f = open(node, 'wb', 0)
        with pytest.raises(OSError) as error:
            f.write(rand_file)
        assert error.value.errno == errno.EOVERFLOW
        f.close()
@pytest.mark.parametrize('socket', range(0, NUM_SOCKETS))
class TestSdsiDriverLocksClass:
    """Per-socket checks of driver removal while files are held open."""

    def test_enodev_when_pci_device_removed(self, socket):
        node = get_dev_file_path(socket, "provision_akc")
        dev_name = DEV_PREFIX + '.' + str(socket)
        driver_dir = CLASS_DIR + '/' + dev_name + "/driver/"
        rand_file = bytes(os.urandom(8))

        # Hold two open writers across the unbind so both see ENODEV.
        f = open(node, 'wb', 0)
        g = open(node, 'wb', 0)

        with open(driver_dir + 'unbind', 'w') as k:
            print(dev_name, file = k)

        with pytest.raises(OSError) as error:
            f.write(rand_file)
        assert error.value.errno == errno.ENODEV

        with pytest.raises(OSError) as error:
            g.write(rand_file)
        assert error.value.errno == errno.ENODEV

        f.close()
        g.close()

        # Short wait needed to allow file to close before pulling driver
        sleep(1)

        # Reload the driver stack so later tests see a working device.
        p = subprocess.Popen(('modprobe', '-r', 'intel_sdsi'))
        p.wait()
        p = subprocess.Popen(('modprobe', '-r', 'intel_vsec'))
        p.wait()
        p = subprocess.Popen(('modprobe', 'intel_vsec'))
        p.wait()

        # Short wait needed to allow driver time to get inserted
        # before continuing tests
        sleep(1)

    def test_memory_leak(self, socket):
        if not kmemleak_enabled():
            pytest.skip("kmemleak not enabled in kernel")

        dev_name = DEV_PREFIX + '.' + str(socket)
        driver_dir = CLASS_DIR + '/' + dev_name + "/driver/"

        with open(driver_dir + 'unbind', 'w') as k:
            print(dev_name, file = k)

        sleep(1)

        subprocess.check_output(('modprobe', '-r', 'intel_sdsi'))
        subprocess.check_output(('modprobe', '-r', 'intel_vsec'))
        sleep(1)

        # Trigger a kmemleak scan; an empty report file means no leaks.
        with open('/sys/kernel/debug/kmemleak', 'w') as f:
            print('scan', file = f)
        sleep(5)

        assert os.stat('/sys/kernel/debug/kmemleak').st_size == 0

        subprocess.check_output(('modprobe', 'intel_vsec'))
        sleep(1)
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/drivers/sdsi/sdsi_test.py |
#!/usr/bin/env python
# SPDX-License-Identifier: GPL-2.0
import subprocess
import json as j
import random
class SkipTest(Exception):
    """Raised when a value picker lands on an object that cannot be changed."""
class RandomValuePicker:
    """
    Class for storing shared buffer configuration. Can handle 3 different
    objects, pool, tcbind and portpool. Provide an interface to get random
    values for a specific object type as the follow:
    1. Pool:
       - random size

    2. TcBind:
       - random pool number
       - random threshold

    3. PortPool:
       - random threshold
    """
    def __init__(self, pools):
        self._pools = []
        for pool in pools:
            self._pools.append(pool)

    def _cell_size(self):
        # Cell size is a device-wide constant; any pool reports it.
        return self._pools[0]["cell_size"]

    def _get_static_size(self, th):
        # For threshold of 16, this works out to be about 12MB on Spectrum-1,
        # and about 17MB on Spectrum-2.
        return th * 8000 * self._cell_size()

    def _get_size(self):
        return self._get_static_size(16)

    def _get_thtype(self):
        return "static"

    def _get_th(self, pool):
        # Threshold value could be any integer between 3 to 16
        th = random.randint(3, 16)
        if pool["thtype"] == "dynamic":
            return th
        else:
            # Static thresholds are expressed in bytes, not cells.
            return self._get_static_size(th)

    def _get_pool(self, direction):
        # Pick a random pool matching the requested direction
        # ("ingress" or "egress").
        ing_pools = []
        egr_pools = []
        for pool in self._pools:
            if pool["type"] == "ingress":
                ing_pools.append(pool)
            else:
                egr_pools.append(pool)
        if direction == "ingress":
            arr = ing_pools
        else:
            arr = egr_pools
        return arr[random.randint(0, len(arr) - 1)]

    def get_value(self, objid):
        """Return a random value tuple appropriate for *objid*'s type.

        Raises SkipTest for objects whose configuration is fixed.
        """
        if isinstance(objid, Pool):
            if objid["pool"] in [4, 8, 9, 10]:
                # The threshold type of pools 4, 8, 9 and 10 cannot be changed
                raise SkipTest()
            else:
                return (self._get_size(), self._get_thtype())
        if isinstance(objid, TcBind):
            if objid["tc"] >= 8:
                # Multicast TCs cannot be changed
                raise SkipTest()
            else:
                pool = self._get_pool(objid["type"])
                th = self._get_th(pool)
                pool_n = pool["pool"]
                return (pool_n, th)
        if isinstance(objid, PortPool):
            pool_n = objid["pool"]
            pool = self._pools[pool_n]
            assert pool["pool"] == pool_n
            th = self._get_th(pool)
            return (th,)
class RecordValuePickerException(Exception):
    """Raised when no recorded value exists for the requested object."""
class RecordValuePicker:
    """
    Class for storing shared buffer configuration. Can handle 2 different
    objects, pool and tcbind. Provide an interface to get the stored values per
    object type.
    """
    def __init__(self, objlist):
        # Record each object's current variable values so they can be
        # restored after randomized testing.
        self._recs = [{"objid": o, "value": o.var_tuple()} for o in objlist]

    def get_value(self, objid):
        if isinstance(objid, Pool) and objid["pool"] in [4, 8, 9, 10]:
            # The threshold type of pools 4, 8, 9 and 10 cannot be changed
            raise SkipTest()
        if isinstance(objid, TcBind) and objid["tc"] >= 8:
            # Multicast TCs cannot be changed
            raise SkipTest()
        for rec in self._recs:
            if rec["objid"].weak_eq(objid):
                return rec["value"]
        raise RecordValuePickerException()
def run_cmd(cmd, json=False):
out = subprocess.check_output(cmd, shell=True)
if json:
return j.loads(out)
return out
def run_json_cmd(cmd):
    """Run *cmd* and return its stdout parsed as JSON."""
    return run_cmd(cmd, json=True)
def log_test(test_name, err_msg=None):
    """Print a kselftest-style result line; a non-empty err_msg means FAIL."""
    if not err_msg:
        print("TEST: %-80s [ OK ]" % test_name)
        return
    print("\t%s" % err_msg)
    print("TEST: %-80s [FAIL]" % test_name)
class CommonItem(dict):
    """A devlink object (dict of attributes); 'varitems' names the keys
    that are allowed to vary between snapshots."""
    varitems = []

    def var_tuple(self):
        """Return the variable keys' values, in sorted key order.

        BUGFIX: the original called self.varitems.sort(), mutating the
        shared class-level list in place; iterate a sorted copy instead.
        """
        return tuple(self[key] for key in sorted(self.varitems))

    def weak_eq(self, other):
        """Equality over the non-variable keys only."""
        for key in self:
            if key in self.varitems:
                continue
            if self[key] != other[key]:
                return False
        return True
class CommonList(list):
    """List of CommonItem-like objects with weak-equality lookup/removal."""

    def get_by(self, by_obj):
        """Return the first item weakly equal to by_obj, or None."""
        for item in self:
            if item.weak_eq(by_obj):
                return item
        return None

    def del_by(self, by_obj):
        """Remove every item weakly equal to by_obj.

        BUGFIX: the original removed items while iterating the list it
        was removing from, which skips the element following each match;
        iterate over a copy instead.
        """
        for item in list(self):
            if item.weak_eq(by_obj):
                self.remove(item)
class Pool(CommonItem):
    """One shared-buffer pool; size and threshold type may vary."""
    varitems = ["size", "thtype"]

    def dl_set(self, dlname, size, thtype):
        # Apply size/thtype to this pool via the devlink CLI.
        run_cmd("devlink sb pool set {} sb {} pool {} size {} thtype {}".format(dlname, self["sb"],
                                                                                self["pool"],
                                                                                size, thtype))
class PoolList(CommonList):
    """CommonList specialized for Pool objects."""
    pass
def get_pools(dlname, direction=None):
    """Query devlink and return the PoolList for *dlname*.

    direction: optionally restrict to "ingress" or "egress" pools.
    """
    d = run_json_cmd("devlink sb pool show -j")
    pools = PoolList()
    for pooldict in d["pool"][dlname]:
        if not direction or pooldict["type"] == direction:
            pools.append(Pool(pooldict))
    return pools
def do_check_pools(dlname, pools, vp):
    """For each pool, apply the (size, thtype) chosen by value picker *vp*
    and verify that only that pool changed, to exactly those values."""
    for pool in pools:
        pre_pools = get_pools(dlname)
        try:
            (size, thtype) = vp.get_value(pool)
        except SkipTest:
            # This pool's configuration is fixed; nothing to test.
            continue
        pool.dl_set(dlname, size, thtype)
        post_pools = get_pools(dlname)
        # Re-fetch the modified pool from the fresh snapshot.
        pool = post_pools.get_by(pool)

        err_msg = None
        if pool["size"] != size:
            err_msg = "Incorrect pool size (got {}, expected {})".format(pool["size"], size)
        if pool["thtype"] != thtype:
            err_msg = "Incorrect pool threshold type (got {}, expected {})".format(pool["thtype"], thtype)

        # With the changed pool removed from both snapshots, the rest of
        # the configuration must be unchanged.
        pre_pools.del_by(pool)
        post_pools.del_by(pool)
        if pre_pools != post_pools:
            err_msg = "Other pool setup changed as well"
        log_test("pool {} of sb {} set verification".format(pool["pool"],
                                                            pool["sb"]), err_msg)
def check_pools(dlname, pools):
    """Randomize every pool's configuration, verify, then restore defaults."""
    # Save defaults
    saved = RecordValuePicker(pools)

    # For each pool, set random size and static threshold type
    do_check_pools(dlname, pools, RandomValuePicker(pools))

    # Restore defaults
    do_check_pools(dlname, pools, saved)
class TcBind(CommonItem):
    """One TC-to-pool binding of a port; pool and threshold may vary."""
    varitems = ["pool", "threshold"]

    def __init__(self, port, d):
        super(TcBind, self).__init__(d)
        # Remember which devlink port this binding belongs to.
        self["dlportname"] = port.name

    def dl_set(self, pool, th):
        # Apply pool/threshold to this binding via the devlink CLI.
        run_cmd("devlink sb tc bind set {} sb {} tc {} type {} pool {} th {}".format(self["dlportname"],
                                                                                     self["sb"],
                                                                                     self["tc"],
                                                                                     self["type"],
                                                                                     pool, th))
class TcBindList(CommonList):
    """CommonList specialized for TcBind objects."""
    pass
def get_tcbinds(ports, verify_existence=False):
    """Query devlink and return a TcBindList covering *ports*.

    verify_existence: also log a per-port check that bindings exist.
    """
    d = run_json_cmd("devlink sb tc bind show -j -n")
    tcbinds = TcBindList()
    for port in ports:
        err_msg = None
        bindings = d["tc_bind"].get(port.name, [])
        if len(bindings) == 0:
            err_msg = "No tc bind for port"
        else:
            for tcbinddict in bindings:
                tcbinds.append(TcBind(port, tcbinddict))
        if verify_existence:
            log_test("tc bind existence for port {} verification".format(port.name), err_msg)
    return tcbinds
def do_check_tcbind(ports, tcbinds, vp):
    # For each tc bind entry: snapshot the state, apply a (pool, th) from
    # the value picker, re-read, and verify only the intended entry changed.
    for tcbind in tcbinds:
        pre_tcbinds = get_tcbinds(ports)
        try:
            (pool, th) = vp.get_value(tcbind)
        except SkipTest:
            # Picker declines to provide a value for this entry.
            continue
        tcbind.dl_set(pool, th)
        post_tcbinds = get_tcbinds(ports)
        # Re-resolve the entry from the fresh snapshot (rebinds loop var).
        tcbind = post_tcbinds.get_by(tcbind)
        err_msg = None
        if tcbind["pool"] != pool:
            err_msg = "Incorrect pool (got {}, expected {})".format(tcbind["pool"], pool)
        if tcbind["threshold"] != th:
            # NOTE(review): overwrites a pool-mismatch message; only the
            # last detected failure is reported.
            err_msg = "Incorrect threshold (got {}, expected {})".format(tcbind["threshold"], th)
        # Remove the entry under test, then require everything else to be
        # unchanged between pre and post snapshots.
        pre_tcbinds.del_by(tcbind)
        post_tcbinds.del_by(tcbind)
        if pre_tcbinds != post_tcbinds:
            err_msg = "Other tc bind setup changed as well"
        log_test("tc bind {}-{} of sb {} set verification".format(tcbind["dlportname"],
                                                                  tcbind["tc"],
                                                                  tcbind["sb"]), err_msg)
def check_tcbind(dlname, ports, pools):
    """Verify per-port TC-to-pool bindings can be changed and restored."""
    tcbinds = get_tcbinds(ports, verify_existence=True)
    defaults_vp = RecordValuePicker(tcbinds)  # remember current bindings
    # Bind each port and unicast TC (TCs < 8) to a random pool and a
    # random threshold, verifying each set.
    do_check_tcbind(ports, tcbinds, RandomValuePicker(pools))
    # Put the recorded defaults back, verifying each restore.
    do_check_tcbind(ports, tcbinds, defaults_vp)
class PortPool(CommonItem):
    """One devlink per-port pool entry, tagged with its port name."""
    varitems = ["threshold"]

    def __init__(self, port, d):
        super(PortPool, self).__init__(d)
        self["dlportname"] = port.name

    def dl_set(self, th):
        # Apply the threshold via the devlink CLI.
        cmd = ("devlink sb port pool set {} sb {} pool {} th {}"
               .format(self["dlportname"], self["sb"], self["pool"], th))
        run_cmd(cmd)
class PortPoolList(CommonList):
    # List of PortPool entries; inherits comparison/removal helpers.
    pass
def get_portpools(ports, verify_existence=False):
    """Collect per-port pool entries; optionally log ports lacking any."""
    out = run_json_cmd("devlink sb port pool -j -n")
    portpools = PortPoolList()
    for port in ports:
        entries = out["port_pool"].get(port.name)
        if not entries:
            err_msg = "No port pool for port"
        else:
            err_msg = None
            for entry in entries:
                portpools.append(PortPool(port, entry))
        if verify_existence:
            log_test("port pool existence for port {} verification".format(port.name), err_msg)
    return portpools
def do_check_portpool(ports, portpools, vp):
    """Set each port-pool threshold from *vp* and verify the change.

    For every entry: snapshot the current state, apply the picked
    threshold, re-read, check the entry took the new value and that no
    other entry changed, then log the verdict.
    """
    for portpool in portpools:
        pre_portpools = get_portpools(ports)
        # Consistency fix: guard the value picker with SkipTest exactly
        # like do_check_pools()/do_check_tcbind() do, so a picker that
        # declines an entry skips it instead of aborting the whole run.
        try:
            (th,) = vp.get_value(portpool)
        except SkipTest:
            continue
        portpool.dl_set(th)
        post_portpools = get_portpools(ports)
        # Re-resolve the entry from the fresh snapshot.
        portpool = post_portpools.get_by(portpool)
        err_msg = None
        if portpool["threshold"] != th:
            err_msg = "Incorrect threshold (got {}, expected {})".format(portpool["threshold"], th)
        # Remove the entry under test, then require everything else to be
        # unchanged between pre and post snapshots.
        pre_portpools.del_by(portpool)
        post_portpools.del_by(portpool)
        if pre_portpools != post_portpools:
            err_msg = "Other port pool setup changed as well"
        log_test("port pool {}-{} of sb {} set verification".format(portpool["dlportname"],
                                                                    portpool["pool"],
                                                                    portpool["sb"]), err_msg)
def check_portpool(dlname, ports, pools):
    """Verify per-port pool thresholds can be changed and restored."""
    portpools = get_portpools(ports, verify_existence=True)
    defaults_vp = RecordValuePicker(portpools)  # remember current settings
    # Set a random threshold on each port pool, verifying each set.
    do_check_portpool(ports, portpools, RandomValuePicker(pools))
    # Put the recorded defaults back, verifying each restore.
    do_check_portpool(ports, portpools, defaults_vp)
class Port:
    # Thin wrapper holding a devlink port name.
    def __init__(self, name):
        self.name = name
class PortList(list):
    # Plain list specialized for Port objects.
    pass
def get_ports(dlname):
    """Return the physical ports that belong to device *dlname*."""
    out = run_json_cmd("devlink port show -j")
    ports = PortList()
    for name, attrs in out["port"].items():
        # Port names are prefixed with the devlink handle.
        if name.startswith(dlname) and attrs["flavour"] == "physical":
            ports.append(Port(name))
    return ports
def get_device():
    """Return the devlink handle of the first mlxsw_spectrum device, or None."""
    info = run_json_cmd("devlink -j dev info")["info"]
    for handle, details in info.items():
        if "mlxsw_spectrum" in details["driver"]:
            return handle
    return None
class UnavailableDevlinkNameException(Exception):
    # Raised when no mlxsw_spectrum devlink device is present.
    pass
def test_sb_configuration():
    # Top-level driver: discover the device, then exercise shared-buffer
    # pools, tc binds and port pools (randomize, verify, restore).
    # Use static seed
    random.seed(0)
    dlname = get_device()
    if not dlname:
        raise UnavailableDevlinkNameException()
    ports = get_ports(dlname)
    pools = get_pools(dlname)
    check_pools(dlname, pools)
    check_tcbind(dlname, ports, pools)
    check_portpool(dlname, ports, pools)
test_sb_configuration()
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/drivers/net/mlxsw/sharedbuffer_configuration.py |
#!/usr/bin/env python3
from enum import Enum
class ResultState(Enum):
    """Possible outcomes of a single tdc test case."""
    noresult = -1  # no result recorded yet (treated as skipped in TAP)
    skip = 0       # test was deliberately skipped
    success = 1    # test passed
    fail = 2       # test failed
class TestResult:
    """Outcome of a single tdc test case: state, messages, executed steps."""

    def __init__(self, test_id="", test_name=""):
        self.test_id = test_id
        self.test_name = test_name
        self.result = ResultState.noresult  # until set_result() is called
        self.failmsg = ""
        self.errormsg = ""
        self.steps = []  # commands executed while running the case

    def set_result(self, result):
        """Set the result state; only ResultState values are accepted."""
        if isinstance(result, ResultState):
            self.result = result
            return True
        raise TypeError('Unknown result type, must be type ResultState')

    def get_result(self):
        return self.result

    def set_errormsg(self, errormsg):
        self.errormsg = errormsg
        return True

    def append_errormsg(self, errormsg):
        self.errormsg = '{}\n{}'.format(self.errormsg, errormsg)

    def get_errormsg(self):
        return self.errormsg

    def set_failmsg(self, failmsg):
        self.failmsg = failmsg
        return True

    def append_failmsg(self, failmsg):
        self.failmsg = '{}\n{}'.format(self.failmsg, failmsg)

    def get_failmsg(self):
        return self.failmsg

    def add_steps(self, newstep):
        """Record executed step(s); accepts a list of str or a single str.

        Bug fix: the str branch previously appended the undefined name
        'step' (NameError); it now appends 'newstep'.
        """
        if isinstance(newstep, list):
            self.steps.extend(newstep)
        elif isinstance(newstep, str):
            self.steps.append(newstep)
        else:
            raise TypeError('TdcResults.add_steps() requires a list or str')

    def get_executed_steps(self):
        return self.steps
class TestSuiteReport():
    """Collects TestResult objects and renders TAP or xunit reports."""

    def __init__(self):
        # Bug fix: this used to be a class-level list silently shared by
        # every TestSuiteReport instance; each report now owns its own.
        self._testsuite = []

    def add_resultdata(self, result_data):
        """Append a TestResult; other types are silently ignored."""
        if isinstance(result_data, TestResult):
            self._testsuite.append(result_data)
            return True

    def count_tests(self):
        return len(self._testsuite)

    def count_failures(self):
        return sum(1 for t in self._testsuite if t.result == ResultState.fail)

    def count_skips(self):
        return sum(1 for t in self._testsuite if t.result == ResultState.skip)

    def find_result(self, test_id):
        """Return the stored result with matching test_id, or None."""
        return next((tr for tr in self._testsuite if tr.test_id == test_id), None)

    def update_result(self, result_data):
        """Replace an existing result with the same test_id, or append."""
        orig = self.find_result(result_data.test_id)
        if orig is not None:
            idx = self._testsuite.index(orig)
            self._testsuite[idx] = result_data
        else:
            self.add_resultdata(result_data)

    def format_tap(self):
        """Render the suite in TAP format (plan line, then one line per test)."""
        ftap = ""
        ftap += '1..{}\n'.format(self.count_tests())
        index = 1
        for t in self._testsuite:
            if t.result == ResultState.fail:
                ftap += 'not '
            ftap += 'ok {} {} - {}'.format(str(index), t.test_id, t.test_name)
            # noresult is reported as skipped, same as an explicit skip.
            if t.result == ResultState.skip or t.result == ResultState.noresult:
                ftap += ' # skipped - {}\n'.format(t.errormsg)
            elif t.result == ResultState.fail:
                if len(t.steps) > 0:
                    ftap += '\tCommands executed in this test case:'
                    for step in t.steps:
                        ftap += '\n\t\t{}'.format(step)
                ftap += '\n\t{}'.format(t.failmsg)
            ftap += '\n'
            index += 1
        return ftap

    def format_xunit(self):
        """Render the suite as a single-testsuite xunit XML document."""
        from xml.sax.saxutils import escape
        xunit = "<testsuites>\n"
        xunit += '\t<testsuite tests=\"{}\" skips=\"{}\">\n'.format(self.count_tests(), self.count_skips())
        for t in self._testsuite:
            xunit += '\t\t<testcase classname=\"{}\" '.format(escape(t.test_id))
            xunit += 'name=\"{}\">\n'.format(escape(t.test_name))
            if t.failmsg:
                xunit += '\t\t\t<failure>\n'
                if len(t.steps) > 0:
                    xunit += 'Commands executed in this test case:\n'
                    for step in t.steps:
                        xunit += '\t{}\n'.format(escape(step))
                xunit += 'FAILURE: {}\n'.format(escape(t.failmsg))
                xunit += '\t\t\t</failure>\n'
            if t.errormsg:
                xunit += '\t\t\t<error>\n{}\n'.format(escape(t.errormsg))
                xunit += '\t\t\t</error>\n'
            if t.result == ResultState.skip:
                xunit += '\t\t\t<skipped/>\n'
            xunit += '\t\t</testcase>\n'
        xunit += '\t</testsuite>\n'
        xunit += '</testsuites>\n'
        return xunit
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/tc-testing/TdcResults.py |
"""
tdc_config_local.py - tdc plugin-writer-specified values

Copyright (C) 2017 [email protected]

Template for local overrides: copy to tdc_config_local.py and adjust the
values below for your environment.
"""
import os
# Snapshot of the caller's environment, extended below for valgrind runs.
ENVIR = os.environ.copy()
ENV_LD_LIBRARY_PATH = os.getenv('LD_LIBRARY_PATH', '')
ENV_OTHER_LIB = os.getenv('OTHER_LIB', '')
# example adding value to NAMES, without editing tdc_config.py
EXTRA_NAMES = dict()
EXTRA_NAMES['SOME_BIN'] = os.path.join(os.getenv('OTHER_BIN', ''), 'some_bin')
# example adding values to ENVIR, without editing tdc_config.py
ENVIR['VALGRIND_LIB'] = '/usr/lib/valgrind'
ENVIR['VALGRIND_BIN'] = '/usr/bin/valgrind'
ENVIR['VGDB_BIN'] = '/usr/bin/vgdb'
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/tc-testing/tdc_config_local_template.py |
#!/usr/bin/env python3
class TdcPlugin:
    """Base class for tdc plugins: hooks invoked around the test suite,
    each test case, and command execution.

    NOTE(review): relies on the subclass setting self.sub_class, and on
    check_args() being called (to set self.args) before the other hooks
    run — confirm against the tdc.py plugin loader.
    """
    def __init__(self):
        super().__init__()
        # Unconditional trace print; other hooks honor args.verbose.
        print(' -- {}.__init__'.format(self.sub_class))
    def pre_suite(self, testcount, testidlist):
        '''run commands before test_runner goes into a test loop'''
        self.testcount = testcount
        self.testidlist = testidlist
        if self.args.verbose > 1:
            print(' -- {}.pre_suite'.format(self.sub_class))
    def post_suite(self, index):
        '''run commands after test_runner completes the test loop
        index is the last ordinal number of test that was attempted'''
        if self.args.verbose > 1:
            print(' -- {}.post_suite'.format(self.sub_class))
    def pre_case(self, caseinfo, test_skip):
        '''run commands before test_runner does one test'''
        if self.args.verbose > 1:
            print(' -- {}.pre_case'.format(self.sub_class))
        # Stash the current case context for use by the other hooks.
        self.args.caseinfo = caseinfo
        self.args.test_skip = test_skip
    def post_case(self):
        '''run commands after test_runner does one test'''
        if self.args.verbose > 1:
            print(' -- {}.post_case'.format(self.sub_class))
    def pre_execute(self):
        '''run command before test-runner does the execute step'''
        if self.args.verbose > 1:
            print(' -- {}.pre_execute'.format(self.sub_class))
    def post_execute(self):
        '''run command after test-runner does the execute step'''
        if self.args.verbose > 1:
            print(' -- {}.post_execute'.format(self.sub_class))
    def adjust_command(self, stage, command):
        '''adjust the command

        Default implementation returns the command unchanged; subclasses
        may rewrite it per stage (see the skeleton below).'''
        if self.args.verbose > 1:
            print(' -- {}.adjust_command {}'.format(self.sub_class, stage))
        # if stage == 'pre':
        #     pass
        # elif stage == 'setup':
        #     pass
        # elif stage == 'execute':
        #     pass
        # elif stage == 'verify':
        #     pass
        # elif stage == 'teardown':
        #     pass
        # elif stage == 'post':
        #     pass
        # else:
        #     pass
        return command
    def add_args(self, parser):
        '''Get the plugin args from the command line'''
        self.argparser = parser
        return self.argparser
    def check_args(self, args, remaining):
        '''Check that the args are set correctly'''
        self.args = args
        if self.args.verbose > 1:
            print(' -- {}.check_args'.format(self.sub_class))
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/tc-testing/TdcPlugin.py |
"""
# SPDX-License-Identifier: GPL-2.0
tdc_helper.py - tdc helper functions
Copyright (C) 2017 Lucas Bates <[email protected]>
"""
def get_categorized_testlist(alltests, ucat):
    """Group the master test list by category.

    Returns a dict mapping each category in *ucat* to the list of test
    cases whose 'category' list contains it.
    """
    return {cat: [tc for tc in alltests if cat in tc['category']]
            for cat in ucat}
def get_unique_item(lst):
    """Return the unique items of *lst*.

    Lists of length 0 or 1 are returned unchanged; longer lists are
    deduplicated via set() (order is not preserved).
    """
    if len(lst) <= 1:
        return lst
    return list(set(lst))
def get_test_categories(alltests):
    """Discover all unique test categories present in the test case list."""
    cats = []
    for tc in alltests:
        entry = tc['category']
        # Inline of get_unique_item(): dedupe only when len > 1.
        cats.extend(list(set(entry)) if len(entry) > 1 else entry)
    return list(set(cats)) if len(cats) > 1 else cats
def list_test_cases(testlist):
    """Print ID, categories and name of every test case, one per line."""
    for entry in testlist:
        line = entry['id'] + ': (' + ', '.join(entry['category']) + ") " + entry['name']
        print(line)
def list_categories(testlist):
    """ Show all categories that are present in a test case file. """
    # NOTE(review): elsewhere in this module each test's 'category' is a
    # list (see get_categorized_testlist); set() over unhashable lists
    # would raise TypeError here. Presumably this expects string-valued
    # categories — verify against callers.
    categories = set(map(lambda x: x['category'], testlist))
    print("Available categories:")
    print(", ".join(str(s) for s in categories))
    print("")
def print_list(cmdlist):
    """Print each entry prefixed with a tab; list entries print their first item."""
    for entry in cmdlist:
        if type(entry) == list:
            text = entry[0]
        else:
            text = entry
        print("\t" + str(text))
def print_sll(items):
    """Print each item on its own line."""
    print(*items, sep="\n")
def print_test_case(tcase):
    """Pretty-print one test case: a header, then every non-id/name field."""
    print('\n==============\nTest {}\t{}\n'.format(tcase['id'], tcase['name']))
    for key, val in tcase.items():
        if isinstance(val, list):
            print(key + ":")
            print_list(val)
        elif key != 'id' and key != 'name':
            print(key + ": " + str(val))
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/tc-testing/tdc_helper.py |
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0
"""
tdc_multibatch.py - a thin wrapper over tdc_batch.py to generate multiple batch
files
Copyright (C) 2019 Vlad Buslov <[email protected]>
"""
import argparse
import os
# Command-line interface: generate several tdc batch files by invoking
# tdc_batch.py once per file.
parser = argparse.ArgumentParser(
    description='TC multiple batch file generator')
parser.add_argument("device", help="device name")
parser.add_argument("dir", help="where to put batch files")
parser.add_argument(
    "num_filters", type=int, help="how many lines per batch file")
parser.add_argument("num_files", type=int, help="how many batch files")
parser.add_argument(
    "operation",
    choices=['add', 'del', 'replace'],
    help="operation to perform on filters")
parser.add_argument(
    "-x",
    "--file_prefix",
    default="",
    help="prefix for generated batch file names")
parser.add_argument(
    "-d",
    "--duplicate_handles",
    action="store_true",
    help="duplicate filter handle range in all files")
parser.add_argument(
    "-a",
    "--handle_start",
    type=int,
    default=1,
    help="start handle range from (default: 1)")
parser.add_argument(
    "-m",
    "--mac_prefix",
    type=int,
    default=0,
    choices=range(0, 256),
    help="add this value to third byte of source MAC address of flower filter"
    "(default: 0)")
args = parser.parse_args()
# NOTE(review): 'dir' and 'file' shadow builtins; kept as-is.
device = args.device
dir = args.dir
file_prefix = args.file_prefix + args.operation + "_"
num_filters = args.num_filters
num_files = args.num_files
operation = args.operation
duplicate_handles = args.duplicate_handles
handle = args.handle_start
mac_prefix = args.mac_prefix
# One tdc_batch.py invocation per output file; unless -d is given, each
# file gets a disjoint filter-handle range.
for i in range(num_files):
    file = dir + '/' + file_prefix + str(i)
    os.system("./tdc_batch.py -n {} -a {} -e {} -m {} {} {}".format(
        num_filters, handle, operation, i + mac_prefix, device, file))
    if not duplicate_handles:
        handle += num_filters
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/tc-testing/tdc_multibatch.py |
#!/usr/bin/env python3
"""
tdc_batch.py - a script to generate TC batch file
Copyright (C) 2017 Chris Mi <[email protected]>
"""
import argparse
# Command-line interface for the batch-file generator.
parser = argparse.ArgumentParser(description='TC batch file generator')
parser.add_argument("device", help="device name")
parser.add_argument("file", help="batch file name")
parser.add_argument("-n", "--number", type=int,
                    help="how many lines in batch file")
parser.add_argument(
    "-a",
    "--handle_start",
    type=int,
    default=1,
    help="start handle range from (default: 1)")
parser.add_argument("-o", "--skip_sw",
                    help="skip_sw (offload), by default skip_hw",
                    action="store_true")
parser.add_argument("-s", "--share_action",
                    help="all filters share the same action",
                    action="store_true")
parser.add_argument("-p", "--prio",
                    help="all filters have different prio",
                    action="store_true")
parser.add_argument(
    "-e",
    "--operation",
    choices=['add', 'del', 'replace'],
    default='add',
    help="operation to perform on filters"
    "(default: add filter)")
parser.add_argument(
    "-m",
    "--mac_prefix",
    type=int,
    default=0,
    choices=range(0, 256),
    help="third byte of source MAC address of flower filter"
    "(default: 0)")
args = parser.parse_args()
device = args.device
# NOTE(review): 'file' shadows a builtin; it stays open until the
# generation loop at the bottom of the script closes it on exit.
file = open(args.file, 'w')
number = 1
if args.number:
    number = args.number
handle_start = args.handle_start
# Filters are skip_hw by default; -o switches to skip_sw (offload).
skip = "skip_hw"
if args.skip_sw:
    skip = "skip_sw"
# -s makes every filter share action "index 1".
share_action = ""
if args.share_action:
    share_action = "index 1"
# With -p each filter gets a distinct prio (tc auto-assigns when the
# prio string is empty); the handle space is then capped at 0x4000.
prio = "prio 1"
if args.prio:
    prio = ""
    if number > 0x4000:
        number = 0x4000
mac_prefix = args.mac_prefix
def format_add_filter(device, prio, handle, skip, src_mac, dst_mac,
                      share_action):
    """Build one 'tc filter add' flower drop-rule line."""
    return ("filter add dev %s %s protocol ip ingress handle %s "
            " flower %s src_mac %s dst_mac %s action drop %s"
            % (device, prio, handle, skip, src_mac, dst_mac, share_action))
def format_rep_filter(device, prio, handle, skip, src_mac, dst_mac,
                      share_action):
    """Build one 'tc filter replace' flower drop-rule line."""
    return ("filter replace dev %s %s protocol ip ingress handle %s "
            " flower %s src_mac %s dst_mac %s action drop %s"
            % (device, prio, handle, skip, src_mac, dst_mac, share_action))
def format_del_filter(device, prio, handle, skip, src_mac, dst_mac,
                      share_action):
    """Build one 'tc filter del' line.

    The mac/skip/action arguments are unused; the signature is kept
    uniform so all three formatters are interchangeable.
    """
    return ("filter del dev %s %s protocol ip ingress handle %s "
            "flower" % (device, prio, handle))
# Select the line formatter matching the requested operation.
formatter = format_add_filter
if args.operation == "del":
    formatter = format_del_filter
elif args.operation == "replace":
    formatter = format_rep_filter
# Enumerate the low three MAC bytes so every filter gets a unique
# src/dst MAC pair; write one formatted line per filter.
index = 0
for i in range(0x100):
    for j in range(0x100):
        for k in range(0x100):
            mac = ("{:02x}:{:02x}:{:02x}".format(i, j, k))
            src_mac = "e4:11:{:02x}:{}".format(mac_prefix, mac)
            dst_mac = "e4:12:00:" + mac
            cmd = formatter(device, prio, handle_start + index, skip, src_mac,
                            dst_mac, share_action)
            file.write("{}\n".format(cmd))
            index += 1
            if index >= number:
                # Requested line count reached: close the batch file and
                # exit the script.
                file.close()
                exit(0)
| grace-kernel-grace-kernel-6.1.y | tools/testing/selftests/tc-testing/tdc_batch.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.